398 files changed, 44306 insertions, 91337 deletions
diff --git a/editor/editor_node.cpp b/editor/editor_node.cpp index 52cf54350f..ca8b278ab8 100644 --- a/editor/editor_node.cpp +++ b/editor/editor_node.cpp @@ -91,7 +91,6 @@ #include "editor/export_template_manager.h" #include "editor/filesystem_dock.h" #include "editor/import/editor_import_collada.h" -#include "editor/import/editor_scene_importer_gltf.h" #include "editor/import/resource_importer_bitmask.h" #include "editor/import/resource_importer_csv.h" #include "editor/import/resource_importer_csv_translation.h" @@ -103,6 +102,7 @@ #include "editor/import/resource_importer_texture.h" #include "editor/import/resource_importer_texture_atlas.h" #include "editor/import/resource_importer_wav.h" +#include "editor/import/scene_importer_mesh_node_3d.h" #include "editor/import_dock.h" #include "editor/multi_node_edit.h" #include "editor/node_dock.h" @@ -3673,7 +3673,7 @@ void EditorNode::register_editor_types() { ClassDB::register_class<EditorFeatureProfile>(); ClassDB::register_class<EditorSpinSlider>(); ClassDB::register_class<EditorSceneImporterMesh>(); - ClassDB::register_class<EditorSceneImporterMeshNode>(); + ClassDB::register_class<EditorSceneImporterMeshNode3D>(); ClassDB::register_virtual_class<FileSystemDock>(); @@ -5712,10 +5712,6 @@ EditorNode::EditorNode() { import_obj2.instance(); import_scene->add_importer(import_obj2); - Ref<EditorSceneImporterGLTF> import_gltf; - import_gltf.instance(); - import_scene->add_importer(import_gltf); - Ref<EditorSceneImporterESCN> import_escn; import_escn.instance(); import_scene->add_importer(import_escn); diff --git a/editor/import/editor_import_collada.cpp b/editor/import/editor_import_collada.cpp index 4e93fe6f12..db5b7b36aa 100644 --- a/editor/import/editor_import_collada.cpp +++ b/editor/import/editor_import_collada.cpp @@ -33,6 +33,7 @@ #include "core/os/os.h" #include "editor/editor_node.h" #include "editor/import/collada.h" +#include "editor/import/scene_importer_mesh_node_3d.h" #include "scene/3d/camera_3d.h" #include "scene/3d/light_3d.h" #include "scene/3d/mesh_instance_3d.h" @@ -278,8 +279,8 @@ Error ColladaImport::_create_scene(Collada::Node *p_node, Node3D *p_parent) { node = memnew(Path3D); } else { //mesh since nothing else - node = memnew(EditorSceneImporterMeshNode); - //Object::cast_to<EditorSceneImporterMeshNode>(node)->set_flag(GeometryInstance3D::FLAG_USE_BAKED_LIGHT, true); + node = memnew(EditorSceneImporterMeshNode3D); + //Object::cast_to<EditorSceneImporterMeshNode3D>(node)->set_flag(GeometryInstance3D::FLAG_USE_BAKED_LIGHT, true); } } break; case Collada::Node::TYPE_SKELETON: { @@ -1003,10 +1004,10 @@ Error ColladaImport::_create_resources(Collada::Node *p_node, bool p_use_compres } } - if (Object::cast_to<EditorSceneImporterMeshNode>(node)) { + if (Object::cast_to<EditorSceneImporterMeshNode3D>(node)) { Collada::NodeGeometry *ng2 = static_cast<Collada::NodeGeometry *>(p_node); - EditorSceneImporterMeshNode *mi = Object::cast_to<EditorSceneImporterMeshNode>(node); + EditorSceneImporterMeshNode3D *mi = Object::cast_to<EditorSceneImporterMeshNode3D>(node); ERR_FAIL_COND_V(!mi, ERR_BUG); diff --git a/editor/import/editor_scene_importer_gltf.cpp b/editor/import/editor_scene_importer_gltf.cpp deleted file mode 100644 index 1059692ca0..0000000000 --- a/editor/import/editor_scene_importer_gltf.cpp +++ /dev/null @@ -1,3254 +0,0 @@ -/*************************************************************************/ -/* editor_scene_importer_gltf.cpp */ -/*************************************************************************/ -/* This 
file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -/*************************************************************************/ - -#include "editor_scene_importer_gltf.h" - -#include "core/crypto/crypto_core.h" -#include "core/io/json.h" -#include "core/math/disjoint_set.h" -#include "core/math/math_defs.h" -#include "core/os/file_access.h" -#include "core/os/os.h" -#include "modules/regex/regex.h" -#include "scene/3d/bone_attachment_3d.h" -#include "scene/3d/camera_3d.h" -#include "scene/3d/mesh_instance_3d.h" -#include "scene/animation/animation_player.h" -#include "scene/resources/surface_tool.h" - -uint32_t EditorSceneImporterGLTF::get_import_flags() const { - return IMPORT_SCENE | IMPORT_ANIMATION; -} - -void EditorSceneImporterGLTF::get_extensions(List<String> *r_extensions) const { - r_extensions->push_back("gltf"); - r_extensions->push_back("glb"); -} - -Error EditorSceneImporterGLTF::_parse_json(const String &p_path, GLTFState &state) { - Error err; - FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); - if (!f) { - return err; - } - - Vector<uint8_t> array; - array.resize(f->get_len()); - f->get_buffer(array.ptrw(), array.size()); - String text; - text.parse_utf8((const char *)array.ptr(), array.size()); - - String err_txt; - int err_line; - Variant v; - err = JSON::parse(text, v, err_txt, err_line); - if (err != OK) { - _err_print_error("", p_path.utf8().get_data(), err_line, err_txt.utf8().get_data(), ERR_HANDLER_SCRIPT); - return err; - } - state.json = v; - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_glb(const String &p_path, GLTFState &state) { - Error err; - FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); - if (!f) { - return err; - } - - uint32_t magic = f->get_32(); - ERR_FAIL_COND_V(magic != 0x46546C67, ERR_FILE_UNRECOGNIZED); //glTF - f->get_32(); // version - f->get_32(); // length - - uint32_t chunk_length = f->get_32(); - uint32_t chunk_type = f->get_32(); - - ERR_FAIL_COND_V(chunk_type != 0x4E4F534A, ERR_PARSE_ERROR); //JSON - Vector<uint8_t> json_data; - json_data.resize(chunk_length); - uint32_t len = f->get_buffer(json_data.ptrw(), chunk_length); - 
ERR_FAIL_COND_V(len != chunk_length, ERR_FILE_CORRUPT); - - String text; - text.parse_utf8((const char *)json_data.ptr(), json_data.size()); - - String err_txt; - int err_line; - Variant v; - err = JSON::parse(text, v, err_txt, err_line); - if (err != OK) { - _err_print_error("", p_path.utf8().get_data(), err_line, err_txt.utf8().get_data(), ERR_HANDLER_SCRIPT); - return err; - } - - state.json = v; - - //data? - - chunk_length = f->get_32(); - chunk_type = f->get_32(); - - if (f->eof_reached()) { - return OK; //all good - } - - ERR_FAIL_COND_V(chunk_type != 0x004E4942, ERR_PARSE_ERROR); //BIN - - state.glb_data.resize(chunk_length); - len = f->get_buffer(state.glb_data.ptrw(), chunk_length); - ERR_FAIL_COND_V(len != chunk_length, ERR_FILE_CORRUPT); - - return OK; -} - -static Vector3 _arr_to_vec3(const Array &p_array) { - ERR_FAIL_COND_V(p_array.size() != 3, Vector3()); - return Vector3(p_array[0], p_array[1], p_array[2]); -} - -static Quat _arr_to_quat(const Array &p_array) { - ERR_FAIL_COND_V(p_array.size() != 4, Quat()); - return Quat(p_array[0], p_array[1], p_array[2], p_array[3]); -} - -static Transform _arr_to_xform(const Array &p_array) { - ERR_FAIL_COND_V(p_array.size() != 16, Transform()); - - Transform xform; - xform.basis.set_axis(Vector3::AXIS_X, Vector3(p_array[0], p_array[1], p_array[2])); - xform.basis.set_axis(Vector3::AXIS_Y, Vector3(p_array[4], p_array[5], p_array[6])); - xform.basis.set_axis(Vector3::AXIS_Z, Vector3(p_array[8], p_array[9], p_array[10])); - xform.set_origin(Vector3(p_array[12], p_array[13], p_array[14])); - - return xform; -} - -String EditorSceneImporterGLTF::_sanitize_scene_name(const String &name) { - RegEx regex("([^a-zA-Z0-9_ -]+)"); - String p_name = regex.sub(name, "", true); - return p_name; -} - -String EditorSceneImporterGLTF::_gen_unique_name(GLTFState &state, const String &p_name) { - const String s_name = _sanitize_scene_name(p_name); - - String name; - int index = 1; - while (true) { - name = s_name; - - if (index > 1) { - name += " " + itos(index); - } - if (!state.unique_names.has(name)) { - break; - } - index++; - } - - state.unique_names.insert(name); - - return name; -} - -String EditorSceneImporterGLTF::_sanitize_bone_name(const String &name) { - String p_name = name.camelcase_to_underscore(true); - - RegEx pattern_nocolon(":"); - p_name = pattern_nocolon.sub(p_name, "_", true); - - RegEx pattern_noslash("/"); - p_name = pattern_noslash.sub(p_name, "_", true); - - RegEx pattern_nospace(" +"); - p_name = pattern_nospace.sub(p_name, "_", true); - - RegEx pattern_multiple("_+"); - p_name = pattern_multiple.sub(p_name, "_", true); - - RegEx pattern_padded("0+(\\d+)"); - p_name = pattern_padded.sub(p_name, "$1", true); - - return p_name; -} - -String EditorSceneImporterGLTF::_gen_unique_bone_name(GLTFState &state, const GLTFSkeletonIndex skel_i, const String &p_name) { - String s_name = _sanitize_bone_name(p_name); - if (s_name.empty()) { - s_name = "bone"; - } - String name; - int index = 1; - while (true) { - name = s_name; - - if (index > 1) { - name += "_" + itos(index); - } - if (!state.skeletons[skel_i].unique_names.has(name)) { - break; - } - index++; - } - - state.skeletons.write[skel_i].unique_names.insert(name); - - return name; -} - -Error EditorSceneImporterGLTF::_parse_scenes(GLTFState &state) { - ERR_FAIL_COND_V(!state.json.has("scenes"), ERR_FILE_CORRUPT); - const Array &scenes = state.json["scenes"]; - int loaded_scene = 0; - if (state.json.has("scene")) { - loaded_scene = state.json["scene"]; - } else { - WARN_PRINT("The 
load-time scene is not defined in the glTF2 file. Picking the first scene."); - } - - if (scenes.size()) { - ERR_FAIL_COND_V(loaded_scene >= scenes.size(), ERR_FILE_CORRUPT); - const Dictionary &s = scenes[loaded_scene]; - ERR_FAIL_COND_V(!s.has("nodes"), ERR_UNAVAILABLE); - const Array &nodes = s["nodes"]; - for (int j = 0; j < nodes.size(); j++) { - state.root_nodes.push_back(nodes[j]); - } - - if (s.has("name") && s["name"] != "") { - state.scene_name = _gen_unique_name(state, s["name"]); - } else { - state.scene_name = _gen_unique_name(state, "Scene"); - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_nodes(GLTFState &state) { - ERR_FAIL_COND_V(!state.json.has("nodes"), ERR_FILE_CORRUPT); - const Array &nodes = state.json["nodes"]; - for (int i = 0; i < nodes.size(); i++) { - GLTFNode *node = memnew(GLTFNode); - const Dictionary &n = nodes[i]; - - if (n.has("name")) { - node->name = n["name"]; - } - if (n.has("camera")) { - node->camera = n["camera"]; - } - if (n.has("mesh")) { - node->mesh = n["mesh"]; - } - if (n.has("skin")) { - node->skin = n["skin"]; - } - if (n.has("matrix")) { - node->xform = _arr_to_xform(n["matrix"]); - - } else { - if (n.has("translation")) { - node->translation = _arr_to_vec3(n["translation"]); - } - if (n.has("rotation")) { - node->rotation = _arr_to_quat(n["rotation"]); - } - if (n.has("scale")) { - node->scale = _arr_to_vec3(n["scale"]); - } - - node->xform.basis.set_quat_scale(node->rotation, node->scale); - node->xform.origin = node->translation; - } - if (n.has("extensions")) { - Dictionary extensions = n["extensions"]; - if (extensions.has("KHR_lights_punctual")) { - Dictionary lights_punctual = extensions["KHR_lights_punctual"]; - if (lights_punctual.has("light")) { - GLTFLightIndex light = lights_punctual["light"]; - node->light = light; - } - } - } - if (n.has("children")) { - const Array &children = n["children"]; - for (int j = 0; j < children.size(); j++) { - node->children.push_back(children[j]); - } - } - - state.nodes.push_back(node); - } - - // build the hierarchy - for (GLTFNodeIndex node_i = 0; node_i < state.nodes.size(); node_i++) { - for (int j = 0; j < state.nodes[node_i]->children.size(); j++) { - GLTFNodeIndex child_i = state.nodes[node_i]->children[j]; - - ERR_FAIL_INDEX_V(child_i, state.nodes.size(), ERR_FILE_CORRUPT); - ERR_CONTINUE(state.nodes[child_i]->parent != -1); //node already has a parent, wtf. 
- - state.nodes[child_i]->parent = node_i; - } - } - - _compute_node_heights(state); - - return OK; -} - -void EditorSceneImporterGLTF::_compute_node_heights(GLTFState &state) { - state.root_nodes.clear(); - for (GLTFNodeIndex node_i = 0; node_i < state.nodes.size(); ++node_i) { - GLTFNode *node = state.nodes[node_i]; - node->height = 0; - - GLTFNodeIndex current_i = node_i; - while (current_i >= 0) { - const GLTFNodeIndex parent_i = state.nodes[current_i]->parent; - if (parent_i >= 0) { - ++node->height; - } - current_i = parent_i; - } - - if (node->height == 0) { - state.root_nodes.push_back(node_i); - } - } -} - -static Vector<uint8_t> _parse_base64_uri(const String &uri) { - int start = uri.find(","); - ERR_FAIL_COND_V(start == -1, Vector<uint8_t>()); - - CharString substr = uri.right(start + 1).ascii(); - - int strlen = substr.length(); - - Vector<uint8_t> buf; - buf.resize(strlen / 4 * 3 + 1 + 1); - - size_t len = 0; - ERR_FAIL_COND_V(CryptoCore::b64_decode(buf.ptrw(), buf.size(), &len, (unsigned char *)substr.get_data(), strlen) != OK, Vector<uint8_t>()); - - buf.resize(len); - - return buf; -} - -Error EditorSceneImporterGLTF::_parse_buffers(GLTFState &state, const String &p_base_path) { - if (!state.json.has("buffers")) { - return OK; - } - - const Array &buffers = state.json["buffers"]; - for (GLTFBufferIndex i = 0; i < buffers.size(); i++) { - if (i == 0 && state.glb_data.size()) { - state.buffers.push_back(state.glb_data); - - } else { - const Dictionary &buffer = buffers[i]; - if (buffer.has("uri")) { - Vector<uint8_t> buffer_data; - String uri = buffer["uri"]; - - if (uri.begins_with("data:")) { // Embedded data using base64. - // Validate data MIME types and throw an error if it's one we don't know/support. - if (!uri.begins_with("data:application/octet-stream;base64") && - !uri.begins_with("data:application/gltf-buffer;base64")) { - ERR_PRINT("glTF: Got buffer with an unknown URI data type: " + uri); - } - buffer_data = _parse_base64_uri(uri); - } else { // Relative path to an external image file. - uri = p_base_path.plus_file(uri).replace("\\", "/"); // Fix for Windows. 
- buffer_data = FileAccess::get_file_as_array(uri); - ERR_FAIL_COND_V_MSG(buffer.size() == 0, ERR_PARSE_ERROR, "glTF: Couldn't load binary file as an array: " + uri); - } - - ERR_FAIL_COND_V(!buffer.has("byteLength"), ERR_PARSE_ERROR); - int byteLength = buffer["byteLength"]; - ERR_FAIL_COND_V(byteLength < buffer_data.size(), ERR_PARSE_ERROR); - state.buffers.push_back(buffer_data); - } - } - } - - print_verbose("glTF: Total buffers: " + itos(state.buffers.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_buffer_views(GLTFState &state) { - ERR_FAIL_COND_V(!state.json.has("bufferViews"), ERR_FILE_CORRUPT); - const Array &buffers = state.json["bufferViews"]; - for (GLTFBufferViewIndex i = 0; i < buffers.size(); i++) { - const Dictionary &d = buffers[i]; - - GLTFBufferView buffer_view; - - ERR_FAIL_COND_V(!d.has("buffer"), ERR_PARSE_ERROR); - buffer_view.buffer = d["buffer"]; - ERR_FAIL_COND_V(!d.has("byteLength"), ERR_PARSE_ERROR); - buffer_view.byte_length = d["byteLength"]; - - if (d.has("byteOffset")) { - buffer_view.byte_offset = d["byteOffset"]; - } - - if (d.has("byteStride")) { - buffer_view.byte_stride = d["byteStride"]; - } - - if (d.has("target")) { - const int target = d["target"]; - buffer_view.indices = target == ELEMENT_ARRAY_BUFFER; - } - - state.buffer_views.push_back(buffer_view); - } - - print_verbose("glTF: Total buffer views: " + itos(state.buffer_views.size())); - - return OK; -} - -EditorSceneImporterGLTF::GLTFType EditorSceneImporterGLTF::_get_type_from_str(const String &p_string) { - if (p_string == "SCALAR") { - return TYPE_SCALAR; - } - - if (p_string == "VEC2") { - return TYPE_VEC2; - } - if (p_string == "VEC3") { - return TYPE_VEC3; - } - if (p_string == "VEC4") { - return TYPE_VEC4; - } - - if (p_string == "MAT2") { - return TYPE_MAT2; - } - if (p_string == "MAT3") { - return TYPE_MAT3; - } - if (p_string == "MAT4") { - return TYPE_MAT4; - } - - ERR_FAIL_V(TYPE_SCALAR); -} - -Error EditorSceneImporterGLTF::_parse_accessors(GLTFState &state) { - ERR_FAIL_COND_V(!state.json.has("accessors"), ERR_FILE_CORRUPT); - const Array &accessors = state.json["accessors"]; - for (GLTFAccessorIndex i = 0; i < accessors.size(); i++) { - const Dictionary &d = accessors[i]; - - GLTFAccessor accessor; - - ERR_FAIL_COND_V(!d.has("componentType"), ERR_PARSE_ERROR); - accessor.component_type = d["componentType"]; - ERR_FAIL_COND_V(!d.has("count"), ERR_PARSE_ERROR); - accessor.count = d["count"]; - ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); - accessor.type = _get_type_from_str(d["type"]); - - if (d.has("bufferView")) { - accessor.buffer_view = d["bufferView"]; //optional because it may be sparse... - } - - if (d.has("byteOffset")) { - accessor.byte_offset = d["byteOffset"]; - } - - if (d.has("max")) { - accessor.max = d["max"]; - } - - if (d.has("min")) { - accessor.min = d["min"]; - } - - if (d.has("sparse")) { - //eeh.. 
- - const Dictionary &s = d["sparse"]; - - ERR_FAIL_COND_V(!s.has("count"), ERR_PARSE_ERROR); - accessor.sparse_count = s["count"]; - ERR_FAIL_COND_V(!s.has("indices"), ERR_PARSE_ERROR); - const Dictionary &si = s["indices"]; - - ERR_FAIL_COND_V(!si.has("bufferView"), ERR_PARSE_ERROR); - accessor.sparse_indices_buffer_view = si["bufferView"]; - ERR_FAIL_COND_V(!si.has("componentType"), ERR_PARSE_ERROR); - accessor.sparse_indices_component_type = si["componentType"]; - - if (si.has("byteOffset")) { - accessor.sparse_indices_byte_offset = si["byteOffset"]; - } - - ERR_FAIL_COND_V(!s.has("values"), ERR_PARSE_ERROR); - const Dictionary &sv = s["values"]; - - ERR_FAIL_COND_V(!sv.has("bufferView"), ERR_PARSE_ERROR); - accessor.sparse_values_buffer_view = sv["bufferView"]; - if (sv.has("byteOffset")) { - accessor.sparse_values_byte_offset = sv["byteOffset"]; - } - } - - state.accessors.push_back(accessor); - } - - print_verbose("glTF: Total accessors: " + itos(state.accessors.size())); - - return OK; -} - -String EditorSceneImporterGLTF::_get_component_type_name(const uint32_t p_component) { - switch (p_component) { - case COMPONENT_TYPE_BYTE: - return "Byte"; - case COMPONENT_TYPE_UNSIGNED_BYTE: - return "UByte"; - case COMPONENT_TYPE_SHORT: - return "Short"; - case COMPONENT_TYPE_UNSIGNED_SHORT: - return "UShort"; - case COMPONENT_TYPE_INT: - return "Int"; - case COMPONENT_TYPE_FLOAT: - return "Float"; - } - - return "<Error>"; -} - -String EditorSceneImporterGLTF::_get_type_name(const GLTFType p_component) { - static const char *names[] = { - "float", - "vec2", - "vec3", - "vec4", - "mat2", - "mat3", - "mat4" - }; - - return names[p_component]; -} - -Error EditorSceneImporterGLTF::_decode_buffer_view(GLTFState &state, double *dst, const GLTFBufferViewIndex p_buffer_view, const int skip_every, const int skip_bytes, const int element_size, const int count, const GLTFType type, const int component_count, const int component_type, const int component_size, const bool normalized, const int byte_offset, const bool for_vertex) { - const GLTFBufferView &bv = state.buffer_views[p_buffer_view]; - - int stride = bv.byte_stride ? 
bv.byte_stride : element_size; - if (for_vertex && stride % 4) { - stride += 4 - (stride % 4); //according to spec must be multiple of 4 - } - - ERR_FAIL_INDEX_V(bv.buffer, state.buffers.size(), ERR_PARSE_ERROR); - - const uint32_t offset = bv.byte_offset + byte_offset; - Vector<uint8_t> buffer = state.buffers[bv.buffer]; //copy on write, so no performance hit - const uint8_t *bufptr = buffer.ptr(); - - //use to debug - print_verbose("glTF: type " + _get_type_name(type) + " component type: " + _get_component_type_name(component_type) + " stride: " + itos(stride) + " amount " + itos(count)); - print_verbose("glTF: accessor offset" + itos(byte_offset) + " view offset: " + itos(bv.byte_offset) + " total buffer len: " + itos(buffer.size()) + " view len " + itos(bv.byte_length)); - - const int buffer_end = (stride * (count - 1)) + element_size; - ERR_FAIL_COND_V(buffer_end > bv.byte_length, ERR_PARSE_ERROR); - - ERR_FAIL_COND_V((int)(offset + buffer_end) > buffer.size(), ERR_PARSE_ERROR); - - //fill everything as doubles - - for (int i = 0; i < count; i++) { - const uint8_t *src = &bufptr[offset + i * stride]; - - for (int j = 0; j < component_count; j++) { - if (skip_every && j > 0 && (j % skip_every) == 0) { - src += skip_bytes; - } - - double d = 0; - - switch (component_type) { - case COMPONENT_TYPE_BYTE: { - int8_t b = int8_t(*src); - if (normalized) { - d = (double(b) / 128.0); - } else { - d = double(b); - } - } break; - case COMPONENT_TYPE_UNSIGNED_BYTE: { - uint8_t b = *src; - if (normalized) { - d = (double(b) / 255.0); - } else { - d = double(b); - } - } break; - case COMPONENT_TYPE_SHORT: { - int16_t s = *(int16_t *)src; - if (normalized) { - d = (double(s) / 32768.0); - } else { - d = double(s); - } - } break; - case COMPONENT_TYPE_UNSIGNED_SHORT: { - uint16_t s = *(uint16_t *)src; - if (normalized) { - d = (double(s) / 65535.0); - } else { - d = double(s); - } - - } break; - case COMPONENT_TYPE_INT: { - d = *(int *)src; - } break; - case COMPONENT_TYPE_FLOAT: { - d = *(float *)src; - } break; - } - - *dst++ = d; - src += component_size; - } - } - - return OK; -} - -int EditorSceneImporterGLTF::_get_component_type_size(const int component_type) { - switch (component_type) { - case COMPONENT_TYPE_BYTE: - return 1; - break; - case COMPONENT_TYPE_UNSIGNED_BYTE: - return 1; - break; - case COMPONENT_TYPE_SHORT: - return 2; - break; - case COMPONENT_TYPE_UNSIGNED_SHORT: - return 2; - break; - case COMPONENT_TYPE_INT: - return 4; - break; - case COMPONENT_TYPE_FLOAT: - return 4; - break; - default: { - ERR_FAIL_V(0); - } - } - return 0; -} - -Vector<double> EditorSceneImporterGLTF::_decode_accessor(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - //spec, for reference: - //https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#data-alignment - - ERR_FAIL_INDEX_V(p_accessor, state.accessors.size(), Vector<double>()); - - const GLTFAccessor &a = state.accessors[p_accessor]; - - const int component_count_for_type[7] = { - 1, 2, 3, 4, 4, 9, 16 - }; - - const int component_count = component_count_for_type[a.type]; - const int component_size = _get_component_type_size(a.component_type); - ERR_FAIL_COND_V(component_size == 0, Vector<double>()); - int element_size = component_count * component_size; - - int skip_every = 0; - int skip_bytes = 0; - //special case of alignments, as described in spec - switch (a.component_type) { - case COMPONENT_TYPE_BYTE: - case COMPONENT_TYPE_UNSIGNED_BYTE: { - if (a.type == TYPE_MAT2) { - skip_every = 2; - skip_bytes 
= 2; - element_size = 8; //override for this case - } - if (a.type == TYPE_MAT3) { - skip_every = 3; - skip_bytes = 1; - element_size = 12; //override for this case - } - - } break; - case COMPONENT_TYPE_SHORT: - case COMPONENT_TYPE_UNSIGNED_SHORT: { - if (a.type == TYPE_MAT3) { - skip_every = 6; - skip_bytes = 4; - element_size = 16; //override for this case - } - } break; - default: { - } - } - - Vector<double> dst_buffer; - dst_buffer.resize(component_count * a.count); - double *dst = dst_buffer.ptrw(); - - if (a.buffer_view >= 0) { - ERR_FAIL_INDEX_V(a.buffer_view, state.buffer_views.size(), Vector<double>()); - - const Error err = _decode_buffer_view(state, dst, a.buffer_view, skip_every, skip_bytes, element_size, a.count, a.type, component_count, a.component_type, component_size, a.normalized, a.byte_offset, p_for_vertex); - if (err != OK) { - return Vector<double>(); - } - - } else { - //fill with zeros, as bufferview is not defined. - for (int i = 0; i < (a.count * component_count); i++) { - dst_buffer.write[i] = 0; - } - } - - if (a.sparse_count > 0) { - // I could not find any file using this, so this code is so far untested - Vector<double> indices; - indices.resize(a.sparse_count); - const int indices_component_size = _get_component_type_size(a.sparse_indices_component_type); - - Error err = _decode_buffer_view(state, indices.ptrw(), a.sparse_indices_buffer_view, 0, 0, indices_component_size, a.sparse_count, TYPE_SCALAR, 1, a.sparse_indices_component_type, indices_component_size, false, a.sparse_indices_byte_offset, false); - if (err != OK) { - return Vector<double>(); - } - - Vector<double> data; - data.resize(component_count * a.sparse_count); - err = _decode_buffer_view(state, data.ptrw(), a.sparse_values_buffer_view, skip_every, skip_bytes, element_size, a.sparse_count, a.type, component_count, a.component_type, component_size, a.normalized, a.sparse_values_byte_offset, p_for_vertex); - if (err != OK) { - return Vector<double>(); - } - - for (int i = 0; i < indices.size(); i++) { - const int write_offset = int(indices[i]) * component_count; - - for (int j = 0; j < component_count; j++) { - dst[write_offset + j] = data[i * component_count + j]; - } - } - } - - return dst_buffer; -} - -Vector<int> EditorSceneImporterGLTF::_decode_accessor_as_ints(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<int> ret; - - if (attribs.size() == 0) { - return ret; - } - - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size(); - ret.resize(ret_size); - { - int *w = ret.ptrw(); - for (int i = 0; i < ret_size; i++) { - w[i] = int(attribs_ptr[i]); - } - } - return ret; -} - -Vector<float> EditorSceneImporterGLTF::_decode_accessor_as_floats(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<float> ret; - - if (attribs.size() == 0) { - return ret; - } - - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size(); - ret.resize(ret_size); - { - float *w = ret.ptrw(); - for (int i = 0; i < ret_size; i++) { - w[i] = float(attribs_ptr[i]); - } - } - return ret; -} - -Vector<Vector2> EditorSceneImporterGLTF::_decode_accessor_as_vec2(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - 
Vector<Vector2> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 2 != 0, ret); - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size() / 2; - ret.resize(ret_size); - { - Vector2 *w = ret.ptrw(); - for (int i = 0; i < ret_size; i++) { - w[i] = Vector2(attribs_ptr[i * 2 + 0], attribs_ptr[i * 2 + 1]); - } - } - return ret; -} - -Vector<Vector3> EditorSceneImporterGLTF::_decode_accessor_as_vec3(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Vector3> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 3 != 0, ret); - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size() / 3; - ret.resize(ret_size); - { - Vector3 *w = ret.ptrw(); - for (int i = 0; i < ret_size; i++) { - w[i] = Vector3(attribs_ptr[i * 3 + 0], attribs_ptr[i * 3 + 1], attribs_ptr[i * 3 + 2]); - } - } - return ret; -} - -Vector<Color> EditorSceneImporterGLTF::_decode_accessor_as_color(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Color> ret; - - if (attribs.size() == 0) { - return ret; - } - - const int type = state.accessors[p_accessor].type; - ERR_FAIL_COND_V(!(type == TYPE_VEC3 || type == TYPE_VEC4), ret); - int vec_len = 3; - if (type == TYPE_VEC4) { - vec_len = 4; - } - - ERR_FAIL_COND_V(attribs.size() % vec_len != 0, ret); - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size() / vec_len; - ret.resize(ret_size); - { - Color *w = ret.ptrw(); - for (int i = 0; i < ret_size; i++) { - w[i] = Color(attribs_ptr[i * vec_len + 0], attribs_ptr[i * vec_len + 1], attribs_ptr[i * vec_len + 2], vec_len == 4 ? 
attribs_ptr[i * 4 + 3] : 1.0); - } - } - return ret; -} - -Vector<Quat> EditorSceneImporterGLTF::_decode_accessor_as_quat(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Quat> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 4 != 0, ret); - const double *attribs_ptr = attribs.ptr(); - const int ret_size = attribs.size() / 4; - ret.resize(ret_size); - { - for (int i = 0; i < ret_size; i++) { - ret.write[i] = Quat(attribs_ptr[i * 4 + 0], attribs_ptr[i * 4 + 1], attribs_ptr[i * 4 + 2], attribs_ptr[i * 4 + 3]).normalized(); - } - } - return ret; -} - -Vector<Transform2D> EditorSceneImporterGLTF::_decode_accessor_as_xform2d(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Transform2D> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 4 != 0, ret); - ret.resize(attribs.size() / 4); - for (int i = 0; i < ret.size(); i++) { - ret.write[i][0] = Vector2(attribs[i * 4 + 0], attribs[i * 4 + 1]); - ret.write[i][1] = Vector2(attribs[i * 4 + 2], attribs[i * 4 + 3]); - } - return ret; -} - -Vector<Basis> EditorSceneImporterGLTF::_decode_accessor_as_basis(GLTFState &state, const GLTFAccessorIndex p_accessor, bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Basis> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 9 != 0, ret); - ret.resize(attribs.size() / 9); - for (int i = 0; i < ret.size(); i++) { - ret.write[i].set_axis(0, Vector3(attribs[i * 9 + 0], attribs[i * 9 + 1], attribs[i * 9 + 2])); - ret.write[i].set_axis(1, Vector3(attribs[i * 9 + 3], attribs[i * 9 + 4], attribs[i * 9 + 5])); - ret.write[i].set_axis(2, Vector3(attribs[i * 9 + 6], attribs[i * 9 + 7], attribs[i * 9 + 8])); - } - return ret; -} - -Vector<Transform> EditorSceneImporterGLTF::_decode_accessor_as_xform(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { - const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); - Vector<Transform> ret; - - if (attribs.size() == 0) { - return ret; - } - - ERR_FAIL_COND_V(attribs.size() % 16 != 0, ret); - ret.resize(attribs.size() / 16); - for (int i = 0; i < ret.size(); i++) { - ret.write[i].basis.set_axis(0, Vector3(attribs[i * 16 + 0], attribs[i * 16 + 1], attribs[i * 16 + 2])); - ret.write[i].basis.set_axis(1, Vector3(attribs[i * 16 + 4], attribs[i * 16 + 5], attribs[i * 16 + 6])); - ret.write[i].basis.set_axis(2, Vector3(attribs[i * 16 + 8], attribs[i * 16 + 9], attribs[i * 16 + 10])); - ret.write[i].set_origin(Vector3(attribs[i * 16 + 12], attribs[i * 16 + 13], attribs[i * 16 + 14])); - } - return ret; -} - -Error EditorSceneImporterGLTF::_parse_meshes(GLTFState &state) { - if (!state.json.has("meshes")) { - return OK; - } - - Array meshes = state.json["meshes"]; - for (GLTFMeshIndex i = 0; i < meshes.size(); i++) { - print_verbose("glTF: Parsing mesh: " + itos(i)); - Dictionary d = meshes[i]; - - GLTFMesh mesh; - mesh.mesh.instance(); - bool has_vertex_color = false; - - ERR_FAIL_COND_V(!d.has("primitives"), ERR_PARSE_ERROR); - - Array primitives = d["primitives"]; - const Dictionary &extras = d.has("extras") ? 
(Dictionary)d["extras"] : Dictionary(); - - for (int j = 0; j < primitives.size(); j++) { - Dictionary p = primitives[j]; - - Array array; - array.resize(Mesh::ARRAY_MAX); - - ERR_FAIL_COND_V(!p.has("attributes"), ERR_PARSE_ERROR); - - Dictionary a = p["attributes"]; - - Mesh::PrimitiveType primitive = Mesh::PRIMITIVE_TRIANGLES; - if (p.has("mode")) { - const int mode = p["mode"]; - ERR_FAIL_INDEX_V(mode, 7, ERR_FILE_CORRUPT); - static const Mesh::PrimitiveType primitives2[7] = { - Mesh::PRIMITIVE_POINTS, - Mesh::PRIMITIVE_LINES, - Mesh::PRIMITIVE_LINES, //loop not supported, should ce converted - Mesh::PRIMITIVE_LINES, - Mesh::PRIMITIVE_TRIANGLES, - Mesh::PRIMITIVE_TRIANGLE_STRIP, - Mesh::PRIMITIVE_TRIANGLES, //fan not supported, should be converted -#ifndef _MSC_VER -#warning line loop and triangle fan are not supported and need to be converted to lines and triangles -#endif - - }; - - primitive = primitives2[mode]; - } - - ERR_FAIL_COND_V(!a.has("POSITION"), ERR_PARSE_ERROR); - if (a.has("POSITION")) { - array[Mesh::ARRAY_VERTEX] = _decode_accessor_as_vec3(state, a["POSITION"], true); - } - if (a.has("NORMAL")) { - array[Mesh::ARRAY_NORMAL] = _decode_accessor_as_vec3(state, a["NORMAL"], true); - } - if (a.has("TANGENT")) { - array[Mesh::ARRAY_TANGENT] = _decode_accessor_as_floats(state, a["TANGENT"], true); - } - if (a.has("TEXCOORD_0")) { - array[Mesh::ARRAY_TEX_UV] = _decode_accessor_as_vec2(state, a["TEXCOORD_0"], true); - } - if (a.has("TEXCOORD_1")) { - array[Mesh::ARRAY_TEX_UV2] = _decode_accessor_as_vec2(state, a["TEXCOORD_1"], true); - } - if (a.has("COLOR_0")) { - array[Mesh::ARRAY_COLOR] = _decode_accessor_as_color(state, a["COLOR_0"], true); - has_vertex_color = true; - } - if (a.has("JOINTS_0")) { - array[Mesh::ARRAY_BONES] = _decode_accessor_as_ints(state, a["JOINTS_0"], true); - } - if (a.has("WEIGHTS_0")) { - Vector<float> weights = _decode_accessor_as_floats(state, a["WEIGHTS_0"], true); - { //gltf does not seem to normalize the weights for some reason.. - int wc = weights.size(); - float *w = weights.ptrw(); - - for (int k = 0; k < wc; k += 4) { - float total = 0.0; - total += w[k + 0]; - total += w[k + 1]; - total += w[k + 2]; - total += w[k + 3]; - if (total > 0.0) { - w[k + 0] /= total; - w[k + 1] /= total; - w[k + 2] /= total; - w[k + 3] /= total; - } - } - } - array[Mesh::ARRAY_WEIGHTS] = weights; - } - - if (p.has("indices")) { - Vector<int> indices = _decode_accessor_as_ints(state, p["indices"], false); - - if (primitive == Mesh::PRIMITIVE_TRIANGLES) { - //swap around indices, convert ccw to cw for front face - - const int is = indices.size(); - int *w = indices.ptrw(); - for (int k = 0; k < is; k += 3) { - SWAP(w[k + 1], w[k + 2]); - } - } - array[Mesh::ARRAY_INDEX] = indices; - - } else if (primitive == Mesh::PRIMITIVE_TRIANGLES) { - //generate indices because they need to be swapped for CW/CCW - const Vector<Vector3> &vertices = array[Mesh::ARRAY_VERTEX]; - ERR_FAIL_COND_V(vertices.size() == 0, ERR_PARSE_ERROR); - Vector<int> indices; - const int vs = vertices.size(); - indices.resize(vs); - { - int *w = indices.ptrw(); - for (int k = 0; k < vs; k += 3) { - w[k] = k; - w[k + 1] = k + 2; - w[k + 2] = k + 1; - } - } - array[Mesh::ARRAY_INDEX] = indices; - } - - bool generate_tangents = (primitive == Mesh::PRIMITIVE_TRIANGLES && !a.has("TANGENT") && a.has("TEXCOORD_0") && a.has("NORMAL")); - - if (generate_tangents) { - //must generate mikktspace tangents.. ergh.. 
- Ref<SurfaceTool> st; - st.instance(); - st->create_from_triangle_arrays(array); - st->generate_tangents(); - array = st->commit_to_arrays(); - } - - Array morphs; - //blend shapes - if (p.has("targets")) { - print_verbose("glTF: Mesh has targets"); - const Array &targets = p["targets"]; - - //ideally BLEND_SHAPE_MODE_RELATIVE since gltf2 stores in displacement - //but it could require a larger refactor? - mesh.mesh->set_blend_shape_mode(Mesh::BLEND_SHAPE_MODE_NORMALIZED); - - if (j == 0) { - const Array &target_names = extras.has("targetNames") ? (Array)extras["targetNames"] : Array(); - for (int k = 0; k < targets.size(); k++) { - const String name = k < target_names.size() ? (String)target_names[k] : String("morph_") + itos(k); - mesh.mesh->add_blend_shape(name); - } - } - - for (int k = 0; k < targets.size(); k++) { - const Dictionary &t = targets[k]; - - Array array_copy; - array_copy.resize(Mesh::ARRAY_MAX); - - for (int l = 0; l < Mesh::ARRAY_MAX; l++) { - array_copy[l] = array[l]; - } - - array_copy[Mesh::ARRAY_INDEX] = Variant(); - - if (t.has("POSITION")) { - Vector<Vector3> varr = _decode_accessor_as_vec3(state, t["POSITION"], true); - const Vector<Vector3> src_varr = array[Mesh::ARRAY_VERTEX]; - const int size = src_varr.size(); - ERR_FAIL_COND_V(size == 0, ERR_PARSE_ERROR); - { - const int max_idx = varr.size(); - varr.resize(size); - - Vector3 *w_varr = varr.ptrw(); - const Vector3 *r_varr = varr.ptr(); - const Vector3 *r_src_varr = src_varr.ptr(); - for (int l = 0; l < size; l++) { - if (l < max_idx) { - w_varr[l] = r_varr[l] + r_src_varr[l]; - } else { - w_varr[l] = r_src_varr[l]; - } - } - } - array_copy[Mesh::ARRAY_VERTEX] = varr; - } - if (t.has("NORMAL")) { - Vector<Vector3> narr = _decode_accessor_as_vec3(state, t["NORMAL"], true); - const Vector<Vector3> src_narr = array[Mesh::ARRAY_NORMAL]; - int size = src_narr.size(); - ERR_FAIL_COND_V(size == 0, ERR_PARSE_ERROR); - { - int max_idx = narr.size(); - narr.resize(size); - - Vector3 *w_narr = narr.ptrw(); - const Vector3 *r_narr = narr.ptr(); - const Vector3 *r_src_narr = src_narr.ptr(); - for (int l = 0; l < size; l++) { - if (l < max_idx) { - w_narr[l] = r_narr[l] + r_src_narr[l]; - } else { - w_narr[l] = r_src_narr[l]; - } - } - } - array_copy[Mesh::ARRAY_NORMAL] = narr; - } - if (t.has("TANGENT")) { - const Vector<Vector3> tangents_v3 = _decode_accessor_as_vec3(state, t["TANGENT"], true); - const Vector<float> src_tangents = array[Mesh::ARRAY_TANGENT]; - ERR_FAIL_COND_V(src_tangents.size() == 0, ERR_PARSE_ERROR); - - Vector<float> tangents_v4; - - { - int max_idx = tangents_v3.size(); - - int size4 = src_tangents.size(); - tangents_v4.resize(size4); - float *w4 = tangents_v4.ptrw(); - - const Vector3 *r3 = tangents_v3.ptr(); - const float *r4 = src_tangents.ptr(); - - for (int l = 0; l < size4 / 4; l++) { - if (l < max_idx) { - w4[l * 4 + 0] = r3[l].x + r4[l * 4 + 0]; - w4[l * 4 + 1] = r3[l].y + r4[l * 4 + 1]; - w4[l * 4 + 2] = r3[l].z + r4[l * 4 + 2]; - } else { - w4[l * 4 + 0] = r4[l * 4 + 0]; - w4[l * 4 + 1] = r4[l * 4 + 1]; - w4[l * 4 + 2] = r4[l * 4 + 2]; - } - w4[l * 4 + 3] = r4[l * 4 + 3]; //copy flip value - } - } - - array_copy[Mesh::ARRAY_TANGENT] = tangents_v4; - } - - if (generate_tangents) { - Ref<SurfaceTool> st; - st.instance(); - st->create_from_triangle_arrays(array_copy); - st->deindex(); - st->generate_tangents(); - array_copy = st->commit_to_arrays(); - } - - morphs.push_back(array_copy); - } - } - - //just add it - - Ref<Material> mat; - if (p.has("material")) { - const int material = 
p["material"]; - ERR_FAIL_INDEX_V(material, state.materials.size(), ERR_FILE_CORRUPT); - Ref<StandardMaterial3D> mat3d = state.materials[material]; - if (has_vertex_color) { - mat3d->set_flag(StandardMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); - } - mat = mat3d; - - } else if (has_vertex_color) { - Ref<StandardMaterial3D> mat3d; - mat3d.instance(); - mat3d->set_flag(StandardMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); - mat = mat3d; - } - - mesh.mesh->add_surface(primitive, array, morphs, Dictionary(), mat); - } - - mesh.blend_weights.resize(mesh.mesh->get_blend_shape_count()); - for (int32_t weight_i = 0; weight_i < mesh.blend_weights.size(); weight_i++) { - mesh.blend_weights.write[weight_i] = 0.0f; - } - - if (d.has("weights")) { - const Array &weights = d["weights"]; - ERR_FAIL_COND_V(mesh.blend_weights.size() != weights.size(), ERR_PARSE_ERROR); - for (int j = 0; j < weights.size(); j++) { - mesh.blend_weights.write[j] = weights[j]; - } - } - - state.meshes.push_back(mesh); - } - - print_verbose("glTF: Total meshes: " + itos(state.meshes.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_images(GLTFState &state, const String &p_base_path) { - if (!state.json.has("images")) { - return OK; - } - - // Ref: https://github.com/KhronosGroup/glTF/blob/master/specification/2.0/README.md#images - - const Array &images = state.json["images"]; - for (int i = 0; i < images.size(); i++) { - const Dictionary &d = images[i]; - - // glTF 2.0 supports PNG and JPEG types, which can be specified as (from spec): - // "- a URI to an external file in one of the supported images formats, or - // - a URI with embedded base64-encoded data, or - // - a reference to a bufferView; in that case mimeType must be defined." - // Since mimeType is optional for external files and base64 data, we'll have to - // fall back on letting Godot parse the data to figure out if it's PNG or JPEG. - - // We'll assume that we use either URI or bufferView, so let's warn the user - // if their image somehow uses both. And fail if it has neither. - ERR_CONTINUE_MSG(!d.has("uri") && !d.has("bufferView"), "Invalid image definition in glTF file, it should specific an 'uri' or 'bufferView'."); - if (d.has("uri") && d.has("bufferView")) { - WARN_PRINT("Invalid image definition in glTF file using both 'uri' and 'bufferView'. 'bufferView' will take precedence."); - } - - String mimetype; - if (d.has("mimeType")) { // Should be "image/png" or "image/jpeg". - mimetype = d["mimeType"]; - } - - Vector<uint8_t> data; - const uint8_t *data_ptr = nullptr; - int data_size = 0; - - if (d.has("uri")) { - // Handles the first two bullet points from the spec (embedded data, or external file). - String uri = d["uri"]; - - if (uri.begins_with("data:")) { // Embedded data using base64. - // Validate data MIME types and throw a warning if it's one we don't know/support. - if (!uri.begins_with("data:application/octet-stream;base64") && - !uri.begins_with("data:application/gltf-buffer;base64") && - !uri.begins_with("data:image/png;base64") && - !uri.begins_with("data:image/jpeg;base64")) { - WARN_PRINT(vformat("glTF: Image index '%d' uses an unsupported URI data type: %s. Skipping it.", i, uri)); - state.images.push_back(Ref<Texture2D>()); // Placeholder to keep count. - continue; - } - data = _parse_base64_uri(uri); - data_ptr = data.ptr(); - data_size = data.size(); - // mimeType is optional, but if we have it defined in the URI, let's use it. 
- if (mimetype.empty()) { - if (uri.begins_with("data:image/png;base64")) { - mimetype = "image/png"; - } else if (uri.begins_with("data:image/jpeg;base64")) { - mimetype = "image/jpeg"; - } - } - } else { // Relative path to an external image file. - uri = p_base_path.plus_file(uri).replace("\\", "/"); // Fix for Windows. - // The spec says that if mimeType is defined, we should enforce it. - // So we should only rely on ResourceLoader::load if mimeType is not defined, - // otherwise we should use the same logic as for buffers. - if (mimetype == "image/png" || mimetype == "image/jpeg") { - // Load data buffer and rely on PNG and JPEG-specific logic below to load the image. - // This makes it possible to load a file with a wrong extension but correct MIME type, - // e.g. "foo.jpg" containing PNG data and with MIME type "image/png". ResourceLoader would fail. - data = FileAccess::get_file_as_array(uri); - ERR_FAIL_COND_V_MSG(data.size() == 0, ERR_PARSE_ERROR, "glTF: Couldn't load image file as an array: " + uri); - data_ptr = data.ptr(); - data_size = data.size(); - } else { - // Good old ResourceLoader will rely on file extension. - Ref<Texture2D> texture = ResourceLoader::load(uri); - state.images.push_back(texture); - continue; - } - } - } else if (d.has("bufferView")) { - // Handles the third bullet point from the spec (bufferView). - ERR_FAIL_COND_V_MSG(mimetype.empty(), ERR_FILE_CORRUPT, - vformat("glTF: Image index '%d' specifies 'bufferView' but no 'mimeType', which is invalid.", i)); - - const GLTFBufferViewIndex bvi = d["bufferView"]; - - ERR_FAIL_INDEX_V(bvi, state.buffer_views.size(), ERR_PARAMETER_RANGE_ERROR); - - const GLTFBufferView &bv = state.buffer_views[bvi]; - - const GLTFBufferIndex bi = bv.buffer; - ERR_FAIL_INDEX_V(bi, state.buffers.size(), ERR_PARAMETER_RANGE_ERROR); - - ERR_FAIL_COND_V(bv.byte_offset + bv.byte_length > state.buffers[bi].size(), ERR_FILE_CORRUPT); - - data_ptr = &state.buffers[bi][bv.byte_offset]; - data_size = bv.byte_length; - } - - Ref<Image> img; - - if (mimetype == "image/png") { // Load buffer as PNG. - ERR_FAIL_COND_V(Image::_png_mem_loader_func == nullptr, ERR_UNAVAILABLE); - img = Image::_png_mem_loader_func(data_ptr, data_size); - } else if (mimetype == "image/jpeg") { // Loader buffer as JPEG. - ERR_FAIL_COND_V(Image::_jpg_mem_loader_func == nullptr, ERR_UNAVAILABLE); - img = Image::_jpg_mem_loader_func(data_ptr, data_size); - } else { - // We can land here if we got an URI with base64-encoded data with application/* MIME type, - // and the optional mimeType property was not defined to tell us how to handle this data (or was invalid). - // So let's try PNG first, then JPEG. 
- ERR_FAIL_COND_V(Image::_png_mem_loader_func == nullptr, ERR_UNAVAILABLE); - img = Image::_png_mem_loader_func(data_ptr, data_size); - if (img.is_null()) { - ERR_FAIL_COND_V(Image::_jpg_mem_loader_func == nullptr, ERR_UNAVAILABLE); - img = Image::_jpg_mem_loader_func(data_ptr, data_size); - } - } - - ERR_FAIL_COND_V_MSG(img.is_null(), ERR_FILE_CORRUPT, - vformat("glTF: Couldn't load image index '%d' with its given mimetype: %s.", i, mimetype)); - - Ref<ImageTexture> t; - t.instance(); - t->create_from_image(img); - - state.images.push_back(t); - } - - print_verbose("glTF: Total images: " + itos(state.images.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_textures(GLTFState &state) { - if (!state.json.has("textures")) { - return OK; - } - - const Array &textures = state.json["textures"]; - for (GLTFTextureIndex i = 0; i < textures.size(); i++) { - const Dictionary &d = textures[i]; - - ERR_FAIL_COND_V(!d.has("source"), ERR_PARSE_ERROR); - - GLTFTexture t; - t.src_image = d["source"]; - state.textures.push_back(t); - } - - return OK; -} - -Ref<Texture2D> EditorSceneImporterGLTF::_get_texture(GLTFState &state, const GLTFTextureIndex p_texture) { - ERR_FAIL_INDEX_V(p_texture, state.textures.size(), Ref<Texture2D>()); - const GLTFImageIndex image = state.textures[p_texture].src_image; - - ERR_FAIL_INDEX_V(image, state.images.size(), Ref<Texture2D>()); - - return state.images[image]; -} - -Error EditorSceneImporterGLTF::_parse_materials(GLTFState &state) { - if (!state.json.has("materials")) { - return OK; - } - - const Array &materials = state.json["materials"]; - for (GLTFMaterialIndex i = 0; i < materials.size(); i++) { - const Dictionary &d = materials[i]; - - Ref<StandardMaterial3D> material; - material.instance(); - if (d.has("name")) { - material->set_name(d["name"]); - } - //don't do this here only if vertex color exists - //material->set_flag(StandardMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); - - if (d.has("pbrMetallicRoughness")) { - const Dictionary &mr = d["pbrMetallicRoughness"]; - if (mr.has("baseColorFactor")) { - const Array &arr = mr["baseColorFactor"]; - ERR_FAIL_COND_V(arr.size() != 4, ERR_PARSE_ERROR); - const Color c = Color(arr[0], arr[1], arr[2], arr[3]).to_srgb(); - - material->set_albedo(c); - } - - if (mr.has("baseColorTexture")) { - const Dictionary &bct = mr["baseColorTexture"]; - if (bct.has("index")) { - material->set_texture(StandardMaterial3D::TEXTURE_ALBEDO, _get_texture(state, bct["index"])); - } - if (!mr.has("baseColorFactor")) { - material->set_albedo(Color(1, 1, 1)); - } - } - - if (mr.has("metallicFactor")) { - material->set_metallic(mr["metallicFactor"]); - } else { - material->set_metallic(1.0); - } - - if (mr.has("roughnessFactor")) { - material->set_roughness(mr["roughnessFactor"]); - } else { - material->set_roughness(1.0); - } - - if (mr.has("metallicRoughnessTexture")) { - const Dictionary &bct = mr["metallicRoughnessTexture"]; - if (bct.has("index")) { - const Ref<Texture2D> t = _get_texture(state, bct["index"]); - material->set_texture(StandardMaterial3D::TEXTURE_METALLIC, t); - material->set_metallic_texture_channel(StandardMaterial3D::TEXTURE_CHANNEL_BLUE); - material->set_texture(StandardMaterial3D::TEXTURE_ROUGHNESS, t); - material->set_roughness_texture_channel(StandardMaterial3D::TEXTURE_CHANNEL_GREEN); - if (!mr.has("metallicFactor")) { - material->set_metallic(1); - } - if (!mr.has("roughnessFactor")) { - material->set_roughness(1); - } - } - } - } - - if (d.has("normalTexture")) { - const Dictionary &bct = 
d["normalTexture"]; - if (bct.has("index")) { - material->set_texture(StandardMaterial3D::TEXTURE_NORMAL, _get_texture(state, bct["index"])); - material->set_feature(StandardMaterial3D::FEATURE_NORMAL_MAPPING, true); - } - if (bct.has("scale")) { - material->set_normal_scale(bct["scale"]); - } - } - if (d.has("occlusionTexture")) { - const Dictionary &bct = d["occlusionTexture"]; - if (bct.has("index")) { - material->set_texture(StandardMaterial3D::TEXTURE_AMBIENT_OCCLUSION, _get_texture(state, bct["index"])); - material->set_ao_texture_channel(StandardMaterial3D::TEXTURE_CHANNEL_RED); - material->set_feature(StandardMaterial3D::FEATURE_AMBIENT_OCCLUSION, true); - } - } - - if (d.has("emissiveFactor")) { - const Array &arr = d["emissiveFactor"]; - ERR_FAIL_COND_V(arr.size() != 3, ERR_PARSE_ERROR); - const Color c = Color(arr[0], arr[1], arr[2]).to_srgb(); - material->set_feature(StandardMaterial3D::FEATURE_EMISSION, true); - - material->set_emission(c); - } - - if (d.has("emissiveTexture")) { - const Dictionary &bct = d["emissiveTexture"]; - if (bct.has("index")) { - material->set_texture(StandardMaterial3D::TEXTURE_EMISSION, _get_texture(state, bct["index"])); - material->set_feature(StandardMaterial3D::FEATURE_EMISSION, true); - material->set_emission(Color(0, 0, 0)); - } - } - - if (d.has("doubleSided")) { - const bool ds = d["doubleSided"]; - if (ds) { - material->set_cull_mode(StandardMaterial3D::CULL_DISABLED); - } - } - - if (d.has("alphaMode")) { - const String &am = d["alphaMode"]; - if (am == "BLEND") { - material->set_transparency(StandardMaterial3D::TRANSPARENCY_ALPHA_DEPTH_PRE_PASS); - } else if (am == "MASK") { - material->set_transparency(StandardMaterial3D::TRANSPARENCY_ALPHA_SCISSOR); - if (d.has("alphaCutoff")) { - material->set_alpha_scissor_threshold(d["alphaCutoff"]); - } else { - material->set_alpha_scissor_threshold(0.5f); - } - } - } - - state.materials.push_back(material); - } - - print_verbose("glTF: Total materials: " + itos(state.materials.size())); - - return OK; -} - -EditorSceneImporterGLTF::GLTFNodeIndex EditorSceneImporterGLTF::_find_highest_node(GLTFState &state, const Vector<GLTFNodeIndex> &subset) { - int highest = -1; - GLTFNodeIndex best_node = -1; - - for (int i = 0; i < subset.size(); ++i) { - const GLTFNodeIndex node_i = subset[i]; - const GLTFNode *node = state.nodes[node_i]; - - if (highest == -1 || node->height < highest) { - highest = node->height; - best_node = node_i; - } - } - - return best_node; -} - -bool EditorSceneImporterGLTF::_capture_nodes_in_skin(GLTFState &state, GLTFSkin &skin, const GLTFNodeIndex node_index) { - bool found_joint = false; - - for (int i = 0; i < state.nodes[node_index]->children.size(); ++i) { - found_joint |= _capture_nodes_in_skin(state, skin, state.nodes[node_index]->children[i]); - } - - if (found_joint) { - // Mark it if we happen to find another skins joint... 
- if (state.nodes[node_index]->joint && skin.joints.find(node_index) < 0) { - skin.joints.push_back(node_index); - } else if (skin.non_joints.find(node_index) < 0) { - skin.non_joints.push_back(node_index); - } - } - - if (skin.joints.find(node_index) > 0) { - return true; - } - - return false; -} - -void EditorSceneImporterGLTF::_capture_nodes_for_multirooted_skin(GLTFState &state, GLTFSkin &skin) { - DisjointSet<GLTFNodeIndex> disjoint_set; - - for (int i = 0; i < skin.joints.size(); ++i) { - const GLTFNodeIndex node_index = skin.joints[i]; - const GLTFNodeIndex parent = state.nodes[node_index]->parent; - disjoint_set.insert(node_index); - - if (skin.joints.find(parent) >= 0) { - disjoint_set.create_union(parent, node_index); - } - } - - Vector<GLTFNodeIndex> roots; - disjoint_set.get_representatives(roots); - - if (roots.size() <= 1) { - return; - } - - int maxHeight = -1; - - // Determine the max height rooted tree - for (int i = 0; i < roots.size(); ++i) { - const GLTFNodeIndex root = roots[i]; - - if (maxHeight == -1 || state.nodes[root]->height < maxHeight) { - maxHeight = state.nodes[root]->height; - } - } - - // Go up the tree till all of the multiple roots of the skin are at the same hierarchy level. - // This sucks, but 99% of all game engines (not just Godot) would have this same issue. - for (int i = 0; i < roots.size(); ++i) { - GLTFNodeIndex current_node = roots[i]; - while (state.nodes[current_node]->height > maxHeight) { - GLTFNodeIndex parent = state.nodes[current_node]->parent; - - if (state.nodes[parent]->joint && skin.joints.find(parent) < 0) { - skin.joints.push_back(parent); - } else if (skin.non_joints.find(parent) < 0) { - skin.non_joints.push_back(parent); - } - - current_node = parent; - } - - // replace the roots - roots.write[i] = current_node; - } - - // Climb up the tree until they all have the same parent - bool all_same; - - do { - all_same = true; - const GLTFNodeIndex first_parent = state.nodes[roots[0]]->parent; - - for (int i = 1; i < roots.size(); ++i) { - all_same &= (first_parent == state.nodes[roots[i]]->parent); - } - - if (!all_same) { - for (int i = 0; i < roots.size(); ++i) { - const GLTFNodeIndex current_node = roots[i]; - const GLTFNodeIndex parent = state.nodes[current_node]->parent; - - if (state.nodes[parent]->joint && skin.joints.find(parent) < 0) { - skin.joints.push_back(parent); - } else if (skin.non_joints.find(parent) < 0) { - skin.non_joints.push_back(parent); - } - - roots.write[i] = parent; - } - } - - } while (!all_same); -} - -Error EditorSceneImporterGLTF::_expand_skin(GLTFState &state, GLTFSkin &skin) { - _capture_nodes_for_multirooted_skin(state, skin); - - // Grab all nodes that lay in between skin joints/nodes - DisjointSet<GLTFNodeIndex> disjoint_set; - - Vector<GLTFNodeIndex> all_skin_nodes; - all_skin_nodes.append_array(skin.joints); - all_skin_nodes.append_array(skin.non_joints); - - for (int i = 0; i < all_skin_nodes.size(); ++i) { - const GLTFNodeIndex node_index = all_skin_nodes[i]; - const GLTFNodeIndex parent = state.nodes[node_index]->parent; - disjoint_set.insert(node_index); - - if (all_skin_nodes.find(parent) >= 0) { - disjoint_set.create_union(parent, node_index); - } - } - - Vector<GLTFNodeIndex> out_owners; - disjoint_set.get_representatives(out_owners); - - Vector<GLTFNodeIndex> out_roots; - - for (int i = 0; i < out_owners.size(); ++i) { - Vector<GLTFNodeIndex> set; - disjoint_set.get_members(set, out_owners[i]); - - const GLTFNodeIndex root = _find_highest_node(state, set); - ERR_FAIL_COND_V(root < 0, 
FAILED); - out_roots.push_back(root); - } - - out_roots.sort(); - - for (int i = 0; i < out_roots.size(); ++i) { - _capture_nodes_in_skin(state, skin, out_roots[i]); - } - - skin.roots = out_roots; - - return OK; -} - -Error EditorSceneImporterGLTF::_verify_skin(GLTFState &state, GLTFSkin &skin) { - // This may seem duplicated from expand_skins, but this is really a sanity check! (so it kinda is) - // In case additional interpolating logic is added to the skins, this will help ensure that you - // do not cause it to self implode into a fiery blaze - - // We are going to re-calculate the root nodes and compare them to the ones saved in the skin, - // then ensure the multiple trees (if they exist) are on the same sublevel - - // Grab all nodes that lay in between skin joints/nodes - DisjointSet<GLTFNodeIndex> disjoint_set; - - Vector<GLTFNodeIndex> all_skin_nodes; - all_skin_nodes.append_array(skin.joints); - all_skin_nodes.append_array(skin.non_joints); - - for (int i = 0; i < all_skin_nodes.size(); ++i) { - const GLTFNodeIndex node_index = all_skin_nodes[i]; - const GLTFNodeIndex parent = state.nodes[node_index]->parent; - disjoint_set.insert(node_index); - - if (all_skin_nodes.find(parent) >= 0) { - disjoint_set.create_union(parent, node_index); - } - } - - Vector<GLTFNodeIndex> out_owners; - disjoint_set.get_representatives(out_owners); - - Vector<GLTFNodeIndex> out_roots; - - for (int i = 0; i < out_owners.size(); ++i) { - Vector<GLTFNodeIndex> set; - disjoint_set.get_members(set, out_owners[i]); - - const GLTFNodeIndex root = _find_highest_node(state, set); - ERR_FAIL_COND_V(root < 0, FAILED); - out_roots.push_back(root); - } - - out_roots.sort(); - - ERR_FAIL_COND_V(out_roots.size() == 0, FAILED); - - // Make sure the roots are the exact same (they better be) - ERR_FAIL_COND_V(out_roots.size() != skin.roots.size(), FAILED); - for (int i = 0; i < out_roots.size(); ++i) { - ERR_FAIL_COND_V(out_roots[i] != skin.roots[i], FAILED); - } - - // Single rooted skin? Perfectly ok! 
- if (out_roots.size() == 1) { - return OK; - } - - // Make sure all parents of a multi-rooted skin are the SAME - const GLTFNodeIndex parent = state.nodes[out_roots[0]]->parent; - for (int i = 1; i < out_roots.size(); ++i) { - if (state.nodes[out_roots[i]]->parent != parent) { - return FAILED; - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_skins(GLTFState &state) { - if (!state.json.has("skins")) { - return OK; - } - - const Array &skins = state.json["skins"]; - - // Create the base skins, and mark nodes that are joints - for (int i = 0; i < skins.size(); i++) { - const Dictionary &d = skins[i]; - - GLTFSkin skin; - - ERR_FAIL_COND_V(!d.has("joints"), ERR_PARSE_ERROR); - - const Array &joints = d["joints"]; - - if (d.has("inverseBindMatrices")) { - skin.inverse_binds = _decode_accessor_as_xform(state, d["inverseBindMatrices"], false); - ERR_FAIL_COND_V(skin.inverse_binds.size() != joints.size(), ERR_PARSE_ERROR); - } - - for (int j = 0; j < joints.size(); j++) { - const GLTFNodeIndex node = joints[j]; - ERR_FAIL_INDEX_V(node, state.nodes.size(), ERR_PARSE_ERROR); - - skin.joints.push_back(node); - skin.joints_original.push_back(node); - - state.nodes[node]->joint = true; - } - - if (d.has("name")) { - skin.name = d["name"]; - } - - if (d.has("skeleton")) { - skin.skin_root = d["skeleton"]; - } - - state.skins.push_back(skin); - } - - for (GLTFSkinIndex i = 0; i < state.skins.size(); ++i) { - GLTFSkin &skin = state.skins.write[i]; - - // Expand the skin to capture all the extra non-joints that lie in between the actual joints, - // and expand the hierarchy to ensure multi-rooted trees lie on the same height level - ERR_FAIL_COND_V(_expand_skin(state, skin), ERR_PARSE_ERROR); - ERR_FAIL_COND_V(_verify_skin(state, skin), ERR_PARSE_ERROR); - } - - print_verbose("glTF: Total skins: " + itos(state.skins.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_determine_skeletons(GLTFState &state) { - // Using a disjoint set, we are going to potentially combine all skins that are actually branches - // of a main skeleton, or treat skins defining the same set of nodes as ONE skeleton. - // This is another unclear issue caused by the current glTF specification. 
- - DisjointSet<GLTFNodeIndex> skeleton_sets; - - for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - const GLTFSkin &skin = state.skins[skin_i]; - - Vector<GLTFNodeIndex> all_skin_nodes; - all_skin_nodes.append_array(skin.joints); - all_skin_nodes.append_array(skin.non_joints); - - for (int i = 0; i < all_skin_nodes.size(); ++i) { - const GLTFNodeIndex node_index = all_skin_nodes[i]; - const GLTFNodeIndex parent = state.nodes[node_index]->parent; - skeleton_sets.insert(node_index); - - if (all_skin_nodes.find(parent) >= 0) { - skeleton_sets.create_union(parent, node_index); - } - } - - // We are going to connect the separate skin subtrees in each skin together - // so that the final roots are entire sets of valid skin trees - for (int i = 1; i < skin.roots.size(); ++i) { - skeleton_sets.create_union(skin.roots[0], skin.roots[i]); - } - } - - { // attempt to joint all touching subsets (siblings/parent are part of another skin) - Vector<GLTFNodeIndex> groups_representatives; - skeleton_sets.get_representatives(groups_representatives); - - Vector<GLTFNodeIndex> highest_group_members; - Vector<Vector<GLTFNodeIndex>> groups; - for (int i = 0; i < groups_representatives.size(); ++i) { - Vector<GLTFNodeIndex> group; - skeleton_sets.get_members(group, groups_representatives[i]); - highest_group_members.push_back(_find_highest_node(state, group)); - groups.push_back(group); - } - - for (int i = 0; i < highest_group_members.size(); ++i) { - const GLTFNodeIndex node_i = highest_group_members[i]; - - // Attach any siblings together (this needs to be done n^2/2 times) - for (int j = i + 1; j < highest_group_members.size(); ++j) { - const GLTFNodeIndex node_j = highest_group_members[j]; - - // Even if they are siblings under the root! :) - if (state.nodes[node_i]->parent == state.nodes[node_j]->parent) { - skeleton_sets.create_union(node_i, node_j); - } - } - - // Attach any parenting going on together (we need to do this n^2 times) - const GLTFNodeIndex node_i_parent = state.nodes[node_i]->parent; - if (node_i_parent >= 0) { - for (int j = 0; j < groups.size() && i != j; ++j) { - const Vector<GLTFNodeIndex> &group = groups[j]; - - if (group.find(node_i_parent) >= 0) { - const GLTFNodeIndex node_j = highest_group_members[j]; - skeleton_sets.create_union(node_i, node_j); - } - } - } - } - } - - // At this point, the skeleton groups should be finalized - Vector<GLTFNodeIndex> skeleton_owners; - skeleton_sets.get_representatives(skeleton_owners); - - // Mark all the skins actual skeletons, after we have merged them - for (GLTFSkeletonIndex skel_i = 0; skel_i < skeleton_owners.size(); ++skel_i) { - const GLTFNodeIndex skeleton_owner = skeleton_owners[skel_i]; - GLTFSkeleton skeleton; - - Vector<GLTFNodeIndex> skeleton_nodes; - skeleton_sets.get_members(skeleton_nodes, skeleton_owner); - - for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - GLTFSkin &skin = state.skins.write[skin_i]; - - // If any of the the skeletons nodes exist in a skin, that skin now maps to the skeleton - for (int i = 0; i < skeleton_nodes.size(); ++i) { - GLTFNodeIndex skel_node_i = skeleton_nodes[i]; - if (skin.joints.find(skel_node_i) >= 0 || skin.non_joints.find(skel_node_i) >= 0) { - skin.skeleton = skel_i; - continue; - } - } - } - - Vector<GLTFNodeIndex> non_joints; - for (int i = 0; i < skeleton_nodes.size(); ++i) { - const GLTFNodeIndex node_i = skeleton_nodes[i]; - - if (state.nodes[node_i]->joint) { - skeleton.joints.push_back(node_i); - } else { - non_joints.push_back(node_i); - } 
- } - - state.skeletons.push_back(skeleton); - - _reparent_non_joint_skeleton_subtrees(state, state.skeletons.write[skel_i], non_joints); - } - - for (GLTFSkeletonIndex skel_i = 0; skel_i < state.skeletons.size(); ++skel_i) { - GLTFSkeleton &skeleton = state.skeletons.write[skel_i]; - - for (int i = 0; i < skeleton.joints.size(); ++i) { - const GLTFNodeIndex node_i = skeleton.joints[i]; - GLTFNode *node = state.nodes[node_i]; - - ERR_FAIL_COND_V(!node->joint, ERR_PARSE_ERROR); - ERR_FAIL_COND_V(node->skeleton >= 0, ERR_PARSE_ERROR); - node->skeleton = skel_i; - } - - ERR_FAIL_COND_V(_determine_skeleton_roots(state, skel_i), ERR_PARSE_ERROR); - } - - return OK; -} - -Error EditorSceneImporterGLTF::_reparent_non_joint_skeleton_subtrees(GLTFState &state, GLTFSkeleton &skeleton, const Vector<GLTFNodeIndex> &non_joints) { - DisjointSet<GLTFNodeIndex> subtree_set; - - // Populate the disjoint set with ONLY non joints that are in the skeleton hierarchy (non_joints vector) - // This way we can find any joints that lie in between joints, as the current glTF specification - // mentions nothing about non-joints being in between joints of the same skin. Hopefully one day we - // can remove this code. - - // skinD depicted here explains this issue: - // https://github.com/KhronosGroup/glTF-Asset-Generator/blob/master/Output/Positive/Animation_Skin - - for (int i = 0; i < non_joints.size(); ++i) { - const GLTFNodeIndex node_i = non_joints[i]; - - subtree_set.insert(node_i); - - const GLTFNodeIndex parent_i = state.nodes[node_i]->parent; - if (parent_i >= 0 && non_joints.find(parent_i) >= 0 && !state.nodes[parent_i]->joint) { - subtree_set.create_union(parent_i, node_i); - } - } - - // Find all the non joint subtrees and re-parent them to a new "fake" joint - - Vector<GLTFNodeIndex> non_joint_subtree_roots; - subtree_set.get_representatives(non_joint_subtree_roots); - - for (int root_i = 0; root_i < non_joint_subtree_roots.size(); ++root_i) { - const GLTFNodeIndex subtree_root = non_joint_subtree_roots[root_i]; - - Vector<GLTFNodeIndex> subtree_nodes; - subtree_set.get_members(subtree_nodes, subtree_root); - - for (int subtree_i = 0; subtree_i < subtree_nodes.size(); ++subtree_i) { - ERR_FAIL_COND_V(_reparent_to_fake_joint(state, skeleton, subtree_nodes[subtree_i]), FAILED); - - // We modified the tree, recompute all the heights - _compute_node_heights(state); - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_reparent_to_fake_joint(GLTFState &state, GLTFSkeleton &skeleton, const GLTFNodeIndex node_index) { - GLTFNode *node = state.nodes[node_index]; - - // Can we just "steal" this joint if it is just a spatial node? 
- if (node->skin < 0 && node->mesh < 0 && node->camera < 0) { - node->joint = true; - // Add the joint to the skeletons joints - skeleton.joints.push_back(node_index); - return OK; - } - - GLTFNode *fake_joint = memnew(GLTFNode); - const GLTFNodeIndex fake_joint_index = state.nodes.size(); - state.nodes.push_back(fake_joint); - - // We better not be a joint, or we messed up in our logic - if (node->joint) { - return FAILED; - } - - fake_joint->translation = node->translation; - fake_joint->rotation = node->rotation; - fake_joint->scale = node->scale; - fake_joint->xform = node->xform; - fake_joint->joint = true; - - // We can use the exact same name here, because the joint will be inside a skeleton and not the scene - fake_joint->name = node->name; - - // Clear the nodes transforms, since it will be parented to the fake joint - node->translation = Vector3(0, 0, 0); - node->rotation = Quat(); - node->scale = Vector3(1, 1, 1); - node->xform = Transform(); - - // Transfer the node children to the fake joint - for (int child_i = 0; child_i < node->children.size(); ++child_i) { - GLTFNode *child = state.nodes[node->children[child_i]]; - child->parent = fake_joint_index; - } - - fake_joint->children = node->children; - node->children.clear(); - - // add the fake joint to the parent and remove the original joint - if (node->parent >= 0) { - GLTFNode *parent = state.nodes[node->parent]; - parent->children.erase(node_index); - parent->children.push_back(fake_joint_index); - fake_joint->parent = node->parent; - } - - // Add the node to the fake joint - fake_joint->children.push_back(node_index); - node->parent = fake_joint_index; - node->fake_joint_parent = fake_joint_index; - - // Add the fake joint to the skeletons joints - skeleton.joints.push_back(fake_joint_index); - - // Replace skin_skeletons with fake joints if we must. 
- for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - GLTFSkin &skin = state.skins.write[skin_i]; - if (skin.skin_root == node_index) { - skin.skin_root = fake_joint_index; - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_determine_skeleton_roots(GLTFState &state, const GLTFSkeletonIndex skel_i) { - DisjointSet<GLTFNodeIndex> disjoint_set; - - for (GLTFNodeIndex i = 0; i < state.nodes.size(); ++i) { - const GLTFNode *node = state.nodes[i]; - - if (node->skeleton != skel_i) { - continue; - } - - disjoint_set.insert(i); - - if (node->parent >= 0 && state.nodes[node->parent]->skeleton == skel_i) { - disjoint_set.create_union(node->parent, i); - } - } - - GLTFSkeleton &skeleton = state.skeletons.write[skel_i]; - - Vector<GLTFNodeIndex> owners; - disjoint_set.get_representatives(owners); - - Vector<GLTFNodeIndex> roots; - - for (int i = 0; i < owners.size(); ++i) { - Vector<GLTFNodeIndex> set; - disjoint_set.get_members(set, owners[i]); - const GLTFNodeIndex root = _find_highest_node(state, set); - ERR_FAIL_COND_V(root < 0, FAILED); - roots.push_back(root); - } - - roots.sort(); - - skeleton.roots = roots; - - if (roots.size() == 0) { - return FAILED; - } else if (roots.size() == 1) { - return OK; - } - - // Check that the subtrees have the same parent root - const GLTFNodeIndex parent = state.nodes[roots[0]]->parent; - for (int i = 1; i < roots.size(); ++i) { - if (state.nodes[roots[i]]->parent != parent) { - return FAILED; - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_create_skeletons(GLTFState &state) { - for (GLTFSkeletonIndex skel_i = 0; skel_i < state.skeletons.size(); ++skel_i) { - GLTFSkeleton &gltf_skeleton = state.skeletons.write[skel_i]; - - Skeleton3D *skeleton = memnew(Skeleton3D); - gltf_skeleton.godot_skeleton = skeleton; - - // Make a unique name, no gltf node represents this skeleton - skeleton->set_name(_gen_unique_name(state, "Skeleton")); - - List<GLTFNodeIndex> bones; - - for (int i = 0; i < gltf_skeleton.roots.size(); ++i) { - bones.push_back(gltf_skeleton.roots[i]); - } - - // Make the skeleton creation deterministic by going through the roots in - // a sorted order, and DEPTH FIRST - bones.sort(); - - while (!bones.empty()) { - const GLTFNodeIndex node_i = bones.front()->get(); - bones.pop_front(); - - GLTFNode *node = state.nodes[node_i]; - ERR_FAIL_COND_V(node->skeleton != skel_i, FAILED); - - { // Add all child nodes to the stack (deterministically) - Vector<GLTFNodeIndex> child_nodes; - for (int i = 0; i < node->children.size(); ++i) { - const GLTFNodeIndex child_i = node->children[i]; - if (state.nodes[child_i]->skeleton == skel_i) { - child_nodes.push_back(child_i); - } - } - - // Depth first insertion - child_nodes.sort(); - for (int i = child_nodes.size() - 1; i >= 0; --i) { - bones.push_front(child_nodes[i]); - } - } - - const int bone_index = skeleton->get_bone_count(); - - if (node->name.empty()) { - node->name = "bone"; - } - - node->name = _gen_unique_bone_name(state, skel_i, node->name); - - skeleton->add_bone(node->name); - skeleton->set_bone_rest(bone_index, node->xform); - - if (node->parent >= 0 && state.nodes[node->parent]->skeleton == skel_i) { - const int bone_parent = skeleton->find_bone(state.nodes[node->parent]->name); - ERR_FAIL_COND_V(bone_parent < 0, FAILED); - skeleton->set_bone_parent(bone_index, skeleton->find_bone(state.nodes[node->parent]->name)); - } - - state.scene_nodes.insert(node_i, skeleton); - } - } - - ERR_FAIL_COND_V(_map_skin_joints_indices_to_skeleton_bone_indices(state), 
ERR_PARSE_ERROR); - - return OK; -} - -Error EditorSceneImporterGLTF::_map_skin_joints_indices_to_skeleton_bone_indices(GLTFState &state) { - for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - GLTFSkin &skin = state.skins.write[skin_i]; - - const GLTFSkeleton &skeleton = state.skeletons[skin.skeleton]; - - for (int joint_index = 0; joint_index < skin.joints_original.size(); ++joint_index) { - const GLTFNodeIndex node_i = skin.joints_original[joint_index]; - const GLTFNode *node = state.nodes[node_i]; - - skin.joint_i_to_name.insert(joint_index, node->name); - - const int bone_index = skeleton.godot_skeleton->find_bone(node->name); - ERR_FAIL_COND_V(bone_index < 0, FAILED); - - skin.joint_i_to_bone_i.insert(joint_index, bone_index); - } - } - - return OK; -} - -Error EditorSceneImporterGLTF::_create_skins(GLTFState &state) { - for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - GLTFSkin &gltf_skin = state.skins.write[skin_i]; - - Ref<Skin> skin; - skin.instance(); - - // Some skins don't have IBM's! What absolute monsters! - const bool has_ibms = !gltf_skin.inverse_binds.empty(); - - for (int joint_i = 0; joint_i < gltf_skin.joints_original.size(); ++joint_i) { - Transform xform; - if (has_ibms) { - xform = gltf_skin.inverse_binds[joint_i]; - } - - if (state.use_named_skin_binds) { - StringName name = gltf_skin.joint_i_to_name[joint_i]; - skin->add_named_bind(name, xform); - } else { - int bone_i = gltf_skin.joint_i_to_bone_i[joint_i]; - skin->add_bind(bone_i, xform); - } - } - - gltf_skin.godot_skin = skin; - } - - // Purge the duplicates! - _remove_duplicate_skins(state); - - // Create unique names now, after removing duplicates - for (GLTFSkinIndex skin_i = 0; skin_i < state.skins.size(); ++skin_i) { - Ref<Skin> skin = state.skins[skin_i].godot_skin; - if (skin->get_name().empty()) { - // Make a unique name, no gltf node represents this skin - skin->set_name(_gen_unique_name(state, "Skin")); - } - } - - return OK; -} - -bool EditorSceneImporterGLTF::_skins_are_same(const Ref<Skin> &skin_a, const Ref<Skin> &skin_b) { - if (skin_a->get_bind_count() != skin_b->get_bind_count()) { - return false; - } - - for (int i = 0; i < skin_a->get_bind_count(); ++i) { - if (skin_a->get_bind_bone(i) != skin_b->get_bind_bone(i)) { - return false; - } - - Transform a_xform = skin_a->get_bind_pose(i); - Transform b_xform = skin_b->get_bind_pose(i); - - if (a_xform != b_xform) { - return false; - } - } - - return true; -} - -void EditorSceneImporterGLTF::_remove_duplicate_skins(GLTFState &state) { - for (int i = 0; i < state.skins.size(); ++i) { - for (int j = i + 1; j < state.skins.size(); ++j) { - const Ref<Skin> &skin_i = state.skins[i].godot_skin; - const Ref<Skin> &skin_j = state.skins[j].godot_skin; - - if (_skins_are_same(skin_i, skin_j)) { - // replace it and delete the old - state.skins.write[j].godot_skin = skin_i; - } - } - } -} - -Error EditorSceneImporterGLTF::_parse_lights(GLTFState &state) { - if (!state.json.has("extensions")) { - return OK; - } - Dictionary extensions = state.json["extensions"]; - if (!extensions.has("KHR_lights_punctual")) { - return OK; - } - Dictionary lights_punctual = extensions["KHR_lights_punctual"]; - if (!lights_punctual.has("lights")) { - return OK; - } - - const Array &lights = lights_punctual["lights"]; - - for (GLTFLightIndex light_i = 0; light_i < lights.size(); light_i++) { - const Dictionary &d = lights[light_i]; - - GLTFLight light; - ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); - const String &type = 
d["type"]; - light.type = type; - - if (d.has("color")) { - const Array &arr = d["color"]; - ERR_FAIL_COND_V(arr.size() != 3, ERR_PARSE_ERROR); - const Color c = Color(arr[0], arr[1], arr[2]).to_srgb(); - light.color = c; - } - if (d.has("intensity")) { - light.intensity = d["intensity"]; - } - if (d.has("range")) { - light.range = d["range"]; - } - if (type == "spot") { - const Dictionary &spot = d["spot"]; - light.inner_cone_angle = spot["innerConeAngle"]; - light.outer_cone_angle = spot["outerConeAngle"]; - ERR_FAIL_COND_V_MSG(light.inner_cone_angle >= light.outer_cone_angle, ERR_PARSE_ERROR, "The inner angle must be smaller than the outer angle."); - } else if (type != "point" && type != "directional") { - ERR_FAIL_V_MSG(ERR_PARSE_ERROR, "Light type is unknown."); - } - - state.lights.push_back(light); - } - - print_verbose("glTF: Total lights: " + itos(state.lights.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_cameras(GLTFState &state) { - if (!state.json.has("cameras")) { - return OK; - } - - const Array &cameras = state.json["cameras"]; - - for (GLTFCameraIndex i = 0; i < cameras.size(); i++) { - const Dictionary &d = cameras[i]; - - GLTFCamera camera; - ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); - const String &type = d["type"]; - if (type == "orthographic") { - camera.perspective = false; - if (d.has("orthographic")) { - const Dictionary &og = d["orthographic"]; - camera.fov_size = og["ymag"]; - camera.zfar = og["zfar"]; - camera.znear = og["znear"]; - } else { - camera.fov_size = 10; - } - - } else if (type == "perspective") { - camera.perspective = true; - if (d.has("perspective")) { - const Dictionary &ppt = d["perspective"]; - // GLTF spec is in radians, Godot's camera is in degrees. - camera.fov_size = (double)ppt["yfov"] * 180.0 / Math_PI; - camera.zfar = ppt["zfar"]; - camera.znear = ppt["znear"]; - } else { - camera.fov_size = 10; - } - } else { - ERR_FAIL_V_MSG(ERR_PARSE_ERROR, "Camera should be in 'orthographic' or 'perspective'"); - } - - state.cameras.push_back(camera); - } - - print_verbose("glTF: Total cameras: " + itos(state.cameras.size())); - - return OK; -} - -Error EditorSceneImporterGLTF::_parse_animations(GLTFState &state) { - if (!state.json.has("animations")) { - return OK; - } - - const Array &animations = state.json["animations"]; - - for (GLTFAnimationIndex i = 0; i < animations.size(); i++) { - const Dictionary &d = animations[i]; - - GLTFAnimation animation; - - if (!d.has("channels") || !d.has("samplers")) { - continue; - } - - Array channels = d["channels"]; - Array samplers = d["samplers"]; - - if (d.has("name")) { - String name = d["name"]; - if (name.begins_with("loop") || name.ends_with("loop") || name.begins_with("cycle") || name.ends_with("cycle")) { - animation.loop = true; - } - animation.name = _sanitize_scene_name(name); - } - - for (int j = 0; j < channels.size(); j++) { - const Dictionary &c = channels[j]; - if (!c.has("target")) { - continue; - } - - const Dictionary &t = c["target"]; - if (!t.has("node") || !t.has("path")) { - continue; - } - - ERR_FAIL_COND_V(!c.has("sampler"), ERR_PARSE_ERROR); - const int sampler = c["sampler"]; - ERR_FAIL_INDEX_V(sampler, samplers.size(), ERR_PARSE_ERROR); - - GLTFNodeIndex node = t["node"]; - String path = t["path"]; - - ERR_FAIL_INDEX_V(node, state.nodes.size(), ERR_PARSE_ERROR); - - GLTFAnimation::Track *track = nullptr; - - if (!animation.tracks.has(node)) { - animation.tracks[node] = GLTFAnimation::Track(); - } - - track = &animation.tracks[node]; - - const 
Dictionary &s = samplers[sampler]; - - ERR_FAIL_COND_V(!s.has("input"), ERR_PARSE_ERROR); - ERR_FAIL_COND_V(!s.has("output"), ERR_PARSE_ERROR); - - const int input = s["input"]; - const int output = s["output"]; - - GLTFAnimation::Interpolation interp = GLTFAnimation::INTERP_LINEAR; - int output_count = 1; - if (s.has("interpolation")) { - const String &in = s["interpolation"]; - if (in == "STEP") { - interp = GLTFAnimation::INTERP_STEP; - } else if (in == "LINEAR") { - interp = GLTFAnimation::INTERP_LINEAR; - } else if (in == "CATMULLROMSPLINE") { - interp = GLTFAnimation::INTERP_CATMULLROMSPLINE; - output_count = 3; - } else if (in == "CUBICSPLINE") { - interp = GLTFAnimation::INTERP_CUBIC_SPLINE; - output_count = 3; - } - } - - const Vector<float> times = _decode_accessor_as_floats(state, input, false); - if (path == "translation") { - const Vector<Vector3> translations = _decode_accessor_as_vec3(state, output, false); - track->translation_track.interpolation = interp; - track->translation_track.times = Variant(times); //convert via variant - track->translation_track.values = Variant(translations); //convert via variant - } else if (path == "rotation") { - const Vector<Quat> rotations = _decode_accessor_as_quat(state, output, false); - track->rotation_track.interpolation = interp; - track->rotation_track.times = Variant(times); //convert via variant - track->rotation_track.values = rotations; //convert via variant - } else if (path == "scale") { - const Vector<Vector3> scales = _decode_accessor_as_vec3(state, output, false); - track->scale_track.interpolation = interp; - track->scale_track.times = Variant(times); //convert via variant - track->scale_track.values = Variant(scales); //convert via variant - } else if (path == "weights") { - const Vector<float> weights = _decode_accessor_as_floats(state, output, false); - - ERR_FAIL_INDEX_V(state.nodes[node]->mesh, state.meshes.size(), ERR_PARSE_ERROR); - const GLTFMesh *mesh = &state.meshes[state.nodes[node]->mesh]; - ERR_FAIL_COND_V(mesh->blend_weights.size() == 0, ERR_PARSE_ERROR); - const int wc = mesh->blend_weights.size(); - - track->weight_tracks.resize(wc); - - const int expected_value_count = times.size() * output_count * wc; - ERR_FAIL_COND_V_MSG(weights.size() != expected_value_count, ERR_PARSE_ERROR, "Invalid weight data, expected " + itos(expected_value_count) + " weight values, got " + itos(weights.size()) + " instead."); - - const int wlen = weights.size() / wc; - const float *r = weights.ptr(); - for (int k = 0; k < wc; k++) { //separate tracks, having them together is not such a good idea - GLTFAnimation::Channel<float> cf; - cf.interpolation = interp; - cf.times = Variant(times); - Vector<float> wdata; - wdata.resize(wlen); - for (int l = 0; l < wlen; l++) { - wdata.write[l] = r[l * wc + k]; - } - - cf.values = wdata; - track->weight_tracks.write[k] = cf; - } - } else { - WARN_PRINT("Invalid path '" + path + "'."); - } - } - - state.animations.push_back(animation); - } - - print_verbose("glTF: Total animations '" + itos(state.animations.size()) + "'."); - - return OK; -} - -void EditorSceneImporterGLTF::_assign_scene_names(GLTFState &state) { - for (int i = 0; i < state.nodes.size(); i++) { - GLTFNode *n = state.nodes[i]; - - // Any joints get unique names generated when the skeleton is made, unique to the skeleton - if (n->skeleton >= 0) { - continue; - } - - if (n->name.empty()) { - if (n->mesh >= 0) { - n->name = "Mesh"; - } else if (n->camera >= 0) { - n->name = "Camera"; - } else { - n->name = "Node"; - } - } - - 
n->name = _gen_unique_name(state, n->name); - } -} - -BoneAttachment3D *EditorSceneImporterGLTF::_generate_bone_attachment(GLTFState &state, Skeleton3D *skeleton, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - const GLTFNode *bone_node = state.nodes[gltf_node->parent]; - - BoneAttachment3D *bone_attachment = memnew(BoneAttachment3D); - print_verbose("glTF: Creating bone attachment for: " + gltf_node->name); - - ERR_FAIL_COND_V(!bone_node->joint, nullptr); - - bone_attachment->set_bone_name(bone_node->name); - - return bone_attachment; -} - -EditorSceneImporterMeshNode *EditorSceneImporterGLTF::_generate_mesh_instance(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - - ERR_FAIL_INDEX_V(gltf_node->mesh, state.meshes.size(), nullptr); - - EditorSceneImporterMeshNode *mi = memnew(EditorSceneImporterMeshNode); - print_verbose("glTF: Creating mesh for: " + gltf_node->name); - - GLTFMesh &mesh = state.meshes.write[gltf_node->mesh]; - mi->set_mesh(mesh.mesh); - - if (mesh.mesh->get_name() == "") { - mesh.mesh->set_name(gltf_node->name); - } - - for (int i = 0; i < mesh.blend_weights.size(); i++) { - mi->set("blend_shapes/" + mesh.mesh->get_blend_shape_name(i), mesh.blend_weights[i]); - } - - return mi; -} - -Light3D *EditorSceneImporterGLTF::_generate_light(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - - ERR_FAIL_INDEX_V(gltf_node->light, state.lights.size(), nullptr); - - print_verbose("glTF: Creating light for: " + gltf_node->name); - - const GLTFLight &l = state.lights[gltf_node->light]; - - float intensity = l.intensity; - if (intensity > 10) { - // GLTF spec has the default around 1, but Blender defaults lights to 100. - // The only sane way to handle this is to check where it came from and - // handle it accordingly. If it's over 10, it probably came from Blender. - intensity /= 100; - } - - if (l.type == "directional") { - DirectionalLight3D *light = memnew(DirectionalLight3D); - light->set_param(Light3D::PARAM_ENERGY, intensity); - light->set_color(l.color); - return light; - } - - const float range = CLAMP(l.range, 0, 4096); - // Doubling the range will double the effective brightness, so we need double attenuation (half brightness). - // We want to have double intensity give double brightness, so we need half the attenuation. - const float attenuation = range / intensity; - if (l.type == "point") { - OmniLight3D *light = memnew(OmniLight3D); - light->set_param(OmniLight3D::PARAM_ATTENUATION, attenuation); - light->set_param(OmniLight3D::PARAM_RANGE, range); - light->set_color(l.color); - return light; - } - if (l.type == "spot") { - SpotLight3D *light = memnew(SpotLight3D); - light->set_param(SpotLight3D::PARAM_ATTENUATION, attenuation); - light->set_param(SpotLight3D::PARAM_RANGE, range); - light->set_param(SpotLight3D::PARAM_SPOT_ANGLE, Math::rad2deg(l.outer_cone_angle)); - light->set_color(l.color); - - // Line of best fit derived from guessing, see https://www.desmos.com/calculator/biiflubp8b - // The points in desmos are not exact, except for (1, infinity). 
- float angle_ratio = l.inner_cone_angle / l.outer_cone_angle; - float angle_attenuation = 0.2 / (1 - angle_ratio) - 0.1; - light->set_param(SpotLight3D::PARAM_SPOT_ATTENUATION, angle_attenuation); - return light; - } - return nullptr; -} - -Camera3D *EditorSceneImporterGLTF::_generate_camera(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - - ERR_FAIL_INDEX_V(gltf_node->camera, state.cameras.size(), nullptr); - - Camera3D *camera = memnew(Camera3D); - print_verbose("glTF: Creating camera for: " + gltf_node->name); - - const GLTFCamera &c = state.cameras[gltf_node->camera]; - if (c.perspective) { - camera->set_perspective(c.fov_size, c.znear, c.zfar); - } else { - camera->set_orthogonal(c.fov_size, c.znear, c.zfar); - } - - return camera; -} - -Node3D *EditorSceneImporterGLTF::_generate_spatial(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - - Node3D *spatial = memnew(Node3D); - print_verbose("glTF: Creating spatial for: " + gltf_node->name); - - return spatial; -} - -void EditorSceneImporterGLTF::_generate_scene_node(GLTFState &state, Node *scene_parent, Node3D *scene_root, const GLTFNodeIndex node_index) { - const GLTFNode *gltf_node = state.nodes[node_index]; - - Node3D *current_node = nullptr; - - // Is our parent a skeleton - Skeleton3D *active_skeleton = Object::cast_to<Skeleton3D>(scene_parent); - - if (gltf_node->skeleton >= 0) { - Skeleton3D *skeleton = state.skeletons[gltf_node->skeleton].godot_skeleton; - - if (active_skeleton != skeleton) { - ERR_FAIL_COND_MSG(active_skeleton != nullptr, "glTF: Generating scene detected direct parented Skeletons"); - - // Add it to the scene if it has not already been added - if (skeleton->get_parent() == nullptr) { - scene_parent->add_child(skeleton); - skeleton->set_owner(scene_root); - } - } - - active_skeleton = skeleton; - current_node = skeleton; - } - - // If we have an active skeleton, and the node is node skinned, we need to create a bone attachment - if (current_node == nullptr && active_skeleton != nullptr && gltf_node->skin < 0) { - BoneAttachment3D *bone_attachment = _generate_bone_attachment(state, active_skeleton, node_index); - - scene_parent->add_child(bone_attachment); - bone_attachment->set_owner(scene_root); - - // There is no gltf_node that represent this, so just directly create a unique name - bone_attachment->set_name(_gen_unique_name(state, "BoneAttachment")); - - // We change the scene_parent to our bone attachment now. 
We do not set current_node because we want to make the node - // and attach it to the bone_attachment - scene_parent = bone_attachment; - } - - // We still have not managed to make a node - if (current_node == nullptr) { - if (gltf_node->mesh >= 0) { - current_node = _generate_mesh_instance(state, scene_parent, node_index); - } else if (gltf_node->camera >= 0) { - current_node = _generate_camera(state, scene_parent, node_index); - } else if (gltf_node->light >= 0) { - current_node = _generate_light(state, scene_parent, node_index); - } else { - current_node = _generate_spatial(state, scene_parent, node_index); - } - - scene_parent->add_child(current_node); - current_node->set_owner(scene_root); - current_node->set_transform(gltf_node->xform); - current_node->set_name(gltf_node->name); - } - - state.scene_nodes.insert(node_index, current_node); - - for (int i = 0; i < gltf_node->children.size(); ++i) { - _generate_scene_node(state, current_node, scene_root, gltf_node->children[i]); - } -} - -template <class T> -struct EditorSceneImporterGLTFInterpolate { - T lerp(const T &a, const T &b, float c) const { - return a + (b - a) * c; - } - - T catmull_rom(const T &p0, const T &p1, const T &p2, const T &p3, float t) { - const float t2 = t * t; - const float t3 = t2 * t; - - return 0.5f * ((2.0f * p1) + (-p0 + p2) * t + (2.0f * p0 - 5.0f * p1 + 4.0f * p2 - p3) * t2 + (-p0 + 3.0f * p1 - 3.0f * p2 + p3) * t3); - } - - T bezier(T start, T control_1, T control_2, T end, float t) { - /* Formula from Wikipedia article on Bezier curves. */ - const real_t omt = (1.0 - t); - const real_t omt2 = omt * omt; - const real_t omt3 = omt2 * omt; - const real_t t2 = t * t; - const real_t t3 = t2 * t; - - return start * omt3 + control_1 * omt2 * t * 3.0 + control_2 * omt * t2 * 3.0 + end * t3; - } -}; - -// thank you for existing, partial specialization -template <> -struct EditorSceneImporterGLTFInterpolate<Quat> { - Quat lerp(const Quat &a, const Quat &b, const float c) const { - ERR_FAIL_COND_V_MSG(!a.is_normalized(), Quat(), "The quaternion \"a\" must be normalized."); - ERR_FAIL_COND_V_MSG(!b.is_normalized(), Quat(), "The quaternion \"b\" must be normalized."); - - return a.slerp(b, c).normalized(); - } - - Quat catmull_rom(const Quat &p0, const Quat &p1, const Quat &p2, const Quat &p3, const float c) { - ERR_FAIL_COND_V_MSG(!p1.is_normalized(), Quat(), "The quaternion \"p1\" must be normalized."); - ERR_FAIL_COND_V_MSG(!p2.is_normalized(), Quat(), "The quaternion \"p2\" must be normalized."); - - return p1.slerp(p2, c).normalized(); - } - - Quat bezier(const Quat start, const Quat control_1, const Quat control_2, const Quat end, const float t) { - ERR_FAIL_COND_V_MSG(!start.is_normalized(), Quat(), "The start quaternion must be normalized."); - ERR_FAIL_COND_V_MSG(!end.is_normalized(), Quat(), "The end quaternion must be normalized."); - - return start.slerp(end, t).normalized(); - } -}; - -template <class T> -T EditorSceneImporterGLTF::_interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, const float p_time, const GLTFAnimation::Interpolation p_interp) { - //could use binary search, worth it? 
- int idx = -1; - for (int i = 0; i < p_times.size(); i++) { - if (p_times[i] > p_time) { - break; - } - idx++; - } - - EditorSceneImporterGLTFInterpolate<T> interp; - - switch (p_interp) { - case GLTFAnimation::INTERP_LINEAR: { - if (idx == -1) { - return p_values[0]; - } else if (idx >= p_times.size() - 1) { - return p_values[p_times.size() - 1]; - } - - const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - return interp.lerp(p_values[idx], p_values[idx + 1], c); - - } break; - case GLTFAnimation::INTERP_STEP: { - if (idx == -1) { - return p_values[0]; - } else if (idx >= p_times.size() - 1) { - return p_values[p_times.size() - 1]; - } - - return p_values[idx]; - - } break; - case GLTFAnimation::INTERP_CATMULLROMSPLINE: { - if (idx == -1) { - return p_values[1]; - } else if (idx >= p_times.size() - 1) { - return p_values[1 + p_times.size() - 1]; - } - - const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - return interp.catmull_rom(p_values[idx - 1], p_values[idx], p_values[idx + 1], p_values[idx + 3], c); - - } break; - case GLTFAnimation::INTERP_CUBIC_SPLINE: { - if (idx == -1) { - return p_values[1]; - } else if (idx >= p_times.size() - 1) { - return p_values[(p_times.size() - 1) * 3 + 1]; - } - - const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - const T from = p_values[idx * 3 + 1]; - const T c1 = from + p_values[idx * 3 + 2]; - const T to = p_values[idx * 3 + 4]; - const T c2 = to + p_values[idx * 3 + 3]; - - return interp.bezier(from, c1, c2, to, c); - - } break; - } - - ERR_FAIL_V(p_values[0]); -} - -void EditorSceneImporterGLTF::_import_animation(GLTFState &state, AnimationPlayer *ap, const GLTFAnimationIndex index, const int bake_fps) { - const GLTFAnimation &anim = state.animations[index]; - - String name = anim.name; - if (name.empty()) { - // No node represent these, and they are not in the hierarchy, so just make a unique name - name = _gen_unique_name(state, "Animation"); - } - - Ref<Animation> animation; - animation.instance(); - animation->set_name(name); - - if (anim.loop) { - animation->set_loop(true); - } - - float length = 0; - - for (Map<int, GLTFAnimation::Track>::Element *E = anim.tracks.front(); E; E = E->next()) { - const GLTFAnimation::Track &track = E->get(); - //need to find the path - NodePath node_path; - - GLTFNodeIndex node_index = E->key(); - if (state.nodes[node_index]->fake_joint_parent >= 0) { - // Should be same as parent - node_index = state.nodes[node_index]->fake_joint_parent; - } - - const GLTFNode *node = state.nodes[E->key()]; - - if (node->skeleton >= 0) { - const Skeleton3D *sk = Object::cast_to<Skeleton3D>(state.scene_nodes.find(node_index)->get()); - ERR_FAIL_COND(sk == nullptr); - - const String path = ap->get_parent()->get_path_to(sk); - const String bone = node->name; - node_path = path + ":" + bone; - } else { - node_path = ap->get_parent()->get_path_to(state.scene_nodes.find(node_index)->get()); - } - - for (int i = 0; i < track.rotation_track.times.size(); i++) { - length = MAX(length, track.rotation_track.times[i]); - } - for (int i = 0; i < track.translation_track.times.size(); i++) { - length = MAX(length, track.translation_track.times[i]); - } - for (int i = 0; i < track.scale_track.times.size(); i++) { - length = MAX(length, track.scale_track.times[i]); - } - - for (int i = 0; i < track.weight_tracks.size(); i++) { - for (int j = 0; j < track.weight_tracks[i].times.size(); j++) { - length = MAX(length, track.weight_tracks[i].times[j]); - } - } - - 
if (track.rotation_track.values.size() || track.translation_track.values.size() || track.scale_track.values.size()) { - //make transform track - int track_idx = animation->get_track_count(); - animation->add_track(Animation::TYPE_TRANSFORM); - animation->track_set_path(track_idx, node_path); - animation->track_set_imported(track_idx, true); - //first determine animation length - - const float increment = 1.0 / float(bake_fps); - float time = 0.0; - - Vector3 base_pos; - Quat base_rot; - Vector3 base_scale = Vector3(1, 1, 1); - - if (!track.rotation_track.values.size()) { - base_rot = state.nodes[E->key()]->rotation.normalized(); - } - - if (!track.translation_track.values.size()) { - base_pos = state.nodes[E->key()]->translation; - } - - if (!track.scale_track.values.size()) { - base_scale = state.nodes[E->key()]->scale; - } - - bool last = false; - while (true) { - Vector3 pos = base_pos; - Quat rot = base_rot; - Vector3 scale = base_scale; - - if (track.translation_track.times.size()) { - pos = _interpolate_track<Vector3>(track.translation_track.times, track.translation_track.values, time, track.translation_track.interpolation); - } - - if (track.rotation_track.times.size()) { - rot = _interpolate_track<Quat>(track.rotation_track.times, track.rotation_track.values, time, track.rotation_track.interpolation); - } - - if (track.scale_track.times.size()) { - scale = _interpolate_track<Vector3>(track.scale_track.times, track.scale_track.values, time, track.scale_track.interpolation); - } - - if (node->skeleton >= 0) { - Transform xform; - xform.basis.set_quat_scale(rot, scale); - xform.origin = pos; - - const Skeleton3D *skeleton = state.skeletons[node->skeleton].godot_skeleton; - const int bone_idx = skeleton->find_bone(node->name); - xform = skeleton->get_bone_rest(bone_idx).affine_inverse() * xform; - - rot = xform.basis.get_rotation_quat(); - rot.normalize(); - scale = xform.basis.get_scale(); - pos = xform.origin; - } - - animation->transform_track_insert_key(track_idx, time, pos, rot, scale); - - if (last) { - break; - } - time += increment; - if (time >= length) { - last = true; - time = length; - } - } - } - - for (int i = 0; i < track.weight_tracks.size(); i++) { - ERR_CONTINUE(node->mesh < 0 || node->mesh >= state.meshes.size()); - const GLTFMesh &mesh = state.meshes[node->mesh]; - const String prop = "blend_shapes/" + mesh.mesh->get_blend_shape_name(i); - - const String blend_path = String(node_path) + ":" + prop; - - const int track_idx = animation->get_track_count(); - animation->add_track(Animation::TYPE_VALUE); - animation->track_set_path(track_idx, blend_path); - - // Only LINEAR and STEP (NEAREST) can be supported out of the box by Godot's Animation, - // the other modes have to be baked. - GLTFAnimation::Interpolation gltf_interp = track.weight_tracks[i].interpolation; - if (gltf_interp == GLTFAnimation::INTERP_LINEAR || gltf_interp == GLTFAnimation::INTERP_STEP) { - animation->track_set_interpolation_type(track_idx, gltf_interp == GLTFAnimation::INTERP_STEP ? Animation::INTERPOLATION_NEAREST : Animation::INTERPOLATION_LINEAR); - for (int j = 0; j < track.weight_tracks[i].times.size(); j++) { - const float t = track.weight_tracks[i].times[j]; - const float w = track.weight_tracks[i].values[j]; - animation->track_insert_key(track_idx, t, w); - } - } else { - // CATMULLROMSPLINE or CUBIC_SPLINE have to be baked, apologies. 
- const float increment = 1.0 / float(bake_fps); - float time = 0.0; - bool last = false; - while (true) { - _interpolate_track<float>(track.weight_tracks[i].times, track.weight_tracks[i].values, time, gltf_interp); - if (last) { - break; - } - time += increment; - if (time >= length) { - last = true; - time = length; - } - } - } - } - } - - animation->set_length(length); - - ap->add_animation(name, animation); -} - -void EditorSceneImporterGLTF::_process_mesh_instances(GLTFState &state, Node3D *scene_root) { - for (GLTFNodeIndex node_i = 0; node_i < state.nodes.size(); ++node_i) { - const GLTFNode *node = state.nodes[node_i]; - - if (node->skin >= 0 && node->mesh >= 0) { - const GLTFSkinIndex skin_i = node->skin; - - Map<GLTFNodeIndex, Node *>::Element *mi_element = state.scene_nodes.find(node_i); - EditorSceneImporterMeshNode *mi = Object::cast_to<EditorSceneImporterMeshNode>(mi_element->get()); - ERR_FAIL_COND(mi == nullptr); - - const GLTFSkeletonIndex skel_i = state.skins[node->skin].skeleton; - const GLTFSkeleton &gltf_skeleton = state.skeletons[skel_i]; - Skeleton3D *skeleton = gltf_skeleton.godot_skeleton; - ERR_FAIL_COND(skeleton == nullptr); - - mi->get_parent()->remove_child(mi); - skeleton->add_child(mi); - mi->set_owner(scene_root); - - mi->set_skin(state.skins[skin_i].godot_skin); - mi->set_skeleton_path(mi->get_path_to(skeleton)); - mi->set_transform(Transform()); - } - } -} - -Node3D *EditorSceneImporterGLTF::_generate_scene(GLTFState &state, const int p_bake_fps) { - Node3D *root = memnew(Node3D); - - // scene_name is already unique - root->set_name(state.scene_name); - - for (int i = 0; i < state.root_nodes.size(); ++i) { - _generate_scene_node(state, root, root, state.root_nodes[i]); - } - - _process_mesh_instances(state, root); - - if (state.animations.size()) { - AnimationPlayer *ap = memnew(AnimationPlayer); - ap->set_name("AnimationPlayer"); - root->add_child(ap); - ap->set_owner(root); - - for (int i = 0; i < state.animations.size(); i++) { - _import_animation(state, ap, i, p_bake_fps); - } - } - - return root; -} - -Node *EditorSceneImporterGLTF::import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, List<String> *r_missing_deps, Error *r_err) { - print_verbose(vformat("glTF: Importing file %s as scene.", p_path)); - - GLTFState state; - - if (p_path.to_lower().ends_with("glb")) { - //binary file - //text file - Error err = _parse_glb(p_path, state); - if (err) { - return nullptr; - } - } else { - //text file - Error err = _parse_json(p_path, state); - if (err) { - return nullptr; - } - } - - ERR_FAIL_COND_V(!state.json.has("asset"), nullptr); - - Dictionary asset = state.json["asset"]; - - ERR_FAIL_COND_V(!asset.has("version"), nullptr); - - String version = asset["version"]; - - state.import_flags = p_flags; - state.major_version = version.get_slice(".", 0).to_int(); - state.minor_version = version.get_slice(".", 1).to_int(); - state.use_named_skin_binds = p_flags & IMPORT_USE_NAMED_SKIN_BINDS; - - /* STEP 0 PARSE SCENE */ - Error err = _parse_scenes(state); - if (err != OK) { - return nullptr; - } - - /* STEP 1 PARSE NODES */ - err = _parse_nodes(state); - if (err != OK) { - return nullptr; - } - - /* STEP 2 PARSE BUFFERS */ - err = _parse_buffers(state, p_path.get_base_dir()); - if (err != OK) { - return nullptr; - } - - /* STEP 3 PARSE BUFFER VIEWS */ - err = _parse_buffer_views(state); - if (err != OK) { - return nullptr; - } - - /* STEP 4 PARSE ACCESSORS */ - err = _parse_accessors(state); - if (err != OK) { - return nullptr; - } - - /* STEP 5 
PARSE IMAGES */ - err = _parse_images(state, p_path.get_base_dir()); - if (err != OK) { - return nullptr; - } - - /* STEP 6 PARSE TEXTURES */ - err = _parse_textures(state); - if (err != OK) { - return nullptr; - } - - /* STEP 7 PARSE TEXTURES */ - err = _parse_materials(state); - if (err != OK) { - return nullptr; - } - - /* STEP 9 PARSE SKINS */ - err = _parse_skins(state); - if (err != OK) { - return nullptr; - } - - /* STEP 10 DETERMINE SKELETONS */ - err = _determine_skeletons(state); - if (err != OK) { - return nullptr; - } - - /* STEP 11 CREATE SKELETONS */ - err = _create_skeletons(state); - if (err != OK) { - return nullptr; - } - - /* STEP 12 CREATE SKINS */ - err = _create_skins(state); - if (err != OK) { - return nullptr; - } - - /* STEP 13 PARSE MESHES (we have enough info now) */ - err = _parse_meshes(state); - if (err != OK) { - return nullptr; - } - - /* STEP 14 PARSE LIGHTS */ - err = _parse_lights(state); - if (err != OK) { - return NULL; - } - - /* STEP 15 PARSE CAMERAS */ - err = _parse_cameras(state); - if (err != OK) { - return nullptr; - } - - /* STEP 16 PARSE ANIMATIONS */ - err = _parse_animations(state); - if (err != OK) { - return nullptr; - } - - /* STEP 17 ASSIGN SCENE NAMES */ - _assign_scene_names(state); - - /* STEP 18 MAKE SCENE! */ - Node3D *scene = _generate_scene(state, p_bake_fps); - - return scene; -} - -Ref<Animation> EditorSceneImporterGLTF::import_animation(const String &p_path, uint32_t p_flags, int p_bake_fps) { - return Ref<Animation>(); -} - -EditorSceneImporterGLTF::EditorSceneImporterGLTF() { -} diff --git a/editor/import/editor_scene_importer_gltf.h b/editor/import/editor_scene_importer_gltf.h deleted file mode 100644 index e6163a46be..0000000000 --- a/editor/import/editor_scene_importer_gltf.h +++ /dev/null @@ -1,384 +0,0 @@ -/*************************************************************************/ -/* editor_scene_importer_gltf.h */ -/*************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -/*************************************************************************/ - -#ifndef EDITOR_SCENE_IMPORTER_GLTF_H -#define EDITOR_SCENE_IMPORTER_GLTF_H - -#include "editor/import/resource_importer_scene.h" -#include "scene/3d/light_3d.h" -#include "scene/3d/node_3d.h" -#include "scene/3d/skeleton_3d.h" - -class AnimationPlayer; -class BoneAttachment3D; -class EditorSceneImporterMeshNode; - -class EditorSceneImporterGLTF : public EditorSceneImporter { - GDCLASS(EditorSceneImporterGLTF, EditorSceneImporter); - - typedef int GLTFAccessorIndex; - typedef int GLTFAnimationIndex; - typedef int GLTFBufferIndex; - typedef int GLTFBufferViewIndex; - typedef int GLTFCameraIndex; - typedef int GLTFImageIndex; - typedef int GLTFMaterialIndex; - typedef int GLTFMeshIndex; - typedef int GLTFLightIndex; - typedef int GLTFNodeIndex; - typedef int GLTFSkeletonIndex; - typedef int GLTFSkinIndex; - typedef int GLTFTextureIndex; - - enum { - ARRAY_BUFFER = 34962, - ELEMENT_ARRAY_BUFFER = 34963, - - TYPE_BYTE = 5120, - TYPE_UNSIGNED_BYTE = 5121, - TYPE_SHORT = 5122, - TYPE_UNSIGNED_SHORT = 5123, - TYPE_UNSIGNED_INT = 5125, - TYPE_FLOAT = 5126, - - COMPONENT_TYPE_BYTE = 5120, - COMPONENT_TYPE_UNSIGNED_BYTE = 5121, - COMPONENT_TYPE_SHORT = 5122, - COMPONENT_TYPE_UNSIGNED_SHORT = 5123, - COMPONENT_TYPE_INT = 5125, - COMPONENT_TYPE_FLOAT = 5126, - - }; - - String _get_component_type_name(const uint32_t p_component); - int _get_component_type_size(const int component_type); - - enum GLTFType { - TYPE_SCALAR, - TYPE_VEC2, - TYPE_VEC3, - TYPE_VEC4, - TYPE_MAT2, - TYPE_MAT3, - TYPE_MAT4, - }; - - String _get_type_name(const GLTFType p_component); - - struct GLTFNode { - //matrices need to be transformed to this - GLTFNodeIndex parent = -1; - int height = -1; - - Transform xform; - String name; - - GLTFMeshIndex mesh = -1; - GLTFCameraIndex camera = -1; - GLTFSkinIndex skin = -1; - - GLTFSkeletonIndex skeleton = -1; - bool joint = false; - - Vector3 translation; - Quat rotation; - Vector3 scale = Vector3(1, 1, 1); - - Vector<int> children; - - GLTFNodeIndex fake_joint_parent = -1; - - GLTFLightIndex light = -1; - }; - - struct GLTFBufferView { - GLTFBufferIndex buffer = -1; - int byte_offset = 0; - int byte_length = 0; - int byte_stride = 0; - bool indices = false; - //matrices need to be transformed to this - }; - - struct GLTFAccessor { - GLTFBufferViewIndex buffer_view = 0; - int byte_offset = 0; - int component_type = 0; - bool normalized = false; - int count = 0; - GLTFType type = GLTFType::TYPE_SCALAR; - float min = 0; - float max = 0; - int sparse_count = 0; - int sparse_indices_buffer_view = 0; - int sparse_indices_byte_offset = 0; - int sparse_indices_component_type = 0; - int sparse_values_buffer_view = 0; - int sparse_values_byte_offset = 0; - }; - struct GLTFTexture { - GLTFImageIndex src_image; - }; - - struct GLTFSkeleton { - // The *synthesized* skeletons joints - Vector<GLTFNodeIndex> joints; - - // The roots of the skeleton. If there are multiple, each root must have the same parent - // (ie roots are siblings) - Vector<GLTFNodeIndex> roots; - - // The created Skeleton for the scene - Skeleton3D *godot_skeleton = nullptr; - - // Set of unique bone names for the skeleton - Set<String> unique_names; - }; - - struct GLTFSkin { - String name; - - // The "skeleton" property defined in the gltf spec. 
-1 = Scene Root - GLTFNodeIndex skin_root = -1; - - Vector<GLTFNodeIndex> joints_original; - Vector<Transform> inverse_binds; - - // Note: joints + non_joints should form a complete subtree, or subtrees with a common parent - - // All nodes that are skins that are caught in-between the original joints - // (inclusive of joints_original) - Vector<GLTFNodeIndex> joints; - - // All Nodes that are caught in-between skin joint nodes, and are not defined - // as joints by any skin - Vector<GLTFNodeIndex> non_joints; - - // The roots of the skin. In the case of multiple roots, their parent *must* - // be the same (the roots must be siblings) - Vector<GLTFNodeIndex> roots; - - // The GLTF Skeleton this Skin points to (after we determine skeletons) - GLTFSkeletonIndex skeleton = -1; - - // A mapping from the joint indices (in the order of joints_original) to the - // Godot Skeleton's bone_indices - Map<int, int> joint_i_to_bone_i; - Map<int, StringName> joint_i_to_name; - - // The Actual Skin that will be created as a mapping between the IBM's of this skin - // to the generated skeleton for the mesh instances. - Ref<Skin> godot_skin; - }; - - struct GLTFMesh { - Ref<EditorSceneImporterMesh> mesh; - Vector<float> blend_weights; - }; - - struct GLTFCamera { - bool perspective = true; - float fov_size = 75; - float zfar = 4000; - float znear = 0.05; - }; - - struct GLTFLight { - Color color = Color(1.0f, 1.0f, 1.0f); - float intensity = 1.0f; - String type = ""; - float range = Math_INF; - float inner_cone_angle = 0.0f; - float outer_cone_angle = Math_PI / 4.0; - }; - - struct GLTFAnimation { - bool loop = false; - - enum Interpolation { - INTERP_LINEAR, - INTERP_STEP, - INTERP_CATMULLROMSPLINE, - INTERP_CUBIC_SPLINE - }; - - template <class T> - struct Channel { - Interpolation interpolation; - Vector<float> times; - Vector<T> values; - }; - - struct Track { - Channel<Vector3> translation_track; - Channel<Quat> rotation_track; - Channel<Vector3> scale_track; - Vector<Channel<float>> weight_tracks; - }; - - String name; - - Map<int, Track> tracks; - }; - - struct GLTFState { - Dictionary json; - int major_version = 0; - int minor_version = 0; - Vector<uint8_t> glb_data; - - bool use_named_skin_binds = false; - - Vector<GLTFNode *> nodes; - Vector<Vector<uint8_t>> buffers; - Vector<GLTFBufferView> buffer_views; - Vector<GLTFAccessor> accessors; - - Vector<GLTFMesh> meshes; //meshes are loaded directly, no reason not to. 
- Vector<Ref<StandardMaterial3D>> materials; - - String scene_name; - Vector<int> root_nodes; - - Vector<GLTFTexture> textures; - Vector<Ref<Texture2D>> images; - - Vector<GLTFSkin> skins; - Vector<GLTFCamera> cameras; - Vector<GLTFLight> lights; - - Set<String> unique_names; - - Vector<GLTFSkeleton> skeletons; - Vector<GLTFAnimation> animations; - - Map<GLTFNodeIndex, Node *> scene_nodes; - - // EditorSceneImporter::ImportFlags - uint32_t import_flags; - - ~GLTFState() { - for (int i = 0; i < nodes.size(); i++) { - memdelete(nodes[i]); - } - } - }; - - String _sanitize_scene_name(const String &name); - String _gen_unique_name(GLTFState &state, const String &p_name); - - String _sanitize_bone_name(const String &name); - String _gen_unique_bone_name(GLTFState &state, const GLTFSkeletonIndex skel_i, const String &p_name); - - Ref<Texture2D> _get_texture(GLTFState &state, const GLTFTextureIndex p_texture); - - Error _parse_json(const String &p_path, GLTFState &state); - Error _parse_glb(const String &p_path, GLTFState &state); - - Error _parse_scenes(GLTFState &state); - Error _parse_nodes(GLTFState &state); - - void _compute_node_heights(GLTFState &state); - - Error _parse_buffers(GLTFState &state, const String &p_base_path); - Error _parse_buffer_views(GLTFState &state); - GLTFType _get_type_from_str(const String &p_string); - Error _parse_accessors(GLTFState &state); - Error _decode_buffer_view(GLTFState &state, double *dst, const GLTFBufferViewIndex p_buffer_view, const int skip_every, const int skip_bytes, const int element_size, const int count, const GLTFType type, const int component_count, const int component_type, const int component_size, const bool normalized, const int byte_offset, const bool for_vertex); - - Vector<double> _decode_accessor(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<float> _decode_accessor_as_floats(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<int> _decode_accessor_as_ints(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Vector2> _decode_accessor_as_vec2(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Vector3> _decode_accessor_as_vec3(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Color> _decode_accessor_as_color(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Quat> _decode_accessor_as_quat(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Transform2D> _decode_accessor_as_xform2d(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Basis> _decode_accessor_as_basis(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - Vector<Transform> _decode_accessor_as_xform(GLTFState &state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex); - - Error _parse_meshes(GLTFState &state); - Error _parse_images(GLTFState &state, const String &p_base_path); - Error _parse_textures(GLTFState &state); - - Error _parse_materials(GLTFState &state); - - GLTFNodeIndex _find_highest_node(GLTFState &state, const Vector<GLTFNodeIndex> &subset); - - bool _capture_nodes_in_skin(GLTFState &state, GLTFSkin &skin, const GLTFNodeIndex node_index); - void _capture_nodes_for_multirooted_skin(GLTFState &state, GLTFSkin &skin); - Error _expand_skin(GLTFState &state, GLTFSkin &skin); - Error _verify_skin(GLTFState &state, GLTFSkin &skin); - 
Error _parse_skins(GLTFState &state); - - Error _determine_skeletons(GLTFState &state); - Error _reparent_non_joint_skeleton_subtrees(GLTFState &state, GLTFSkeleton &skeleton, const Vector<GLTFNodeIndex> &non_joints); - Error _reparent_to_fake_joint(GLTFState &state, GLTFSkeleton &skeleton, const GLTFNodeIndex node_index); - Error _determine_skeleton_roots(GLTFState &state, const GLTFSkeletonIndex skel_i); - - Error _create_skeletons(GLTFState &state); - Error _map_skin_joints_indices_to_skeleton_bone_indices(GLTFState &state); - - Error _create_skins(GLTFState &state); - bool _skins_are_same(const Ref<Skin> &skin_a, const Ref<Skin> &skin_b); - void _remove_duplicate_skins(GLTFState &state); - - Error _parse_cameras(GLTFState &state); - Error _parse_lights(GLTFState &state); - Error _parse_animations(GLTFState &state); - - BoneAttachment3D *_generate_bone_attachment(GLTFState &state, Skeleton3D *skeleton, const GLTFNodeIndex node_index); - EditorSceneImporterMeshNode *_generate_mesh_instance(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index); - Camera3D *_generate_camera(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index); - Light3D *_generate_light(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index); - Node3D *_generate_spatial(GLTFState &state, Node *scene_parent, const GLTFNodeIndex node_index); - - void _generate_scene_node(GLTFState &state, Node *scene_parent, Node3D *scene_root, const GLTFNodeIndex node_index); - Node3D *_generate_scene(GLTFState &state, const int p_bake_fps); - - void _process_mesh_instances(GLTFState &state, Node3D *scene_root); - - void _assign_scene_names(GLTFState &state); - - template <class T> - T _interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, const float p_time, const GLTFAnimation::Interpolation p_interp); - - void _import_animation(GLTFState &state, AnimationPlayer *ap, const GLTFAnimationIndex index, const int bake_fps); - -public: - virtual uint32_t get_import_flags() const override; - virtual void get_extensions(List<String> *r_extensions) const override; - virtual Node *import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, List<String> *r_missing_deps = nullptr, Error *r_err = nullptr) override; - virtual Ref<Animation> import_animation(const String &p_path, uint32_t p_flags, int p_bake_fps) override; - - EditorSceneImporterGLTF(); -}; - -#endif // EDITOR_SCENE_IMPORTER_GLTF_H diff --git a/editor/import/resource_importer_obj.cpp b/editor/import/resource_importer_obj.cpp index 30c7b2920a..e7170ef61c 100644 --- a/editor/import/resource_importer_obj.cpp +++ b/editor/import/resource_importer_obj.cpp @@ -32,6 +32,8 @@ #include "core/io/resource_saver.h" #include "core/os/file_access.h" +#include "editor/import/scene_importer_mesh.h" +#include "editor/import/scene_importer_mesh_node_3d.h" #include "scene/3d/mesh_instance_3d.h" #include "scene/3d/node_3d.h" #include "scene/resources/mesh.h" @@ -444,7 +446,7 @@ Node *EditorOBJImporter::import_scene(const String &p_path, uint32_t p_flags, in mesh->add_surface(m->surface_get_primitive_type(i), m->surface_get_arrays(i), Array(), Dictionary(), m->surface_get_material(i)); } - EditorSceneImporterMeshNode *mi = memnew(EditorSceneImporterMeshNode); + EditorSceneImporterMeshNode3D *mi = memnew(EditorSceneImporterMeshNode3D); mi->set_mesh(mesh); mi->set_name(E->get()->get_name()); scene->add_child(mi); diff --git a/editor/import/resource_importer_scene.cpp b/editor/import/resource_importer_scene.cpp index 
b591627660..d36d811ce8 100644 --- a/editor/import/resource_importer_scene.cpp +++ b/editor/import/resource_importer_scene.cpp @@ -32,6 +32,7 @@ #include "core/io/resource_saver.h" #include "editor/editor_node.h" +#include "editor/import/scene_importer_mesh_node_3d.h" #include "scene/3d/collision_shape_3d.h" #include "scene/3d/mesh_instance_3d.h" #include "scene/3d/navigation_3d.h" @@ -120,345 +121,6 @@ void EditorSceneImporter::_bind_methods() { BIND_CONSTANT(IMPORT_USE_COMPRESSION); } -//////////////////////////////////////////////// - -void EditorSceneImporterMesh::add_blend_shape(const String &p_name) { - ERR_FAIL_COND(surfaces.size() > 0); - blend_shapes.push_back(p_name); -} - -int EditorSceneImporterMesh::get_blend_shape_count() const { - return blend_shapes.size(); -} - -String EditorSceneImporterMesh::get_blend_shape_name(int p_blend_shape) const { - ERR_FAIL_INDEX_V(p_blend_shape, blend_shapes.size(), String()); - return blend_shapes[p_blend_shape]; -} - -void EditorSceneImporterMesh::set_blend_shape_mode(Mesh::BlendShapeMode p_blend_shape_mode) { - blend_shape_mode = p_blend_shape_mode; -} -Mesh::BlendShapeMode EditorSceneImporterMesh::get_blend_shape_mode() const { - return blend_shape_mode; -} - -void EditorSceneImporterMesh::add_surface(Mesh::PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes, const Dictionary &p_lods, const Ref<Material> &p_material, const String &p_name) { - ERR_FAIL_COND(p_blend_shapes.size() != blend_shapes.size()); - ERR_FAIL_COND(p_arrays.size() != Mesh::ARRAY_MAX); - Surface s; - s.primitive = p_primitive; - s.arrays = p_arrays; - s.name = p_name; - - for (int i = 0; i < blend_shapes.size(); i++) { - Array bsdata = p_blend_shapes[i]; - ERR_FAIL_COND(bsdata.size() != Mesh::ARRAY_MAX); - Surface::BlendShape bs; - bs.arrays = bsdata; - s.blend_shape_data.push_back(bs); - } - - List<Variant> lods; - p_lods.get_key_list(&lods); - for (List<Variant>::Element *E = lods.front(); E; E = E->next()) { - ERR_CONTINUE(!E->get().is_num()); - Surface::LOD lod; - lod.distance = E->get(); - lod.indices = p_lods[E->get()]; - ERR_CONTINUE(lod.indices.size() == 0); - s.lods.push_back(lod); - } - - s.material = p_material; - - surfaces.push_back(s); - mesh.unref(); -} -int EditorSceneImporterMesh::get_surface_count() const { - return surfaces.size(); -} - -Mesh::PrimitiveType EditorSceneImporterMesh::get_surface_primitive_type(int p_surface) { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Mesh::PRIMITIVE_MAX); - return surfaces[p_surface].primitive; -} -Array EditorSceneImporterMesh::get_surface_arrays(int p_surface) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Array()); - return surfaces[p_surface].arrays; -} -String EditorSceneImporterMesh::get_surface_name(int p_surface) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), String()); - return surfaces[p_surface].name; -} -Array EditorSceneImporterMesh::get_surface_blend_shape_arrays(int p_surface, int p_blend_shape) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Array()); - ERR_FAIL_INDEX_V(p_blend_shape, surfaces[p_surface].blend_shape_data.size(), Array()); - return surfaces[p_surface].blend_shape_data[p_blend_shape].arrays; -} -int EditorSceneImporterMesh::get_surface_lod_count(int p_surface) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), 0); - return surfaces[p_surface].lods.size(); -} -Vector<int> EditorSceneImporterMesh::get_surface_lod_indices(int p_surface, int p_lod) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Vector<int>()); - 
ERR_FAIL_INDEX_V(p_lod, surfaces[p_surface].lods.size(), Vector<int>()); - - return surfaces[p_surface].lods[p_lod].indices; -} - -float EditorSceneImporterMesh::get_surface_lod_size(int p_surface, int p_lod) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), 0); - ERR_FAIL_INDEX_V(p_lod, surfaces[p_surface].lods.size(), 0); - return surfaces[p_surface].lods[p_lod].distance; -} - -Ref<Material> EditorSceneImporterMesh::get_surface_material(int p_surface) const { - ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Ref<Material>()); - return surfaces[p_surface].material; -} - -void EditorSceneImporterMesh::generate_lods() { - if (!SurfaceTool::simplify_func) { - return; - } - - for (int i = 0; i < surfaces.size(); i++) { - if (surfaces[i].primitive != Mesh::PRIMITIVE_TRIANGLES) { - continue; - } - - surfaces.write[i].lods.clear(); - Vector<Vector3> vertices = surfaces[i].arrays[RS::ARRAY_VERTEX]; - Vector<int> indices = surfaces[i].arrays[RS::ARRAY_INDEX]; - if (indices.size() == 0) { - continue; //no lods if no indices - } - uint32_t vertex_count = vertices.size(); - const Vector3 *vertices_ptr = vertices.ptr(); - - int min_indices = 10; - int index_target = indices.size() / 2; - print_line("total: " + itos(indices.size())); - while (index_target > min_indices) { - float error; - Vector<int> new_indices; - new_indices.resize(indices.size()); - size_t new_len = SurfaceTool::simplify_func((unsigned int *)new_indices.ptrw(), (const unsigned int *)indices.ptr(), indices.size(), (const float *)vertices_ptr, vertex_count, sizeof(Vector3), index_target, 1e20, &error); - print_line("shoot for " + itos(index_target) + ", got " + itos(new_len) + " distance " + rtos(error)); - if ((int)new_len > (index_target * 120 / 100)) { - break; // 20 percent tolerance - } - new_indices.resize(new_len); - Surface::LOD lod; - lod.distance = error; - lod.indices = new_indices; - surfaces.write[i].lods.push_back(lod); - index_target /= 2; - } - } -} - -bool EditorSceneImporterMesh::has_mesh() const { - return mesh.is_valid(); -} - -Ref<ArrayMesh> EditorSceneImporterMesh::get_mesh() { - ERR_FAIL_COND_V(surfaces.size() == 0, Ref<ArrayMesh>()); - - if (mesh.is_null()) { - mesh.instance(); - for (int i = 0; i < blend_shapes.size(); i++) { - mesh->add_blend_shape(blend_shapes[i]); - } - mesh->set_blend_shape_mode(blend_shape_mode); - for (int i = 0; i < surfaces.size(); i++) { - Array bs_data; - if (surfaces[i].blend_shape_data.size()) { - for (int j = 0; j < surfaces[i].blend_shape_data.size(); j++) { - bs_data.push_back(surfaces[i].blend_shape_data[j].arrays); - } - } - Dictionary lods; - if (surfaces[i].lods.size()) { - for (int j = 0; j < surfaces[i].lods.size(); j++) { - lods[surfaces[i].lods[j].distance] = surfaces[i].lods[j].indices; - } - } - - mesh->add_surface_from_arrays(surfaces[i].primitive, surfaces[i].arrays, bs_data, lods); - if (surfaces[i].material.is_valid()) { - mesh->surface_set_material(mesh->get_surface_count() - 1, surfaces[i].material); - } - if (surfaces[i].name != String()) { - mesh->surface_set_name(mesh->get_surface_count() - 1, surfaces[i].name); - } - } - } - - return mesh; -} - -void EditorSceneImporterMesh::clear() { - surfaces.clear(); - blend_shapes.clear(); - mesh.unref(); -} - -void EditorSceneImporterMesh::_set_data(const Dictionary &p_data) { - clear(); - if (p_data.has("blend_shape_names")) { - blend_shapes = p_data["blend_shape_names"]; - } - if (p_data.has("surfaces")) { - Array surface_arr = p_data["surfaces"]; - for (int i = 0; i < surface_arr.size(); i++) { - Dictionary s = 
surface_arr[i]; - ERR_CONTINUE(!s.has("primitive")); - ERR_CONTINUE(!s.has("arrays")); - Mesh::PrimitiveType prim = Mesh::PrimitiveType(int(s["primitive"])); - ERR_CONTINUE(prim >= Mesh::PRIMITIVE_MAX); - Array arr = s["arrays"]; - Dictionary lods; - String name; - if (s.has("name")) { - name = s["name"]; - } - if (s.has("lods")) { - lods = s["lods"]; - } - Array blend_shapes; - if (s.has("blend_shapes")) { - blend_shapes = s["blend_shapes"]; - } - Ref<Material> material; - if (s.has("material")) { - material = s["material"]; - } - add_surface(prim, arr, blend_shapes, lods, material, name); - } - } -} -Dictionary EditorSceneImporterMesh::_get_data() const { - Dictionary data; - if (blend_shapes.size()) { - data["blend_shape_names"] = blend_shapes; - } - Array surface_arr; - for (int i = 0; i < surfaces.size(); i++) { - Dictionary d; - d["primitive"] = surfaces[i].primitive; - d["arrays"] = surfaces[i].arrays; - if (surfaces[i].blend_shape_data.size()) { - Array bs_data; - for (int j = 0; j < surfaces[i].blend_shape_data.size(); j++) { - bs_data.push_back(surfaces[i].blend_shape_data[j].arrays); - } - d["blend_shapes"] = bs_data; - } - if (surfaces[i].lods.size()) { - Dictionary lods; - for (int j = 0; j < surfaces[i].lods.size(); j++) { - lods[surfaces[i].lods[j].distance] = surfaces[i].lods[j].indices; - } - d["lods"] = lods; - } - - if (surfaces[i].material.is_valid()) { - d["material"] = surfaces[i].material; - } - - if (surfaces[i].name != String()) { - d["name"] = surfaces[i].name; - } - - surface_arr.push_back(d); - } - data["surfaces"] = surface_arr; - return data; -} - -void EditorSceneImporterMesh::_bind_methods() { - ClassDB::bind_method(D_METHOD("add_blend_shape", "name"), &EditorSceneImporterMesh::add_blend_shape); - ClassDB::bind_method(D_METHOD("get_blend_shape_count"), &EditorSceneImporterMesh::get_blend_shape_count); - ClassDB::bind_method(D_METHOD("get_blend_shape_name", "blend_shape_idx"), &EditorSceneImporterMesh::get_blend_shape_name); - - ClassDB::bind_method(D_METHOD("set_blend_shape_mode", "mode"), &EditorSceneImporterMesh::set_blend_shape_mode); - ClassDB::bind_method(D_METHOD("get_blend_shape_mode"), &EditorSceneImporterMesh::get_blend_shape_mode); - - ClassDB::bind_method(D_METHOD("add_surface", "primitive", "arrays", "blend_shapes", "lods", "material"), &EditorSceneImporterMesh::add_surface, DEFVAL(Array()), DEFVAL(Dictionary()), DEFVAL(Ref<Material>()), DEFVAL(String())); - - ClassDB::bind_method(D_METHOD("get_surface_count"), &EditorSceneImporterMesh::get_surface_count); - ClassDB::bind_method(D_METHOD("get_surface_primitive_type", "surface_idx"), &EditorSceneImporterMesh::get_surface_primitive_type); - ClassDB::bind_method(D_METHOD("get_surface_name", "surface_idx"), &EditorSceneImporterMesh::get_surface_name); - ClassDB::bind_method(D_METHOD("get_surface_arrays", "surface_idx"), &EditorSceneImporterMesh::get_surface_arrays); - ClassDB::bind_method(D_METHOD("get_surface_blend_shape_arrays", "surface_idx", "blend_shape_idx"), &EditorSceneImporterMesh::get_surface_blend_shape_arrays); - ClassDB::bind_method(D_METHOD("get_surface_lod_count", "surface_idx"), &EditorSceneImporterMesh::get_surface_lod_count); - ClassDB::bind_method(D_METHOD("get_surface_lod_size", "surface_idx", "lod_idx"), &EditorSceneImporterMesh::get_surface_lod_size); - ClassDB::bind_method(D_METHOD("get_surface_lod_indices", "surface_idx", "lod_idx"), &EditorSceneImporterMesh::get_surface_lod_indices); - ClassDB::bind_method(D_METHOD("get_surface_material", "surface_idx"), 
&EditorSceneImporterMesh::get_surface_material); - - ClassDB::bind_method(D_METHOD("get_mesh"), &EditorSceneImporterMesh::get_mesh); - ClassDB::bind_method(D_METHOD("clear"), &EditorSceneImporterMesh::clear); - - ClassDB::bind_method(D_METHOD("_set_data", "data"), &EditorSceneImporterMesh::_set_data); - ClassDB::bind_method(D_METHOD("_get_data"), &EditorSceneImporterMesh::_get_data); - - ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "_data", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR), "_set_data", "_get_data"); -} - -void EditorSceneImporterMeshNode::set_mesh(const Ref<EditorSceneImporterMesh> &p_mesh) { - mesh = p_mesh; -} -Ref<EditorSceneImporterMesh> EditorSceneImporterMeshNode::get_mesh() const { - return mesh; -} - -void EditorSceneImporterMeshNode::set_skin(const Ref<Skin> &p_skin) { - skin = p_skin; -} -Ref<Skin> EditorSceneImporterMeshNode::get_skin() const { - return skin; -} - -void EditorSceneImporterMeshNode::set_surface_material(int p_idx, const Ref<Material> &p_material) { - ERR_FAIL_COND(p_idx < 0); - if (p_idx >= surface_materials.size()) { - surface_materials.resize(p_idx + 1); - } - - surface_materials.write[p_idx] = p_material; -} -Ref<Material> EditorSceneImporterMeshNode::get_surface_material(int p_idx) const { - ERR_FAIL_COND_V(p_idx < 0, Ref<Material>()); - if (p_idx >= surface_materials.size()) { - return Ref<Material>(); - } - return surface_materials[p_idx]; -} - -void EditorSceneImporterMeshNode::set_skeleton_path(const NodePath &p_path) { - skeleton_path = p_path; -} -NodePath EditorSceneImporterMeshNode::get_skeleton_path() const { - return skeleton_path; -} - -void EditorSceneImporterMeshNode::_bind_methods() { - ClassDB::bind_method(D_METHOD("set_mesh", "mesh"), &EditorSceneImporterMeshNode::set_mesh); - ClassDB::bind_method(D_METHOD("get_mesh"), &EditorSceneImporterMeshNode::get_mesh); - - ClassDB::bind_method(D_METHOD("set_skin", "skin"), &EditorSceneImporterMeshNode::set_skin); - ClassDB::bind_method(D_METHOD("get_skin"), &EditorSceneImporterMeshNode::get_skin); - - ClassDB::bind_method(D_METHOD("set_skeleton_path", "skeleton_path"), &EditorSceneImporterMeshNode::set_skeleton_path); - ClassDB::bind_method(D_METHOD("get_skeleton_path"), &EditorSceneImporterMeshNode::get_skeleton_path); - - ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "mesh", PROPERTY_HINT_RESOURCE_TYPE, "EditorSceneImporterMesh"), "set_mesh", "get_mesh"); - ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "skin", PROPERTY_HINT_RESOURCE_TYPE, "Skin"), "set_skin", "get_skin"); - ADD_PROPERTY(PropertyInfo(Variant::NODE_PATH, "skeleton_path", PROPERTY_HINT_NODE_PATH_VALID_TYPES, "Skeleton"), "set_skeleton_path", "get_skeleton_path"); -} - ///////////////////////////////// void EditorScenePostImport::_bind_methods() { BIND_VMETHOD(MethodInfo(Variant::OBJECT, "post_import", PropertyInfo(Variant::OBJECT, "scene"))); @@ -1560,27 +1222,30 @@ Ref<Animation> ResourceImporterScene::import_animation_from_other_importer(Edito } void ResourceImporterScene::_generate_meshes(Node *p_node, bool p_generate_lods) { - EditorSceneImporterMeshNode *src_mesh = Object::cast_to<EditorSceneImporterMeshNode>(p_node); - if (src_mesh != nullptr) { + EditorSceneImporterMeshNode3D *src_mesh_node = Object::cast_to<EditorSceneImporterMeshNode3D>(p_node); + if (src_mesh_node) { //is mesh MeshInstance3D *mesh_node = memnew(MeshInstance3D); - mesh_node->set_transform(src_mesh->get_transform()); - mesh_node->set_skin(src_mesh->get_skin()); - mesh_node->set_skeleton_path(src_mesh->get_skeleton_path()); - - Ref<ArrayMesh> mesh; - 
if (!src_mesh->get_mesh()->has_mesh()) { - if (p_generate_lods) { - src_mesh->get_mesh()->generate_lods(); + mesh_node->set_name(src_mesh_node->get_name()); + mesh_node->set_transform(src_mesh_node->get_transform()); + mesh_node->set_skin(src_mesh_node->get_skin()); + mesh_node->set_skeleton_path(src_mesh_node->get_skeleton_path()); + if (src_mesh_node->get_mesh().is_valid()) { + Ref<ArrayMesh> mesh; + if (!src_mesh_node->get_mesh()->has_mesh()) { + //do mesh processing + if (p_generate_lods) { + src_mesh_node->get_mesh()->generate_lods(); + } + } + mesh = src_mesh_node->get_mesh()->get_mesh(); + if (mesh.is_valid()) { + mesh_node->set_mesh(mesh); + for (int i = 0; i < mesh->get_surface_count(); i++) { + mesh_node->set_surface_material(i, src_mesh_node->get_surface_material(i)); + } } - //do mesh processing - } - mesh = src_mesh->get_mesh()->get_mesh(); - mesh_node->set_mesh(mesh); - for (int i = 0; i < mesh->get_surface_count(); i++) { - mesh_node->set_surface_material(i, src_mesh->get_surface_material(i)); } - p_node->replace_by(mesh_node); memdelete(p_node); p_node = mesh_node; diff --git a/editor/import/resource_importer_scene.h b/editor/import/resource_importer_scene.h index aef6c0ac50..5b70f5bd81 100644 --- a/editor/import/resource_importer_scene.h +++ b/editor/import/resource_importer_scene.h @@ -90,92 +90,6 @@ public: EditorScenePostImport(); }; -// The following classes are used by importers instead of ArrayMesh and MeshInstance3D -// so the data is not reginstered (hence, quality loss), importing happens faster and -// its easier to modify before saving - -class EditorSceneImporterMesh : public Resource { - GDCLASS(EditorSceneImporterMesh, Resource) - - struct Surface { - Mesh::PrimitiveType primitive; - Array arrays; - struct BlendShape { - Array arrays; - }; - Vector<BlendShape> blend_shape_data; - struct LOD { - Vector<int> indices; - float distance; - }; - Vector<LOD> lods; - Ref<Material> material; - String name; - }; - Vector<Surface> surfaces; - Vector<String> blend_shapes; - Mesh::BlendShapeMode blend_shape_mode = Mesh::BLEND_SHAPE_MODE_NORMALIZED; - - Ref<ArrayMesh> mesh; - -protected: - void _set_data(const Dictionary &p_data); - Dictionary _get_data() const; - - static void _bind_methods(); - -public: - void add_blend_shape(const String &p_name); - int get_blend_shape_count() const; - String get_blend_shape_name(int p_blend_shape) const; - - void add_surface(Mesh::PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes = Array(), const Dictionary &p_lods = Dictionary(), const Ref<Material> &p_material = Ref<Material>(), const String &p_name = String()); - int get_surface_count() const; - - void set_blend_shape_mode(Mesh::BlendShapeMode p_blend_shape_mode); - Mesh::BlendShapeMode get_blend_shape_mode() const; - - Mesh::PrimitiveType get_surface_primitive_type(int p_surface); - String get_surface_name(int p_surface) const; - Array get_surface_arrays(int p_surface) const; - Array get_surface_blend_shape_arrays(int p_surface, int p_blend_shape) const; - int get_surface_lod_count(int p_surface) const; - Vector<int> get_surface_lod_indices(int p_surface, int p_lod) const; - float get_surface_lod_size(int p_surface, int p_lod) const; - Ref<Material> get_surface_material(int p_surface) const; - - void generate_lods(); - - bool has_mesh() const; - Ref<ArrayMesh> get_mesh(); - void clear(); -}; - -class EditorSceneImporterMeshNode : public Node3D { - GDCLASS(EditorSceneImporterMeshNode, Node3D) - - Ref<EditorSceneImporterMesh> mesh; - Ref<Skin> skin; - 
NodePath skeleton_path; - Vector<Ref<Material>> surface_materials; - -protected: - static void _bind_methods(); - -public: - void set_mesh(const Ref<EditorSceneImporterMesh> &p_mesh); - Ref<EditorSceneImporterMesh> get_mesh() const; - - void set_skin(const Ref<Skin> &p_skin); - Ref<Skin> get_skin() const; - - void set_surface_material(int p_idx, const Ref<Material> &p_material); - Ref<Material> get_surface_material(int p_idx) const; - - void set_skeleton_path(const NodePath &p_path); - NodePath get_skeleton_path() const; -}; - class ResourceImporterScene : public ResourceImporter { GDCLASS(ResourceImporterScene, ResourceImporter); diff --git a/editor/import/scene_importer_mesh.cpp b/editor/import/scene_importer_mesh.cpp new file mode 100644 index 0000000000..263e6afe0c --- /dev/null +++ b/editor/import/scene_importer_mesh.cpp @@ -0,0 +1,320 @@ +/*************************************************************************/ +/* scene_importer_mesh.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "scene_importer_mesh.h" + +#include "scene/resources/surface_tool.h" + +void EditorSceneImporterMesh::add_blend_shape(const String &p_name) { + ERR_FAIL_COND(surfaces.size() > 0); + blend_shapes.push_back(p_name); +} + +int EditorSceneImporterMesh::get_blend_shape_count() const { + return blend_shapes.size(); +} + +String EditorSceneImporterMesh::get_blend_shape_name(int p_blend_shape) const { + ERR_FAIL_INDEX_V(p_blend_shape, blend_shapes.size(), String()); + return blend_shapes[p_blend_shape]; +} + +void EditorSceneImporterMesh::set_blend_shape_mode(Mesh::BlendShapeMode p_blend_shape_mode) { + blend_shape_mode = p_blend_shape_mode; +} + +Mesh::BlendShapeMode EditorSceneImporterMesh::get_blend_shape_mode() const { + return blend_shape_mode; +} + +void EditorSceneImporterMesh::add_surface(Mesh::PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes, const Dictionary &p_lods, const Ref<Material> &p_material, const String &p_name) { + ERR_FAIL_COND(p_blend_shapes.size() != blend_shapes.size()); + ERR_FAIL_COND(p_arrays.size() != Mesh::ARRAY_MAX); + Surface s; + s.primitive = p_primitive; + s.arrays = p_arrays; + s.name = p_name; + + for (int i = 0; i < blend_shapes.size(); i++) { + Array bsdata = p_blend_shapes[i]; + ERR_FAIL_COND(bsdata.size() != Mesh::ARRAY_MAX); + Surface::BlendShape bs; + bs.arrays = bsdata; + s.blend_shape_data.push_back(bs); + } + + List<Variant> lods; + p_lods.get_key_list(&lods); + for (List<Variant>::Element *E = lods.front(); E; E = E->next()) { + ERR_CONTINUE(!E->get().is_num()); + Surface::LOD lod; + lod.distance = E->get(); + lod.indices = p_lods[E->get()]; + ERR_CONTINUE(lod.indices.size() == 0); + s.lods.push_back(lod); + } + + s.material = p_material; + + surfaces.push_back(s); + mesh.unref(); +} + +int EditorSceneImporterMesh::get_surface_count() const { + return surfaces.size(); +} + +Mesh::PrimitiveType EditorSceneImporterMesh::get_surface_primitive_type(int p_surface) { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Mesh::PRIMITIVE_MAX); + return surfaces[p_surface].primitive; +} +Array EditorSceneImporterMesh::get_surface_arrays(int p_surface) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Array()); + return surfaces[p_surface].arrays; +} +String EditorSceneImporterMesh::get_surface_name(int p_surface) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), String()); + return surfaces[p_surface].name; +} +Array EditorSceneImporterMesh::get_surface_blend_shape_arrays(int p_surface, int p_blend_shape) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Array()); + ERR_FAIL_INDEX_V(p_blend_shape, surfaces[p_surface].blend_shape_data.size(), Array()); + return surfaces[p_surface].blend_shape_data[p_blend_shape].arrays; +} +int EditorSceneImporterMesh::get_surface_lod_count(int p_surface) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), 0); + return surfaces[p_surface].lods.size(); +} +Vector<int> EditorSceneImporterMesh::get_surface_lod_indices(int p_surface, int p_lod) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Vector<int>()); + ERR_FAIL_INDEX_V(p_lod, surfaces[p_surface].lods.size(), Vector<int>()); + + return surfaces[p_surface].lods[p_lod].indices; +} + +float EditorSceneImporterMesh::get_surface_lod_size(int p_surface, int p_lod) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), 0); + ERR_FAIL_INDEX_V(p_lod, surfaces[p_surface].lods.size(), 0); + return surfaces[p_surface].lods[p_lod].distance; +} + 
+Ref<Material> EditorSceneImporterMesh::get_surface_material(int p_surface) const { + ERR_FAIL_INDEX_V(p_surface, surfaces.size(), Ref<Material>()); + return surfaces[p_surface].material; +} + +void EditorSceneImporterMesh::generate_lods() { + if (!SurfaceTool::simplify_func) { + return; + } + + for (int i = 0; i < surfaces.size(); i++) { + if (surfaces[i].primitive != Mesh::PRIMITIVE_TRIANGLES) { + continue; + } + + surfaces.write[i].lods.clear(); + Vector<Vector3> vertices = surfaces[i].arrays[RS::ARRAY_VERTEX]; + Vector<int> indices = surfaces[i].arrays[RS::ARRAY_INDEX]; + if (indices.size() == 0) { + continue; //no lods if no indices + } + uint32_t vertex_count = vertices.size(); + const Vector3 *vertices_ptr = vertices.ptr(); + + int min_indices = 10; + int index_target = indices.size() / 2; + print_line("total: " + itos(indices.size())); + while (index_target > min_indices) { + float error; + Vector<int> new_indices; + new_indices.resize(indices.size()); + size_t new_len = SurfaceTool::simplify_func((unsigned int *)new_indices.ptrw(), (const unsigned int *)indices.ptr(), indices.size(), (const float *)vertices_ptr, vertex_count, sizeof(Vector3), index_target, 1e20, &error); + print_line("shoot for " + itos(index_target) + ", got " + itos(new_len) + " distance " + rtos(error)); + if ((int)new_len > (index_target * 120 / 100)) { + break; // 20 percent tolerance + } + new_indices.resize(new_len); + Surface::LOD lod; + lod.distance = error; + lod.indices = new_indices; + surfaces.write[i].lods.push_back(lod); + index_target /= 2; + } + } +} + +bool EditorSceneImporterMesh::has_mesh() const { + return mesh.is_valid(); +} + +Ref<ArrayMesh> EditorSceneImporterMesh::get_mesh() { + ERR_FAIL_COND_V(surfaces.size() == 0, Ref<ArrayMesh>()); + + if (mesh.is_null()) { + mesh.instance(); + for (int i = 0; i < blend_shapes.size(); i++) { + mesh->add_blend_shape(blend_shapes[i]); + } + mesh->set_blend_shape_mode(blend_shape_mode); + for (int i = 0; i < surfaces.size(); i++) { + Array bs_data; + if (surfaces[i].blend_shape_data.size()) { + for (int j = 0; j < surfaces[i].blend_shape_data.size(); j++) { + bs_data.push_back(surfaces[i].blend_shape_data[j].arrays); + } + } + Dictionary lods; + if (surfaces[i].lods.size()) { + for (int j = 0; j < surfaces[i].lods.size(); j++) { + lods[surfaces[i].lods[j].distance] = surfaces[i].lods[j].indices; + } + } + + mesh->add_surface_from_arrays(surfaces[i].primitive, surfaces[i].arrays, bs_data, lods); + if (surfaces[i].material.is_valid()) { + mesh->surface_set_material(mesh->get_surface_count() - 1, surfaces[i].material); + } + if (surfaces[i].name != String()) { + mesh->surface_set_name(mesh->get_surface_count() - 1, surfaces[i].name); + } + } + } + + return mesh; +} + +void EditorSceneImporterMesh::clear() { + surfaces.clear(); + blend_shapes.clear(); + mesh.unref(); +} + +void EditorSceneImporterMesh::_set_data(const Dictionary &p_data) { + clear(); + if (p_data.has("blend_shape_names")) { + blend_shapes = p_data["blend_shape_names"]; + } + if (p_data.has("surfaces")) { + Array surface_arr = p_data["surfaces"]; + for (int i = 0; i < surface_arr.size(); i++) { + Dictionary s = surface_arr[i]; + ERR_CONTINUE(!s.has("primitive")); + ERR_CONTINUE(!s.has("arrays")); + Mesh::PrimitiveType prim = Mesh::PrimitiveType(int(s["primitive"])); + ERR_CONTINUE(prim >= Mesh::PRIMITIVE_MAX); + Array arr = s["arrays"]; + Dictionary lods; + String name; + if (s.has("name")) { + name = s["name"]; + } + if (s.has("lods")) { + lods = s["lods"]; + } + Array blend_shapes; + if 
(s.has("blend_shapes")) { + blend_shapes = s["blend_shapes"]; + } + Ref<Material> material; + if (s.has("material")) { + material = s["material"]; + } + add_surface(prim, arr, blend_shapes, lods, material, name); + } + } +} +Dictionary EditorSceneImporterMesh::_get_data() const { + Dictionary data; + if (blend_shapes.size()) { + data["blend_shape_names"] = blend_shapes; + } + Array surface_arr; + for (int i = 0; i < surfaces.size(); i++) { + Dictionary d; + d["primitive"] = surfaces[i].primitive; + d["arrays"] = surfaces[i].arrays; + if (surfaces[i].blend_shape_data.size()) { + Array bs_data; + for (int j = 0; j < surfaces[i].blend_shape_data.size(); j++) { + bs_data.push_back(surfaces[i].blend_shape_data[j].arrays); + } + d["blend_shapes"] = bs_data; + } + if (surfaces[i].lods.size()) { + Dictionary lods; + for (int j = 0; j < surfaces[i].lods.size(); j++) { + lods[surfaces[i].lods[j].distance] = surfaces[i].lods[j].indices; + } + d["lods"] = lods; + } + + if (surfaces[i].material.is_valid()) { + d["material"] = surfaces[i].material; + } + + if (surfaces[i].name != String()) { + d["name"] = surfaces[i].name; + } + + surface_arr.push_back(d); + } + data["surfaces"] = surface_arr; + return data; +} + +void EditorSceneImporterMesh::_bind_methods() { + ClassDB::bind_method(D_METHOD("add_blend_shape", "name"), &EditorSceneImporterMesh::add_blend_shape); + ClassDB::bind_method(D_METHOD("get_blend_shape_count"), &EditorSceneImporterMesh::get_blend_shape_count); + ClassDB::bind_method(D_METHOD("get_blend_shape_name", "blend_shape_idx"), &EditorSceneImporterMesh::get_blend_shape_name); + + ClassDB::bind_method(D_METHOD("set_blend_shape_mode", "mode"), &EditorSceneImporterMesh::set_blend_shape_mode); + ClassDB::bind_method(D_METHOD("get_blend_shape_mode"), &EditorSceneImporterMesh::get_blend_shape_mode); + + ClassDB::bind_method(D_METHOD("add_surface", "primitive", "arrays", "blend_shapes", "lods", "material"), &EditorSceneImporterMesh::add_surface, DEFVAL(Array()), DEFVAL(Dictionary()), DEFVAL(Ref<Material>()), DEFVAL(String())); + + ClassDB::bind_method(D_METHOD("get_surface_count"), &EditorSceneImporterMesh::get_surface_count); + ClassDB::bind_method(D_METHOD("get_surface_primitive_type", "surface_idx"), &EditorSceneImporterMesh::get_surface_primitive_type); + ClassDB::bind_method(D_METHOD("get_surface_name", "surface_idx"), &EditorSceneImporterMesh::get_surface_name); + ClassDB::bind_method(D_METHOD("get_surface_arrays", "surface_idx"), &EditorSceneImporterMesh::get_surface_arrays); + ClassDB::bind_method(D_METHOD("get_surface_blend_shape_arrays", "surface_idx", "blend_shape_idx"), &EditorSceneImporterMesh::get_surface_blend_shape_arrays); + ClassDB::bind_method(D_METHOD("get_surface_lod_count", "surface_idx"), &EditorSceneImporterMesh::get_surface_lod_count); + ClassDB::bind_method(D_METHOD("get_surface_lod_size", "surface_idx", "lod_idx"), &EditorSceneImporterMesh::get_surface_lod_size); + ClassDB::bind_method(D_METHOD("get_surface_lod_indices", "surface_idx", "lod_idx"), &EditorSceneImporterMesh::get_surface_lod_indices); + ClassDB::bind_method(D_METHOD("get_surface_material", "surface_idx"), &EditorSceneImporterMesh::get_surface_material); + + ClassDB::bind_method(D_METHOD("get_mesh"), &EditorSceneImporterMesh::get_mesh); + ClassDB::bind_method(D_METHOD("clear"), &EditorSceneImporterMesh::clear); + + ClassDB::bind_method(D_METHOD("_set_data", "data"), &EditorSceneImporterMesh::_set_data); + ClassDB::bind_method(D_METHOD("_get_data"), &EditorSceneImporterMesh::_get_data); + + 
ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "_data", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR), "_set_data", "_get_data"); +} diff --git a/editor/import/scene_importer_mesh.h b/editor/import/scene_importer_mesh.h new file mode 100644 index 0000000000..7efe2f2ffb --- /dev/null +++ b/editor/import/scene_importer_mesh.h @@ -0,0 +1,96 @@ +/*************************************************************************/ +/* scene_importer_mesh.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef EDITOR_SCENE_IMPORTER_MESH_H +#define EDITOR_SCENE_IMPORTER_MESH_H + +#include "core/io/resource.h" +#include "scene/resources/mesh.h" +// The following classes are used by importers instead of ArrayMesh and MeshInstance3D +// so the data is not registered (hence, quality loss), importing happens faster and +// its easier to modify before saving + +class EditorSceneImporterMesh : public Resource { + GDCLASS(EditorSceneImporterMesh, Resource) + + struct Surface { + Mesh::PrimitiveType primitive; + Array arrays; + struct BlendShape { + Array arrays; + }; + Vector<BlendShape> blend_shape_data; + struct LOD { + Vector<int> indices; + float distance; + }; + Vector<LOD> lods; + Ref<Material> material; + String name; + }; + Vector<Surface> surfaces; + Vector<String> blend_shapes; + Mesh::BlendShapeMode blend_shape_mode = Mesh::BLEND_SHAPE_MODE_NORMALIZED; + + Ref<ArrayMesh> mesh; + +protected: + void _set_data(const Dictionary &p_data); + Dictionary _get_data() const; + + static void _bind_methods(); + +public: + void add_blend_shape(const String &p_name); + int get_blend_shape_count() const; + String get_blend_shape_name(int p_blend_shape) const; + + void add_surface(Mesh::PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes = Array(), const Dictionary &p_lods = Dictionary(), const Ref<Material> &p_material = Ref<Material>(), const String &p_name = String()); + int get_surface_count() const; + + void set_blend_shape_mode(Mesh::BlendShapeMode p_blend_shape_mode); + Mesh::BlendShapeMode get_blend_shape_mode() const; + + Mesh::PrimitiveType get_surface_primitive_type(int p_surface); + String get_surface_name(int p_surface) const; + Array get_surface_arrays(int p_surface) const; + Array get_surface_blend_shape_arrays(int p_surface, int p_blend_shape) const; + int get_surface_lod_count(int p_surface) const; + Vector<int> get_surface_lod_indices(int p_surface, int p_lod) const; + float get_surface_lod_size(int p_surface, int p_lod) const; + Ref<Material> get_surface_material(int p_surface) const; + + void generate_lods(); + + bool has_mesh() const; + Ref<ArrayMesh> get_mesh(); + void clear(); +}; +#endif // EDITOR_SCENE_IMPORTER_MESH_H diff --git a/editor/import/scene_importer_mesh_node_3d.cpp b/editor/import/scene_importer_mesh_node_3d.cpp new file mode 100644 index 0000000000..53929f77b0 --- /dev/null +++ b/editor/import/scene_importer_mesh_node_3d.cpp @@ -0,0 +1,83 @@ +/*************************************************************************/ +/* scene_importer_mesh_node_3d.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "scene_importer_mesh_node_3d.h" + +void EditorSceneImporterMeshNode3D::set_mesh(const Ref<EditorSceneImporterMesh> &p_mesh) { + mesh = p_mesh; +} +Ref<EditorSceneImporterMesh> EditorSceneImporterMeshNode3D::get_mesh() const { + return mesh; +} + +void EditorSceneImporterMeshNode3D::set_skin(const Ref<Skin> &p_skin) { + skin = p_skin; +} +Ref<Skin> EditorSceneImporterMeshNode3D::get_skin() const { + return skin; +} + +void EditorSceneImporterMeshNode3D::set_surface_material(int p_idx, const Ref<Material> &p_material) { + ERR_FAIL_COND(p_idx < 0); + if (p_idx >= surface_materials.size()) { + surface_materials.resize(p_idx + 1); + } + + surface_materials.write[p_idx] = p_material; +} +Ref<Material> EditorSceneImporterMeshNode3D::get_surface_material(int p_idx) const { + ERR_FAIL_COND_V(p_idx < 0, Ref<Material>()); + if (p_idx >= surface_materials.size()) { + return Ref<Material>(); + } + return surface_materials[p_idx]; +} + +void EditorSceneImporterMeshNode3D::set_skeleton_path(const NodePath &p_path) { + skeleton_path = p_path; +} +NodePath EditorSceneImporterMeshNode3D::get_skeleton_path() const { + return skeleton_path; +} + +void EditorSceneImporterMeshNode3D::_bind_methods() { + ClassDB::bind_method(D_METHOD("set_mesh", "mesh"), &EditorSceneImporterMeshNode3D::set_mesh); + ClassDB::bind_method(D_METHOD("get_mesh"), &EditorSceneImporterMeshNode3D::get_mesh); + + ClassDB::bind_method(D_METHOD("set_skin", "skin"), &EditorSceneImporterMeshNode3D::set_skin); + ClassDB::bind_method(D_METHOD("get_skin"), &EditorSceneImporterMeshNode3D::get_skin); + + ClassDB::bind_method(D_METHOD("set_skeleton_path", "skeleton_path"), &EditorSceneImporterMeshNode3D::set_skeleton_path); + ClassDB::bind_method(D_METHOD("get_skeleton_path"), &EditorSceneImporterMeshNode3D::get_skeleton_path); + + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "mesh", PROPERTY_HINT_RESOURCE_TYPE, "EditorSceneImporterMesh"), "set_mesh", "get_mesh"); + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "skin", PROPERTY_HINT_RESOURCE_TYPE, "Skin"), "set_skin", "get_skin"); + ADD_PROPERTY(PropertyInfo(Variant::NODE_PATH, "skeleton_path", PROPERTY_HINT_NODE_PATH_VALID_TYPES, "Skeleton"), "set_skeleton_path", "get_skeleton_path"); +} diff --git a/editor/import/scene_importer_mesh_node_3d.h 
b/editor/import/scene_importer_mesh_node_3d.h new file mode 100644 index 0000000000..9540e3b886 --- /dev/null +++ b/editor/import/scene_importer_mesh_node_3d.h @@ -0,0 +1,64 @@ +/*************************************************************************/ +/* scene_importer_mesh_node_3d.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef EDITOR_SCENE_IMPORTER_MESH_NODE_3D_H +#define EDITOR_SCENE_IMPORTER_MESH_NODE_3D_H + +#include "editor/import/scene_importer_mesh.h" +#include "scene/3d/node_3d.h" +#include "scene/resources/skin.h" + +class EditorSceneImporterMesh; + +class EditorSceneImporterMeshNode3D : public Node3D { + GDCLASS(EditorSceneImporterMeshNode3D, Node3D) + + Ref<EditorSceneImporterMesh> mesh; + Ref<Skin> skin; + NodePath skeleton_path; + Vector<Ref<Material>> surface_materials; + +protected: + static void _bind_methods(); + +public: + void set_mesh(const Ref<EditorSceneImporterMesh> &p_mesh); + Ref<EditorSceneImporterMesh> get_mesh() const; + + void set_skin(const Ref<Skin> &p_skin); + Ref<Skin> get_skin() const; + + void set_surface_material(int p_idx, const Ref<Material> &p_material); + Ref<Material> get_surface_material(int p_idx) const; + + void set_skeleton_path(const NodePath &p_path); + NodePath get_skeleton_path() const; +}; +#endif diff --git a/editor/plugins/canvas_item_editor_plugin.cpp b/editor/plugins/canvas_item_editor_plugin.cpp index 4aac18f05f..5073cbd0b9 100644 --- a/editor/plugins/canvas_item_editor_plugin.cpp +++ b/editor/plugins/canvas_item_editor_plugin.cpp @@ -2935,7 +2935,7 @@ void CanvasItemEditor::_draw_rulers() { // Draw top ruler viewport->draw_rect(Rect2(Point2(RULER_WIDTH, 0), Size2(viewport->get_size().x, RULER_WIDTH)), bg_color); for (int i = Math::ceil(first.x); i < last.x; i++) { - Point2 position = (transform * ruler_transform * major_subdivide * minor_subdivide).xform(Point2(i, 0)); + Point2 position = (transform * ruler_transform * major_subdivide * minor_subdivide).xform(Point2(i, 0)).round(); if (i % (major_subdivision * minor_subdivision) == 0) { viewport->draw_line(Point2(position.x, 0), Point2(position.x, RULER_WIDTH), graduation_color, Math::round(EDSCALE)); float val = (ruler_transform * major_subdivide * minor_subdivide).xform(Point2(i, 0)).x; @@ -2952,7 +2952,7 @@ void CanvasItemEditor::_draw_rulers() { // Draw left ruler viewport->draw_rect(Rect2(Point2(0, RULER_WIDTH), Size2(RULER_WIDTH, viewport->get_size().y)), bg_color); for (int i = Math::ceil(first.y); i < last.y; i++) { - Point2 position = (transform * ruler_transform * major_subdivide * minor_subdivide).xform(Point2(0, i)); + Point2 position = (transform * ruler_transform * major_subdivide * minor_subdivide).xform(Point2(0, i)).round(); if (i % (major_subdivision * minor_subdivision) == 0) { viewport->draw_line(Point2(0, position.y), Point2(RULER_WIDTH, position.y), graduation_color, Math::round(EDSCALE)); float val = (ruler_transform * major_subdivide * minor_subdivide).xform(Point2(0, i)).y; diff --git a/editor/plugins/script_editor_plugin.cpp b/editor/plugins/script_editor_plugin.cpp index e0a6fe16f7..7607ab482c 100644 --- a/editor/plugins/script_editor_plugin.cpp +++ b/editor/plugins/script_editor_plugin.cpp @@ -1954,7 +1954,7 @@ void ScriptEditor::_update_script_names() { Vector<String> disambiguated_script_names; Vector<String> full_script_paths; for (int j = 0; j < sedata.size(); j++) { - disambiguated_script_names.append(sedata[j].name.replace("(*)", "")); + disambiguated_script_names.append(sedata[j].name.replace("(*)", "").get_file()); full_script_paths.append(sedata[j].tooltip); } diff --git a/modules/assimp/SCsub b/modules/assimp/SCsub deleted file mode 100644 index 7213efb74d..0000000000 --- a/modules/assimp/SCsub +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -Import("env") 
-Import("env_modules") - -env_assimp = env_modules.Clone() - -# Thirdparty source files - -thirdparty_obj = [] - -# Force bundled version for now, there's no released version of Assimp with -# support for ArmaturePopulate which we use from their master branch. - -if True: # env['builtin_assimp']: - thirdparty_dir = "#thirdparty/assimp" - - env_assimp.Prepend(CPPPATH=["#thirdparty/assimp"]) - env_assimp.Prepend(CPPPATH=["#thirdparty/assimp/code"]) - env_assimp.Prepend(CPPPATH=["#thirdparty/assimp/include"]) - - # env_assimp.Append(CPPDEFINES=['ASSIMP_DOUBLE_PRECISION']) # TODO default to what godot is compiled with for future double support - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_SINGLETHREADED"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_BOOST_WORKAROUND"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_OWN_ZLIB"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_EXPORT"]) - - # Importers we don't need - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_3DS_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_3MF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_AC_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_AMF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_ASE_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_ASSBIN_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_B3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_BLEND_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_BVH_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_C4D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_COB_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_COLLADA_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_CSM_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_DXF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_GLTF2_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_GLTF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_HMP_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_IFC_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_IRR_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_IRRMESH_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_LWO_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_LWS_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_M3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MD2_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MD3_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MD5_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MD5_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MDC_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MDL_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MMD_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_MS3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_NDO_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_NFF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_OBJ_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_OFF_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_OGRE_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_OPENGEX_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_PLY_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_Q3BSP_IMPORTER"]) - 
env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_Q3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_RAW_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_SIB_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_SMD_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_STEP_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_STL_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_TERRAGEN_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_X3D_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_XGL_IMPORTER"]) - env_assimp.Append(CPPDEFINES=["ASSIMP_BUILD_NO_X_IMPORTER"]) - - if env["platform"] == "windows": - env_assimp.Append(CPPDEFINES=["PLATFORM_WINDOWS"]) - env_assimp.Append(CPPDEFINES=[("PLATFORM", "WINDOWS")]) - elif env["platform"] == "linuxbsd": - env_assimp.Append(CPPDEFINES=["PLATFORM_LINUX"]) - env_assimp.Append(CPPDEFINES=[("PLATFORM", "LINUX")]) - elif env["platform"] == "osx": - env_assimp.Append(CPPDEFINES=["PLATFORM_DARWIN"]) - env_assimp.Append(CPPDEFINES=[("PLATFORM", "DARWIN")]) - - env_thirdparty = env_assimp.Clone() - env_thirdparty.disable_warnings() - env_thirdparty.add_source_files(thirdparty_obj, Glob("#thirdparty/assimp/code/CApi/*.cpp")) - env_thirdparty.add_source_files(thirdparty_obj, Glob("#thirdparty/assimp/code/Common/*.cpp")) - env_thirdparty.add_source_files(thirdparty_obj, Glob("#thirdparty/assimp/code/PostProcessing/*.cpp")) - env_thirdparty.add_source_files(thirdparty_obj, Glob("#thirdparty/assimp/code/Material/*.cpp")) - env_thirdparty.add_source_files(thirdparty_obj, Glob("#thirdparty/assimp/code/FBX/*.cpp")) - env.modules_sources += thirdparty_obj - - -# Godot source files - -module_obj = [] - -env_assimp.add_source_files(module_obj, "*.cpp") -env.modules_sources += module_obj - -# Needed to force rebuilding the module files when the thirdparty library is updated. -env.Depends(module_obj, thirdparty_obj) diff --git a/modules/assimp/editor_scene_importer_assimp.cpp b/modules/assimp/editor_scene_importer_assimp.cpp deleted file mode 100644 index 796ee27a7d..0000000000 --- a/modules/assimp/editor_scene_importer_assimp.cpp +++ /dev/null @@ -1,1485 +0,0 @@ -/*************************************************************************/ -/* editor_scene_importer_assimp.cpp */ -/*************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. 
*/ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -/*************************************************************************/ - -#include "editor_scene_importer_assimp.h" -#include "core/io/image_loader.h" -#include "editor/import/resource_importer_scene.h" -#include "import_utils.h" -#include "scene/3d/camera_3d.h" -#include "scene/3d/light_3d.h" -#include "scene/3d/mesh_instance_3d.h" -#include "scene/main/node.h" -#include "scene/resources/material.h" -#include "scene/resources/surface_tool.h" - -#include <assimp/matrix4x4.h> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/Importer.hpp> -#include <assimp/LogStream.hpp> - -// move into assimp -aiBone *get_bone_by_name(const aiScene *scene, aiString bone_name) { - for (unsigned int mesh_id = 0; mesh_id < scene->mNumMeshes; ++mesh_id) { - aiMesh *mesh = scene->mMeshes[mesh_id]; - - // iterate over all the bones on the mesh for this node only! - for (unsigned int boneIndex = 0; boneIndex < mesh->mNumBones; boneIndex++) { - aiBone *bone = mesh->mBones[boneIndex]; - if (bone->mName == bone_name) { - printf("matched bone by name: %s\n", bone->mName.C_Str()); - return bone; - } - } - } - - return nullptr; -} - -void EditorSceneImporterAssimp::get_extensions(List<String> *r_extensions) const { - const String import_setting_string = "filesystem/import/open_asset_import/"; - - Map<String, ImportFormat> import_format; - { - Vector<String> exts; - exts.push_back("fbx"); - ImportFormat import = { exts, true }; - import_format.insert("fbx", import); - } - for (Map<String, ImportFormat>::Element *E = import_format.front(); E; E = E->next()) { - _register_project_setting_import(E->key(), import_setting_string, E->get().extensions, r_extensions, - E->get().is_default); - } -} - -void EditorSceneImporterAssimp::_register_project_setting_import(const String generic, const String import_setting_string, - const Vector<String> &exts, List<String> *r_extensions, - const bool p_enabled) const { - const String use_generic = "use_" + generic; - _GLOBAL_DEF(import_setting_string + use_generic, p_enabled, true); - if (ProjectSettings::get_singleton()->get(import_setting_string + use_generic)) { - for (int32_t i = 0; i < exts.size(); i++) { - r_extensions->push_back(exts[i]); - } - } -} - -uint32_t EditorSceneImporterAssimp::get_import_flags() const { - return IMPORT_SCENE; -} - -void EditorSceneImporterAssimp::_bind_methods() { -} - -Node *EditorSceneImporterAssimp::import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, - List<String> *r_missing_deps, Error *r_err) { - Assimp::Importer importer; - importer.SetPropertyBool(AI_CONFIG_PP_FD_REMOVE, true); - // Cannot remove pivot points because the static mesh will be in the wrong place - importer.SetPropertyBool(AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS, false); - int32_t max_bone_weights = 4; - //if (p_flags & IMPORT_ANIMATION_EIGHT_WEIGHTS) { - // const int eight_bones = 8; - // importer.SetPropertyBool(AI_CONFIG_PP_LBW_MAX_WEIGHTS, eight_bones); - // max_bone_weights = eight_bones; - //} - - 
importer.SetPropertyInteger(AI_CONFIG_PP_SBP_REMOVE, aiPrimitiveType_LINE | aiPrimitiveType_POINT); - - //importer.SetPropertyFloat(AI_CONFIG_PP_DB_THRESHOLD, 1.0f); - int32_t post_process_Steps = aiProcess_CalcTangentSpace | - aiProcess_GlobalScale | - // imports models and listens to their file scale for CM to M conversions - //aiProcess_FlipUVs | - aiProcess_FlipWindingOrder | - // very important for culling so that it is done in the correct order. - //aiProcess_DropNormals | - //aiProcess_GenSmoothNormals | - //aiProcess_JoinIdenticalVertices | - aiProcess_ImproveCacheLocality | - //aiProcess_RemoveRedundantMaterials | // Causes a crash - //aiProcess_SplitLargeMeshes | - aiProcess_Triangulate | - aiProcess_GenUVCoords | - //aiProcess_FindDegenerates | - //aiProcess_SortByPType | - // aiProcess_FindInvalidData | - aiProcess_TransformUVCoords | - aiProcess_FindInstances | - //aiProcess_FixInfacingNormals | - //aiProcess_ValidateDataStructure | - aiProcess_OptimizeMeshes | - aiProcess_PopulateArmatureData | - //aiProcess_OptimizeGraph | - //aiProcess_Debone | - // aiProcess_EmbedTextures | - //aiProcess_SplitByBoneCount | - 0; - String g_path = ProjectSettings::get_singleton()->globalize_path(p_path); - aiScene *scene = (aiScene *)importer.ReadFile(g_path.utf8().ptr(), post_process_Steps); - - ERR_FAIL_COND_V_MSG(scene == nullptr, nullptr, String("Open Asset Import failed to open: ") + String(importer.GetErrorString())); - - return _generate_scene(p_path, scene, p_flags, p_bake_fps, max_bone_weights); -} - -template <class T> -struct EditorSceneImporterAssetImportInterpolate { - T lerp(const T &a, const T &b, float c) const { - return a + (b - a) * c; - } - - T catmull_rom(const T &p0, const T &p1, const T &p2, const T &p3, float t) { - float t2 = t * t; - float t3 = t2 * t; - - return 0.5f * ((2.0f * p1) + (-p0 + p2) * t + (2.0f * p0 - 5.0f * p1 + 4.0f * p2 - p3) * t2 + (-p0 + 3.0f * p1 - 3.0f * p2 + p3) * t3); - } - - T bezier(T start, T control_1, T control_2, T end, float t) { - /* Formula from Wikipedia article on Bezier curves. 
*/ - real_t omt = (1.0 - t); - real_t omt2 = omt * omt; - real_t omt3 = omt2 * omt; - real_t t2 = t * t; - real_t t3 = t2 * t; - - return start * omt3 + control_1 * omt2 * t * 3.0 + control_2 * omt * t2 * 3.0 + end * t3; - } -}; - -//thank you for existing, partial specialization -template <> -struct EditorSceneImporterAssetImportInterpolate<Quat> { - Quat lerp(const Quat &a, const Quat &b, float c) const { - ERR_FAIL_COND_V_MSG(!a.is_normalized(), Quat(), "The quaternion \"a\" must be normalized."); - ERR_FAIL_COND_V_MSG(!b.is_normalized(), Quat(), "The quaternion \"b\" must be normalized."); - - return a.slerp(b, c).normalized(); - } - - Quat catmull_rom(const Quat &p0, const Quat &p1, const Quat &p2, const Quat &p3, float c) { - ERR_FAIL_COND_V_MSG(!p1.is_normalized(), Quat(), "The quaternion \"p1\" must be normalized."); - ERR_FAIL_COND_V_MSG(!p2.is_normalized(), Quat(), "The quaternion \"p2\" must be normalized."); - - return p1.slerp(p2, c).normalized(); - } - - Quat bezier(Quat start, Quat control_1, Quat control_2, Quat end, float t) { - ERR_FAIL_COND_V_MSG(!start.is_normalized(), Quat(), "The start quaternion must be normalized."); - ERR_FAIL_COND_V_MSG(!end.is_normalized(), Quat(), "The end quaternion must be normalized."); - - return start.slerp(end, t).normalized(); - } -}; - -template <class T> -T EditorSceneImporterAssimp::_interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, float p_time, - AssetImportAnimation::Interpolation p_interp) { - //could use binary search, worth it? - int idx = -1; - for (int i = 0; i < p_times.size(); i++) { - if (p_times[i] > p_time) { - break; - } - idx++; - } - - EditorSceneImporterAssetImportInterpolate<T> interp; - - switch (p_interp) { - case AssetImportAnimation::INTERP_LINEAR: { - if (idx == -1) { - return p_values[0]; - } else if (idx >= p_times.size() - 1) { - return p_values[p_times.size() - 1]; - } - - float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - return interp.lerp(p_values[idx], p_values[idx + 1], c); - - } break; - case AssetImportAnimation::INTERP_STEP: { - if (idx == -1) { - return p_values[0]; - } else if (idx >= p_times.size() - 1) { - return p_values[p_times.size() - 1]; - } - - return p_values[idx]; - - } break; - case AssetImportAnimation::INTERP_CATMULLROMSPLINE: { - if (idx == -1) { - return p_values[1]; - } else if (idx >= p_times.size() - 1) { - return p_values[1 + p_times.size() - 1]; - } - - float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - return interp.catmull_rom(p_values[idx - 1], p_values[idx], p_values[idx + 1], p_values[idx + 3], c); - - } break; - case AssetImportAnimation::INTERP_CUBIC_SPLINE: { - if (idx == -1) { - return p_values[1]; - } else if (idx >= p_times.size() - 1) { - return p_values[(p_times.size() - 1) * 3 + 1]; - } - - float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); - - T from = p_values[idx * 3 + 1]; - T c1 = from + p_values[idx * 3 + 2]; - T to = p_values[idx * 3 + 4]; - T c2 = to + p_values[idx * 3 + 3]; - - return interp.bezier(from, c1, c2, to, c); - - } break; - } - - ERR_FAIL_V(p_values[0]); -} - -aiBone *EditorSceneImporterAssimp::get_bone_from_stack(ImportState &state, aiString name) { - List<aiBone *>::Element *iter; - aiBone *bone = nullptr; - for (iter = state.bone_stack.front(); iter; iter = iter->next()) { - bone = (aiBone *)iter->get(); - - if (bone && bone->mName == name) { - state.bone_stack.erase(bone); - return bone; - } - } - - return nullptr; -} - -Node3D * 
-EditorSceneImporterAssimp::_generate_scene(const String &p_path, aiScene *scene, const uint32_t p_flags, int p_bake_fps, - const int32_t p_max_bone_weights) { - ERR_FAIL_COND_V(scene == nullptr, nullptr); - - ImportState state; - state.path = p_path; - state.assimp_scene = scene; - state.max_bone_weights = p_max_bone_weights; - state.animation_player = nullptr; - state.import_flags = p_flags; - - // populate light map - for (unsigned int l = 0; l < scene->mNumLights; l++) { - aiLight *ai_light = scene->mLights[l]; - ERR_CONTINUE(ai_light == nullptr); - state.light_cache[AssimpUtils::get_assimp_string(ai_light->mName)] = l; - } - - // fill camera cache - for (unsigned int c = 0; c < scene->mNumCameras; c++) { - aiCamera *ai_camera = scene->mCameras[c]; - ERR_CONTINUE(ai_camera == nullptr); - state.camera_cache[AssimpUtils::get_assimp_string(ai_camera->mName)] = c; - } - - if (scene->mRootNode) { - state.nodes.push_back(scene->mRootNode); - - // make flat node tree - in order to make processing deterministic - for (unsigned int i = 0; i < scene->mRootNode->mNumChildren; i++) { - _generate_node(state, scene->mRootNode->mChildren[i]); - } - - RegenerateBoneStack(state); - - Node *last_valid_parent = nullptr; - - List<const aiNode *>::Element *iter; - for (iter = state.nodes.front(); iter; iter = iter->next()) { - const aiNode *element_assimp_node = iter->get(); - const aiNode *parent_assimp_node = element_assimp_node->mParent; - - String node_name = AssimpUtils::get_assimp_string(element_assimp_node->mName); - //print_verbose("node: " + node_name); - - Node3D *spatial = nullptr; - Transform transform = AssimpUtils::assimp_matrix_transform(element_assimp_node->mTransformation); - - // retrieve this node bone - aiBone *bone = get_bone_from_stack(state, element_assimp_node->mName); - - if (state.light_cache.has(node_name)) { - spatial = create_light(state, node_name, transform); - } else if (state.camera_cache.has(node_name)) { - spatial = create_camera(state, node_name, transform); - } else if (state.armature_nodes.find(element_assimp_node)) { - // create skeleton - print_verbose("Making skeleton: " + node_name); - Skeleton3D *skeleton = memnew(Skeleton3D); - spatial = skeleton; - if (!state.armature_skeletons.has(element_assimp_node)) { - state.armature_skeletons.insert(element_assimp_node, skeleton); - } - } else if (bone != nullptr) { - continue; - } else { - spatial = memnew(Node3D); - } - - ERR_CONTINUE_MSG(spatial == nullptr, "FBX Import - are we out of ram?"); - // we on purpose set the transform and name after creating the node. - - spatial->set_name(node_name); - spatial->set_global_transform(transform); - - // first element is root - if (iter == state.nodes.front()) { - state.root = spatial; - } - - // flat node map parent lookup tool - state.flat_node_map.insert(element_assimp_node, spatial); - - Map<const aiNode *, Node3D *>::Element *parent_lookup = state.flat_node_map.find(parent_assimp_node); - - // note: this always fails on the root node :) keep that in mind this is by design - if (parent_lookup) { - Node3D *parent_node = parent_lookup->value(); - - ERR_FAIL_COND_V_MSG(parent_node == nullptr, state.root, - "Parent node invalid even though lookup successful, out of ram?"); - - if (spatial != state.root) { - parent_node->add_child(spatial); - spatial->set_owner(state.root); - } else { - // required - think about it root never has a parent yet is valid, anything else without a parent is not valid. 
- } - } else if (spatial != state.root) { - // if the ainode is not in the tree - // parent it to the last good parent found - if (last_valid_parent) { - last_valid_parent->add_child(spatial); - spatial->set_owner(state.root); - } else { - // this is a serious error? - memdelete(spatial); - } - } - - // update last valid parent - last_valid_parent = spatial; - } - print_verbose("node counts: " + itos(state.nodes.size())); - - // make clean bone stack - RegenerateBoneStack(state); - - print_verbose("generating godot bone data"); - - print_verbose("Godot bone stack count: " + itos(state.bone_stack.size())); - - // This is a list of bones, duplicates are from other meshes and must be dealt with properly - for (List<aiBone *>::Element *element = state.bone_stack.front(); element; element = element->next()) { - aiBone *bone = element->get(); - - ERR_CONTINUE_MSG(!bone, "invalid bone read from assimp?"); - - // Utilities for armature lookup - for now only FBX makes these - aiNode *armature_for_bone = bone->mArmature; - - // Utilities for bone node lookup - for now only FBX makes these - aiNode *bone_node = bone->mNode; - aiNode *parent_node = bone_node->mParent; - - String bone_name = AssimpUtils::get_anim_string_from_assimp(bone->mName); - ERR_CONTINUE_MSG(armature_for_bone == nullptr, "Armature for bone invalid: " + bone_name); - Skeleton3D *skeleton = state.armature_skeletons[armature_for_bone]; - - state.skeleton_bone_map[bone] = skeleton; - - if (bone_name.empty()) { - bone_name = "untitled_bone_name"; - WARN_PRINT("Untitled bone name detected... report with file please"); - } - - // todo: this is where skin support goes - if (skeleton && skeleton->find_bone(bone_name) == -1) { - print_verbose("[Godot Glue] Imported bone" + bone_name); - int boneIdx = skeleton->get_bone_count(); - - Transform pform = AssimpUtils::assimp_matrix_transform(bone->mNode->mTransformation); - skeleton->add_bone(bone_name); - skeleton->set_bone_rest(boneIdx, pform); - - if (parent_node != nullptr) { - int parent_bone_id = skeleton->find_bone(AssimpUtils::get_anim_string_from_assimp(parent_node->mName)); - int current_bone_id = boneIdx; - skeleton->set_bone_parent(current_bone_id, parent_bone_id); - } - } - } - - print_verbose("generating mesh phase from skeletal mesh"); - - List<Node3D *> cleanup_template_nodes; - - for (Map<const aiNode *, Node3D *>::Element *key_value_pair = state.flat_node_map.front(); key_value_pair; key_value_pair = key_value_pair->next()) { - const aiNode *assimp_node = key_value_pair->key(); - Node3D *mesh_template = key_value_pair->value(); - - ERR_CONTINUE(assimp_node == nullptr); - ERR_CONTINUE(mesh_template == nullptr); - - Node *parent_node = mesh_template->get_parent(); - - if (mesh_template == state.root) { - continue; - } - - if (parent_node == nullptr) { - print_error("Found invalid parent node!"); - continue; // root node - } - - String node_name = AssimpUtils::get_assimp_string(assimp_node->mName); - Transform node_transform = AssimpUtils::assimp_matrix_transform(assimp_node->mTransformation); - - if (assimp_node->mNumMeshes > 0) { - MeshInstance3D *mesh = create_mesh(state, assimp_node, node_name, parent_node, node_transform); - if (mesh) { - parent_node->remove_child(mesh_template); - - // re-parent children - List<Node *> children; - // re-parent all children to new node - // note: since get_child_count will change during execution we must build a list first to be safe. 
- for (int childId = 0; childId < mesh_template->get_child_count(); childId++) { - // get child - Node *child = mesh_template->get_child(childId); - children.push_back(child); - } - - for (List<Node *>::Element *element = children.front(); element; element = element->next()) { - // reparent the children to the real mesh node. - mesh_template->remove_child(element->get()); - mesh->add_child(element->get()); - element->get()->set_owner(state.root); - } - - // update mesh in list so that each mesh node is available - // this makes the template unavailable which is the desired behaviour - state.flat_node_map[assimp_node] = mesh; - - cleanup_template_nodes.push_back(mesh_template); - - // clean up this list we don't need it - children.clear(); - } - } - } - - for (List<Node3D *>::Element *element = cleanup_template_nodes.front(); element; element = element->next()) { - if (element->get()) { - memdelete(element->get()); - } - } - } - - if (p_flags & IMPORT_ANIMATION && scene->mNumAnimations) { - state.animation_player = memnew(AnimationPlayer); - state.root->add_child(state.animation_player); - state.animation_player->set_owner(state.root); - - for (uint32_t i = 0; i < scene->mNumAnimations; i++) { - _import_animation(state, i, p_bake_fps); - } - } - - // - // Cleanup operations - // - - state.mesh_cache.clear(); - state.material_cache.clear(); - state.light_cache.clear(); - state.camera_cache.clear(); - state.assimp_node_map.clear(); - state.path_to_image_cache.clear(); - state.nodes.clear(); - state.flat_node_map.clear(); - state.armature_skeletons.clear(); - state.bone_stack.clear(); - return state.root; -} - -void EditorSceneImporterAssimp::_insert_animation_track(ImportState &scene, const aiAnimation *assimp_anim, int track_id, - int anim_fps, Ref<Animation> animation, float ticks_per_second, - Skeleton3D *skeleton, const NodePath &node_path, - const String &node_name, aiBone *track_bone) { - const aiNodeAnim *assimp_track = assimp_anim->mChannels[track_id]; - //make transform track - int track_idx = animation->get_track_count(); - animation->add_track(Animation::TYPE_TRANSFORM); - animation->track_set_path(track_idx, node_path); - //first determine animation length - - float increment = 1.0 / float(anim_fps); - float time = 0.0; - - bool last = false; - - Vector<Vector3> pos_values; - Vector<float> pos_times; - Vector<Vector3> scale_values; - Vector<float> scale_times; - Vector<Quat> rot_values; - Vector<float> rot_times; - - for (size_t p = 0; p < assimp_track->mNumPositionKeys; p++) { - aiVector3D pos = assimp_track->mPositionKeys[p].mValue; - pos_values.push_back(Vector3(pos.x, pos.y, pos.z)); - pos_times.push_back(assimp_track->mPositionKeys[p].mTime / ticks_per_second); - } - - for (size_t r = 0; r < assimp_track->mNumRotationKeys; r++) { - aiQuaternion quat = assimp_track->mRotationKeys[r].mValue; - rot_values.push_back(Quat(quat.x, quat.y, quat.z, quat.w).normalized()); - rot_times.push_back(assimp_track->mRotationKeys[r].mTime / ticks_per_second); - } - - for (size_t sc = 0; sc < assimp_track->mNumScalingKeys; sc++) { - aiVector3D scale = assimp_track->mScalingKeys[sc].mValue; - scale_values.push_back(Vector3(scale.x, scale.y, scale.z)); - scale_times.push_back(assimp_track->mScalingKeys[sc].mTime / ticks_per_second); - } - - while (true) { - Vector3 pos; - Quat rot; - Vector3 scale(1, 1, 1); - - if (pos_values.size()) { - pos = _interpolate_track<Vector3>(pos_times, pos_values, time, AssetImportAnimation::INTERP_LINEAR); - } - - if (rot_values.size()) { - rot = 
_interpolate_track<Quat>(rot_times, rot_values, time, - AssetImportAnimation::INTERP_LINEAR) - .normalized(); - } - - if (scale_values.size()) { - scale = _interpolate_track<Vector3>(scale_times, scale_values, time, AssetImportAnimation::INTERP_LINEAR); - } - - if (skeleton) { - int skeleton_bone = skeleton->find_bone(node_name); - - if (skeleton_bone >= 0 && track_bone) { - Transform xform; - xform.basis.set_quat_scale(rot, scale); - xform.origin = pos; - - xform = skeleton->get_bone_rest(skeleton_bone).inverse() * xform; - - rot = xform.basis.get_rotation_quat(); - rot.normalize(); - scale = xform.basis.get_scale(); - pos = xform.origin; - } else { - ERR_FAIL_MSG("Skeleton bone lookup failed for skeleton: " + skeleton->get_name()); - } - } - - animation->track_set_interpolation_type(track_idx, Animation::INTERPOLATION_LINEAR); - animation->transform_track_insert_key(track_idx, time, pos, rot, scale); - - if (last) { //done this way so a key is always inserted past the end (for proper interpolation) - break; - } - time += increment; - if (time >= animation->get_length()) { - last = true; - } - } -} - -// I really do not like this but need to figure out a better way of removing it later. -Node *EditorSceneImporterAssimp::get_node_by_name(ImportState &state, String name) { - for (Map<const aiNode *, Node3D *>::Element *key_value_pair = state.flat_node_map.front(); key_value_pair; key_value_pair = key_value_pair->next()) { - const aiNode *assimp_node = key_value_pair->key(); - Node3D *node = key_value_pair->value(); - - String node_name = AssimpUtils::get_assimp_string(assimp_node->mName); - if (name == node_name && node) { - return node; - } - } - return nullptr; -} - -/* Bone stack is a fifo handler for multiple armatures since armatures aren't a thing in assimp (yet) */ -void EditorSceneImporterAssimp::RegenerateBoneStack(ImportState &state) { - state.bone_stack.clear(); - // build bone stack list - for (unsigned int mesh_id = 0; mesh_id < state.assimp_scene->mNumMeshes; ++mesh_id) { - aiMesh *mesh = state.assimp_scene->mMeshes[mesh_id]; - - // iterate over all the bones on the mesh for this node only! - for (unsigned int boneIndex = 0; boneIndex < mesh->mNumBones; boneIndex++) { - aiBone *bone = mesh->mBones[boneIndex]; - - // doubtful this is required right now but best to check - if (!state.bone_stack.find(bone)) { - //print_verbose("[assimp] bone stack added: " + String(bone->mName.C_Str()) ); - state.bone_stack.push_back(bone); - } - } - } -} - -/* Bone stack is a fifo handler for multiple armatures since armatures aren't a thing in assimp (yet) */ -void EditorSceneImporterAssimp::RegenerateBoneStack(ImportState &state, aiMesh *mesh) { - state.bone_stack.clear(); - // iterate over all the bones on the mesh for this node only! 
- for (unsigned int boneIndex = 0; boneIndex < mesh->mNumBones; boneIndex++) { - aiBone *bone = mesh->mBones[boneIndex]; - if (state.bone_stack.find(bone) == nullptr) { - state.bone_stack.push_back(bone); - } - } -} - -// animation tracks are per bone - -void EditorSceneImporterAssimp::_import_animation(ImportState &state, int p_animation_index, int p_bake_fps) { - ERR_FAIL_INDEX(p_animation_index, (int)state.assimp_scene->mNumAnimations); - - const aiAnimation *anim = state.assimp_scene->mAnimations[p_animation_index]; - String name = AssimpUtils::get_anim_string_from_assimp(anim->mName); - if (name == String()) { - name = "Animation " + itos(p_animation_index + 1); - } - print_verbose("import animation: " + name); - float ticks_per_second = anim->mTicksPerSecond; - - if (state.assimp_scene->mMetaData != nullptr && Math::is_equal_approx(ticks_per_second, 0.0f)) { - int32_t time_mode = 0; - state.assimp_scene->mMetaData->Get("TimeMode", time_mode); - ticks_per_second = AssimpUtils::get_fbx_fps(time_mode, state.assimp_scene); - } - - //? - //if ((p_path.get_file().get_extension().to_lower() == "glb" || p_path.get_file().get_extension().to_lower() == "gltf") && Math::is_equal_approx(ticks_per_second, 0.0f)) { - // ticks_per_second = 1000.0f; - //} - - if (Math::is_equal_approx(ticks_per_second, 0.0f)) { - ticks_per_second = 25.0f; - } - - Ref<Animation> animation; - animation.instance(); - animation->set_name(name); - animation->set_length(anim->mDuration / ticks_per_second); - - if (name.begins_with("loop") || name.ends_with("loop") || name.begins_with("cycle") || name.ends_with("cycle")) { - animation->set_loop(true); - } - - // generate bone stack for animation import - RegenerateBoneStack(state); - - //regular tracks - for (size_t i = 0; i < anim->mNumChannels; i++) { - const aiNodeAnim *track = anim->mChannels[i]; - String node_name = AssimpUtils::get_assimp_string(track->mNodeName); - print_verbose("track name import: " + node_name); - if (track->mNumRotationKeys == 0 && track->mNumPositionKeys == 0 && track->mNumScalingKeys == 0) { - continue; //do not bother - } - - Skeleton3D *skeleton = nullptr; - NodePath node_path; - aiBone *bone = nullptr; - - // Import skeleton bone animation for this track - // Any bone will do, no point in processing more than just what is in the skeleton - { - bone = get_bone_from_stack(state, track->mNodeName); - - if (bone) { - // get skeleton by bone - skeleton = state.armature_skeletons[bone->mArmature]; - - if (skeleton) { - String path = state.root->get_path_to(skeleton); - path += ":" + node_name; - node_path = path; - - if (node_path != NodePath()) { - _insert_animation_track(state, anim, i, p_bake_fps, animation, ticks_per_second, skeleton, - node_path, node_name, bone); - } else { - print_error("Failed to find valid node path for animation"); - } - } - } - } - - // not a bone - // note this is flaky it uses node names which is unreliable - Node *allocated_node = get_node_by_name(state, node_name); - // todo: implement skeleton grabbing for node based animations too :) - // check if node exists, if it does then also apply animation track for node and bones above are all handled. - // this is now inclusive animation handling so that - // we import all the data and do not miss anything. 
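// --- Illustrative sketch (editor annotation, not part of the deleted file) ---
// How the animation track paths used here are addressed in Godot: a plain node is
// targeted by its NodePath from the scene root, while a skeleton bone is targeted by
// the skeleton's NodePath plus ":" and the bone name, which is the convention
// _insert_animation_track() relies on above. The helper name and parameters are
// hypothetical; the Animation calls mirror the surrounding code.
static void add_bone_transform_track(Ref<Animation> p_anim, Node *p_root, Skeleton3D *p_skeleton, const String &p_bone_name) {
	String path = p_root->get_path_to(p_skeleton); // e.g. "Armature/Skeleton3D"
	path += ":" + p_bone_name;                     // bone sub-name suffix
	int track = p_anim->add_track(Animation::TYPE_TRANSFORM);
	p_anim->track_set_path(track, NodePath(path));
	p_anim->track_set_interpolation_type(track, Animation::INTERPOLATION_LINEAR);
}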
- if (allocated_node) { - node_path = state.root->get_path_to(allocated_node); - - if (node_path != NodePath()) { - _insert_animation_track(state, anim, i, p_bake_fps, animation, ticks_per_second, skeleton, - node_path, node_name, nullptr); - } - } - } - - //blend shape tracks - - for (size_t i = 0; i < anim->mNumMorphMeshChannels; i++) { - const aiMeshMorphAnim *anim_mesh = anim->mMorphMeshChannels[i]; - - const String prop_name = AssimpUtils::get_assimp_string(anim_mesh->mName); - const String mesh_name = prop_name.split("*")[0]; - - ERR_CONTINUE(prop_name.split("*").size() != 2); - - Node *item = get_node_by_name(state, mesh_name); - ERR_CONTINUE_MSG(!item, "failed to look up node by name"); - const MeshInstance3D *mesh_instance = Object::cast_to<MeshInstance3D>(item); - ERR_CONTINUE(mesh_instance == nullptr); - - String base_path = state.root->get_path_to(mesh_instance); - - Ref<Mesh> mesh = mesh_instance->get_mesh(); - ERR_CONTINUE(mesh.is_null()); - - //add the tracks for this mesh - int base_track = animation->get_track_count(); - for (int j = 0; j < mesh->get_blend_shape_count(); j++) { - animation->add_track(Animation::TYPE_VALUE); - animation->track_set_path(base_track + j, base_path + ":blend_shapes/" + mesh->get_blend_shape_name(j)); - } - - for (size_t k = 0; k < anim_mesh->mNumKeys; k++) { - for (size_t j = 0; j < anim_mesh->mKeys[k].mNumValuesAndWeights; j++) { - float t = anim_mesh->mKeys[k].mTime / ticks_per_second; - float w = anim_mesh->mKeys[k].mWeights[j]; - - animation->track_insert_key(base_track + j, t, w); - } - } - } - - if (animation->get_track_count()) { - state.animation_player->add_animation(name, animation); - } -} - -// -// Mesh Generation from indices ? why do we need so much mesh code -// [debt needs looked into] -Ref<Mesh> -EditorSceneImporterAssimp::_generate_mesh_from_surface_indices(ImportState &state, const Vector<int> &p_surface_indices, - const aiNode *assimp_node, Ref<Skin> &skin, - Skeleton3D *&skeleton_assigned) { - Ref<ArrayMesh> mesh; - mesh.instance(); - bool has_uvs = false; - uint32_t mesh_flags = 0; - - Map<String, uint32_t> morph_mesh_string_lookup; - - for (int i = 0; i < p_surface_indices.size(); i++) { - const unsigned int mesh_idx = p_surface_indices[0]; - const aiMesh *ai_mesh = state.assimp_scene->mMeshes[mesh_idx]; - for (size_t j = 0; j < ai_mesh->mNumAnimMeshes; j++) { - String ai_anim_mesh_name = AssimpUtils::get_assimp_string(ai_mesh->mAnimMeshes[j]->mName); - if (!morph_mesh_string_lookup.has(ai_anim_mesh_name)) { - morph_mesh_string_lookup.insert(ai_anim_mesh_name, j); - mesh->set_blend_shape_mode(ArrayMesh::BLEND_SHAPE_MODE_NORMALIZED); - if (ai_anim_mesh_name.empty()) { - ai_anim_mesh_name = String("morph_") + itos(j); - } - mesh->add_blend_shape(ai_anim_mesh_name); - } - } - } - // - // Process Vertex Weights - // - for (int i = 0; i < p_surface_indices.size(); i++) { - const unsigned int mesh_idx = p_surface_indices[i]; - const aiMesh *ai_mesh = state.assimp_scene->mMeshes[mesh_idx]; - - Map<uint32_t, Vector<BoneInfo>> vertex_weights; - - if (ai_mesh->mNumBones > 0) { - for (size_t b = 0; b < ai_mesh->mNumBones; b++) { - aiBone *bone = ai_mesh->mBones[b]; - - if (!skeleton_assigned) { - print_verbose("Assigned mesh skeleton during mesh creation"); - skeleton_assigned = state.skeleton_bone_map[bone]; - - if (!skin.is_valid()) { - print_verbose("Configured new skin"); - skin.instance(); - } else { - print_verbose("Reusing existing skin!"); - } - } - // skeleton_assigned = - String bone_name = 
AssimpUtils::get_assimp_string(bone->mName); - int bone_index = skeleton_assigned->find_bone(bone_name); - ERR_CONTINUE(bone_index == -1); - for (size_t w = 0; w < bone->mNumWeights; w++) { - aiVertexWeight ai_weights = bone->mWeights[w]; - - BoneInfo bi; - uint32_t vertex_index = ai_weights.mVertexId; - bi.bone = bone_index; - bi.weight = ai_weights.mWeight; - - if (!vertex_weights.has(vertex_index)) { - vertex_weights[vertex_index] = Vector<BoneInfo>(); - } - - vertex_weights[vertex_index].push_back(bi); - } - } - } - - // - // Create mesh from data from assimp - // - - Ref<SurfaceTool> st; - st.instance(); - st->begin(Mesh::PRIMITIVE_TRIANGLES); - - for (size_t j = 0; j < ai_mesh->mNumVertices; j++) { - // Get the texture coordinates if they exist - if (ai_mesh->HasTextureCoords(0)) { - has_uvs = true; - st->set_uv(Vector2(ai_mesh->mTextureCoords[0][j].x, 1.0f - ai_mesh->mTextureCoords[0][j].y)); - } - - if (ai_mesh->HasTextureCoords(1)) { - has_uvs = true; - st->set_uv2(Vector2(ai_mesh->mTextureCoords[1][j].x, 1.0f - ai_mesh->mTextureCoords[1][j].y)); - } - - // Assign vertex colors - if (ai_mesh->HasVertexColors(0)) { - Color color = Color(ai_mesh->mColors[0]->r, ai_mesh->mColors[0]->g, ai_mesh->mColors[0]->b, - ai_mesh->mColors[0]->a); - st->set_color(color); - } - - // Work out normal calculations? - this needs work it doesn't work properly on huestos - if (ai_mesh->mNormals != nullptr) { - const aiVector3D normals = ai_mesh->mNormals[j]; - const Vector3 godot_normal = Vector3(normals.x, normals.y, normals.z); - st->set_normal(godot_normal); - if (ai_mesh->HasTangentsAndBitangents()) { - const aiVector3D tangents = ai_mesh->mTangents[j]; - const Vector3 godot_tangent = Vector3(tangents.x, tangents.y, tangents.z); - const aiVector3D bitangent = ai_mesh->mBitangents[j]; - const Vector3 godot_bitangent = Vector3(bitangent.x, bitangent.y, bitangent.z); - float d = godot_normal.cross(godot_tangent).dot(godot_bitangent) > 0.0f ? 1.0f : -1.0f; - st->set_tangent(Plane(tangents.x, tangents.y, tangents.z, d)); - } - } - - // We have vertex weights right? - if (vertex_weights.has(j)) { - Vector<BoneInfo> bone_info = vertex_weights[j]; - Vector<int> bones; - bones.resize(bone_info.size()); - Vector<float> weights; - weights.resize(bone_info.size()); - - // todo? do we really need to loop over all bones? - assimp may have helper to find all influences on this vertex. - for (int k = 0; k < bone_info.size(); k++) { - bones.write[k] = bone_info[k].bone; - weights.write[k] = bone_info[k].weight; - } - - st->set_bones(bones); - st->set_weights(weights); - } - - // Assign vertex - const aiVector3D pos = ai_mesh->mVertices[j]; - - // note we must include node offset transform as this is relative to world space not local space. 
- Vector3 godot_pos = Vector3(pos.x, pos.y, pos.z); - st->add_vertex(godot_pos); - } - - // fire replacement for face handling - for (size_t j = 0; j < ai_mesh->mNumFaces; j++) { - const aiFace face = ai_mesh->mFaces[j]; - for (unsigned int k = 0; k < face.mNumIndices; k++) { - st->add_index(face.mIndices[k]); - } - } - - if (ai_mesh->HasTangentsAndBitangents() == false && has_uvs) { - st->generate_tangents(); - } - - aiMaterial *ai_material = state.assimp_scene->mMaterials[ai_mesh->mMaterialIndex]; - Ref<StandardMaterial3D> mat; - mat.instance(); - - int32_t mat_two_sided = 0; - if (AI_SUCCESS == ai_material->Get(AI_MATKEY_TWOSIDED, mat_two_sided)) { - if (mat_two_sided > 0) { - mat->set_cull_mode(StandardMaterial3D::CULL_DISABLED); - } else { - mat->set_cull_mode(StandardMaterial3D::CULL_BACK); - } - } - - aiString mat_name; - if (AI_SUCCESS == ai_material->Get(AI_MATKEY_NAME, mat_name)) { - mat->set_name(AssimpUtils::get_assimp_string(mat_name)); - } - - // Culling handling for meshes - - // cull all back faces - mat->set_cull_mode(StandardMaterial3D::CULL_DISABLED); - - // Now process materials - aiTextureType base_color = aiTextureType_BASE_COLOR; - { - String filename, path; - AssimpImageData image_data; - - if (AssimpUtils::GetAssimpTexture(state, ai_material, base_color, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - - // anything transparent must be culled - if (image_data.raw_image->detect_alpha() != Image::ALPHA_NONE) { - mat->set_transparency(StandardMaterial3D::TRANSPARENCY_ALPHA_DEPTH_PRE_PASS); - mat->set_cull_mode(StandardMaterial3D::CULL_DISABLED); // since you can see both sides in transparent mode - } - - mat->set_texture(StandardMaterial3D::TEXTURE_ALBEDO, image_data.texture); - } - } - - aiTextureType tex_diffuse = aiTextureType_DIFFUSE; - { - String filename, path; - AssimpImageData image_data; - - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_diffuse, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - - // anything transparent must be culled - if (image_data.raw_image->detect_alpha() != Image::ALPHA_NONE) { - mat->set_transparency(StandardMaterial3D::TRANSPARENCY_ALPHA_DEPTH_PRE_PASS); - mat->set_cull_mode(StandardMaterial3D::CULL_DISABLED); // since you can see both sides in transparent mode - } - - mat->set_texture(StandardMaterial3D::TEXTURE_ALBEDO, image_data.texture); - } - - aiColor4D clr_diffuse; - if (AI_SUCCESS == ai_material->Get(AI_MATKEY_COLOR_DIFFUSE, clr_diffuse)) { - if (Math::is_equal_approx(clr_diffuse.a, 1.0f) == false) { - mat->set_transparency(StandardMaterial3D::TRANSPARENCY_ALPHA_DEPTH_PRE_PASS); - mat->set_cull_mode(StandardMaterial3D::CULL_DISABLED); // since you can see both sides in transparent mode - } - mat->set_albedo(Color(clr_diffuse.r, clr_diffuse.g, clr_diffuse.b, clr_diffuse.a)); - } - } - - aiTextureType tex_normal = aiTextureType_NORMALS; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_normal, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_feature(StandardMaterial3D::Feature::FEATURE_NORMAL_MAPPING, true); - mat->set_texture(StandardMaterial3D::TEXTURE_NORMAL, image_data.texture); - } else { - aiString texture_path; - if (AI_SUCCESS == ai_material->Get(AI_MATKEY_FBX_NORMAL_TEXTURE, 
AI_PROPERTIES, texture_path)) { - if (AssimpUtils::CreateAssimpTexture(state, texture_path, filename, path, image_data)) { - mat->set_feature(StandardMaterial3D::Feature::FEATURE_NORMAL_MAPPING, true); - mat->set_texture(StandardMaterial3D::TEXTURE_NORMAL, image_data.texture); - } - } - } - } - - aiTextureType tex_normal_camera = aiTextureType_NORMAL_CAMERA; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_normal_camera, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_feature(StandardMaterial3D::Feature::FEATURE_NORMAL_MAPPING, true); - mat->set_texture(StandardMaterial3D::TEXTURE_NORMAL, image_data.texture); - } - } - - aiTextureType tex_emission_color = aiTextureType_EMISSION_COLOR; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_emission_color, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_feature(StandardMaterial3D::Feature::FEATURE_NORMAL_MAPPING, true); - mat->set_texture(StandardMaterial3D::TEXTURE_NORMAL, image_data.texture); - } - } - - aiTextureType tex_metalness = aiTextureType_METALNESS; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_metalness, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_texture(StandardMaterial3D::TEXTURE_METALLIC, image_data.texture); - } - } - - aiTextureType tex_roughness = aiTextureType_DIFFUSE_ROUGHNESS; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_roughness, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_texture(StandardMaterial3D::TEXTURE_ROUGHNESS, image_data.texture); - } - } - - aiTextureType tex_emissive = aiTextureType_EMISSIVE; - { - String filename = ""; - String path = ""; - Ref<Image> texture; - AssimpImageData image_data; - - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_emissive, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_feature(StandardMaterial3D::FEATURE_EMISSION, true); - mat->set_texture(StandardMaterial3D::TEXTURE_EMISSION, image_data.texture); - } else { - // Process emission textures - aiString texture_emissive_path; - if (AI_SUCCESS == - ai_material->Get(AI_MATKEY_FBX_MAYA_EMISSION_TEXTURE, AI_PROPERTIES, texture_emissive_path)) { - if (AssimpUtils::CreateAssimpTexture(state, texture_emissive_path, filename, path, image_data)) { - mat->set_feature(StandardMaterial3D::FEATURE_EMISSION, true); - mat->set_texture(StandardMaterial3D::TEXTURE_EMISSION, image_data.texture); - } - } else { - float pbr_emission = 0.0f; - if (AI_SUCCESS == ai_material->Get(AI_MATKEY_FBX_MAYA_EMISSIVE_FACTOR, AI_NULL, pbr_emission)) { - mat->set_emission(Color(pbr_emission, pbr_emission, pbr_emission, 1.0f)); - } - } - } - } - - aiTextureType tex_specular = aiTextureType_SPECULAR; - { - String filename, path; - Ref<ImageTexture> 
texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_specular, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_texture(StandardMaterial3D::TEXTURE_METALLIC, image_data.texture); - } - } - - aiTextureType tex_ao_map = aiTextureType_AMBIENT_OCCLUSION; - { - String filename, path; - Ref<ImageTexture> texture; - AssimpImageData image_data; - - // Process texture normal map - if (AssimpUtils::GetAssimpTexture(state, ai_material, tex_ao_map, filename, path, image_data)) { - AssimpUtils::set_texture_mapping_mode(image_data.map_mode, image_data.texture); - mat->set_feature(StandardMaterial3D::FEATURE_AMBIENT_OCCLUSION, true); - mat->set_texture(StandardMaterial3D::TEXTURE_AMBIENT_OCCLUSION, image_data.texture); - } - } - - Array array_mesh = st->commit_to_arrays(); - Array morphs; - morphs.resize(ai_mesh->mNumAnimMeshes); - Mesh::PrimitiveType primitive = Mesh::PRIMITIVE_TRIANGLES; - - for (size_t j = 0; j < ai_mesh->mNumAnimMeshes; j++) { - String ai_anim_mesh_name = AssimpUtils::get_assimp_string(ai_mesh->mAnimMeshes[j]->mName); - - if (ai_anim_mesh_name.empty()) { - ai_anim_mesh_name = String("morph_") + itos(j); - } - - Array array_copy; - array_copy.resize(RenderingServer::ARRAY_MAX); - - for (int l = 0; l < RenderingServer::ARRAY_MAX; l++) { - array_copy[l] = array_mesh[l].duplicate(true); - } - - const size_t num_vertices = ai_mesh->mAnimMeshes[j]->mNumVertices; - array_copy[Mesh::ARRAY_INDEX] = Variant(); - if (ai_mesh->mAnimMeshes[j]->HasPositions()) { - PackedVector3Array vertices; - vertices.resize(num_vertices); - for (size_t l = 0; l < num_vertices; l++) { - const aiVector3D ai_pos = ai_mesh->mAnimMeshes[j]->mVertices[l]; - Vector3 position = Vector3(ai_pos.x, ai_pos.y, ai_pos.z); - vertices.ptrw()[l] = position; - } - PackedVector3Array new_vertices = array_copy[RenderingServer::ARRAY_VERTEX].duplicate(true); - ERR_CONTINUE(vertices.size() != new_vertices.size()); - for (int32_t l = 0; l < new_vertices.size(); l++) { - Vector3 *w = new_vertices.ptrw(); - w[l] = vertices[l]; - } - array_copy[RenderingServer::ARRAY_VERTEX] = new_vertices; - } - - int32_t color_set = 0; - if (ai_mesh->mAnimMeshes[j]->HasVertexColors(color_set)) { - PackedColorArray colors; - colors.resize(num_vertices); - for (size_t l = 0; l < num_vertices; l++) { - const aiColor4D ai_color = ai_mesh->mAnimMeshes[j]->mColors[color_set][l]; - Color color = Color(ai_color.r, ai_color.g, ai_color.b, ai_color.a); - colors.ptrw()[l] = color; - } - PackedColorArray new_colors = array_copy[RenderingServer::ARRAY_COLOR].duplicate(true); - ERR_CONTINUE(colors.size() != new_colors.size()); - for (int32_t l = 0; l < colors.size(); l++) { - Color *w = new_colors.ptrw(); - w[l] = colors[l]; - } - array_copy[RenderingServer::ARRAY_COLOR] = new_colors; - } - - if (ai_mesh->mAnimMeshes[j]->HasNormals()) { - PackedVector3Array normals; - normals.resize(num_vertices); - for (size_t l = 0; l < num_vertices; l++) { - const aiVector3D ai_normal = ai_mesh->mAnimMeshes[j]->mNormals[l]; - Vector3 normal = Vector3(ai_normal.x, ai_normal.y, ai_normal.z); - normals.ptrw()[l] = normal; - } - PackedVector3Array new_normals = array_copy[RenderingServer::ARRAY_NORMAL].duplicate(true); - ERR_CONTINUE(normals.size() != new_normals.size()); - for (int l = 0; l < normals.size(); l++) { - Vector3 *w = new_normals.ptrw(); - w[l] = normals[l]; - } - array_copy[RenderingServer::ARRAY_NORMAL] = 
new_normals; - } - - if (ai_mesh->mAnimMeshes[j]->HasTangentsAndBitangents()) { - PackedColorArray tangents; - tangents.resize(num_vertices); - Color *w = tangents.ptrw(); - for (size_t l = 0; l < num_vertices; l++) { - AssimpUtils::calc_tangent_from_mesh(ai_mesh, j, l, l, w); - } - PackedFloat32Array new_tangents = array_copy[RenderingServer::ARRAY_TANGENT].duplicate(true); - ERR_CONTINUE(new_tangents.size() != tangents.size() * 4); - for (int32_t l = 0; l < tangents.size(); l++) { - new_tangents.ptrw()[l + 0] = tangents[l].r; - new_tangents.ptrw()[l + 1] = tangents[l].g; - new_tangents.ptrw()[l + 2] = tangents[l].b; - new_tangents.ptrw()[l + 3] = tangents[l].a; - } - array_copy[RenderingServer::ARRAY_TANGENT] = new_tangents; - } - - morphs[j] = array_copy; - } - mesh->add_surface_from_arrays(primitive, array_mesh, morphs, Dictionary(), mesh_flags); - mesh->surface_set_material(i, mat); - mesh->surface_set_name(i, AssimpUtils::get_assimp_string(ai_mesh->mName)); - } - - return mesh; -} - -/** - * Create a new mesh for the node supplied - */ -MeshInstance3D * -EditorSceneImporterAssimp::create_mesh(ImportState &state, const aiNode *assimp_node, const String &node_name, Node *active_node, Transform node_transform) { - /* MESH NODE */ - Ref<Mesh> mesh; - Ref<Skin> skin; - // see if we have mesh cache for this. - Vector<int> surface_indices; - - RegenerateBoneStack(state); - - // Configure indices - for (uint32_t i = 0; i < assimp_node->mNumMeshes; i++) { - int mesh_index = assimp_node->mMeshes[i]; - // create list of mesh indexes - surface_indices.push_back(mesh_index); - } - - //surface_indices.sort(); - String mesh_key; - for (int i = 0; i < surface_indices.size(); i++) { - if (i > 0) { - mesh_key += ":"; - } - mesh_key += itos(surface_indices[i]); - } - - Skeleton3D *skeleton = nullptr; - aiNode *armature = nullptr; - - if (!state.mesh_cache.has(mesh_key)) { - mesh = _generate_mesh_from_surface_indices(state, surface_indices, assimp_node, skin, skeleton); - state.mesh_cache[mesh_key] = mesh; - } - - MeshInstance3D *mesh_node = memnew(MeshInstance3D); - mesh = state.mesh_cache[mesh_key]; - mesh_node->set_mesh(mesh); - - // if we have a valid skeleton set it up - if (skin.is_valid()) { - for (uint32_t i = 0; i < assimp_node->mNumMeshes; i++) { - unsigned int mesh_index = assimp_node->mMeshes[i]; - const aiMesh *ai_mesh = state.assimp_scene->mMeshes[mesh_index]; - - // please remember bone id relative to the skin is NOT the mesh relative index. - // it is the index relative to the skeleton that is why - // we have state.bone_id_map, it allows for duplicate bone id's too :) - // hope this makes sense - - int bind_count = 0; - for (unsigned int boneId = 0; boneId < ai_mesh->mNumBones; ++boneId) { - aiBone *iterBone = ai_mesh->mBones[boneId]; - - // used to reparent mesh to the correct armature later on if assigned. 
- if (!armature) { - print_verbose("Configured mesh armature, will reparent later to armature"); - armature = iterBone->mArmature; - } - - if (skeleton) { - int id = skeleton->find_bone(AssimpUtils::get_assimp_string(iterBone->mName)); - if (id != -1) { - print_verbose("Set bind bone: mesh: " + itos(mesh_index) + " bone index: " + itos(id)); - Transform t = AssimpUtils::assimp_matrix_transform(iterBone->mOffsetMatrix); - - skin->add_bind(bind_count, t); - skin->set_bind_bone(bind_count, id); - bind_count++; - } - } - } - } - - print_verbose("Finished configuring bind pose for skin mesh"); - } - - // this code parents all meshes with bones to the armature they are for - // GLTF2 specification relies on this and we are enforcing it for FBX. - if (armature && state.flat_node_map[armature]) { - Node *armature_parent = state.flat_node_map[armature]; - print_verbose("Parented mesh " + node_name + " to armature " + armature_parent->get_name()); - // static mesh handling - armature_parent->add_child(mesh_node); - // transform must be identity - mesh_node->set_global_transform(Transform()); - mesh_node->set_name(node_name); - mesh_node->set_owner(state.root); - } else { - // static mesh handling - active_node->add_child(mesh_node); - mesh_node->set_global_transform(node_transform); - mesh_node->set_name(node_name); - mesh_node->set_owner(state.root); - } - - if (skeleton) { - print_verbose("Attempted to set skeleton path!"); - mesh_node->set_skeleton_path(mesh_node->get_path_to(skeleton)); - mesh_node->set_skin(skin); - } - - return mesh_node; -} - -/** - * Create a light for the scene - * Automatically caches lights for lookup later - */ -Node3D *EditorSceneImporterAssimp::create_light( - ImportState &state, - const String &node_name, - Transform &look_at_transform) { - Light3D *light = nullptr; - aiLight *assimp_light = state.assimp_scene->mLights[state.light_cache[node_name]]; - ERR_FAIL_COND_V(!assimp_light, nullptr); - - if (assimp_light->mType == aiLightSource_DIRECTIONAL) { - light = memnew(DirectionalLight3D); - } else if (assimp_light->mType == aiLightSource_POINT) { - light = memnew(OmniLight3D); - } else if (assimp_light->mType == aiLightSource_SPOT) { - light = memnew(SpotLight3D); - } - ERR_FAIL_COND_V(light == nullptr, nullptr); - - if (assimp_light->mType != aiLightSource_POINT) { - Vector3 pos = Vector3( - assimp_light->mPosition.x, - assimp_light->mPosition.y, - assimp_light->mPosition.z); - Vector3 look_at = Vector3( - assimp_light->mDirection.y, - assimp_light->mDirection.x, - assimp_light->mDirection.z) - .normalized(); - Vector3 up = Vector3( - assimp_light->mUp.x, - assimp_light->mUp.y, - assimp_light->mUp.z); - - look_at_transform.set_look_at(pos, look_at, up); - } - // properties for light variables should be put here. 
- // not really hugely important yet but we will need them in the future - - light->set_color( - Color(assimp_light->mColorDiffuse.r, assimp_light->mColorDiffuse.g, assimp_light->mColorDiffuse.b)); - - return light; -} - -/** - * Create camera for the scene - */ -Node3D *EditorSceneImporterAssimp::create_camera( - ImportState &state, - const String &node_name, - Transform &look_at_transform) { - aiCamera *camera = state.assimp_scene->mCameras[state.camera_cache[node_name]]; - ERR_FAIL_COND_V(!camera, nullptr); - - Camera3D *camera_node = memnew(Camera3D); - ERR_FAIL_COND_V(!camera_node, nullptr); - float near = camera->mClipPlaneNear; - if (Math::is_equal_approx(near, 0.0f)) { - near = 0.1f; - } - camera_node->set_perspective(Math::rad2deg(camera->mHorizontalFOV) * 2.0f, near, camera->mClipPlaneFar); - Vector3 pos = Vector3(camera->mPosition.x, camera->mPosition.y, camera->mPosition.z); - Vector3 look_at = Vector3(camera->mLookAt.y, camera->mLookAt.x, camera->mLookAt.z).normalized(); - Vector3 up = Vector3(camera->mUp.x, camera->mUp.y, camera->mUp.z); - - look_at_transform.set_look_at(pos + look_at_transform.origin, look_at, up); - return camera_node; -} - -/** - * Generate node - * Recursive call to iterate over all nodes - */ -void EditorSceneImporterAssimp::_generate_node( - ImportState &state, - const aiNode *assimp_node) { - ERR_FAIL_COND(assimp_node == nullptr); - state.nodes.push_back(assimp_node); - String parent_name = AssimpUtils::get_assimp_string(assimp_node->mParent->mName); - - // please note - // duplicate bone names exist - // this is why we only check if the bone exists - // so everything else is useless but the name - // please do not copy any other values from get_bone_by_name. - aiBone *parent_bone = get_bone_by_name(state.assimp_scene, assimp_node->mParent->mName); - aiBone *current_bone = get_bone_by_name(state.assimp_scene, assimp_node->mName); - - // is this an armature - // parent null - // and this is the first bone :) - if (parent_bone == nullptr && current_bone) { - state.armature_nodes.push_back(assimp_node->mParent); - print_verbose("found valid armature: " + parent_name); - } - - for (size_t i = 0; i < assimp_node->mNumChildren; i++) { - _generate_node(state, assimp_node->mChildren[i]); - } -} diff --git a/modules/assimp/godot_update_assimp.sh b/modules/assimp/godot_update_assimp.sh deleted file mode 100755 index ff8ff59e97..0000000000 --- a/modules/assimp/godot_update_assimp.sh +++ /dev/null @@ -1,262 +0,0 @@ -rm -rf ../../thirdparty/assimp -cd ../../thirdparty/ -git clone https://github.com/assimp/assimp.git -cd assimp -rm -rf code/3DSExporter.h -rm -rf code/3DSLoader.h -rm -rf code/3MFXmlTags.h -rm -rf code/ABCImporter.h -rm -rf code/ACLoader.h -rm -rf code/AMFImporter_Macro.hpp -rm -rf code/ASELoader.h -rm -rf code/assbin_chunks.h -rm -rf code/AssbinExporter.h -rm -rf code/AssbinLoader.h -rm -rf code/AssimpCExport.cpp -rm -rf code/AssxmlExporter.h -rm -rf code/B3DImporter.h -# rm -rf code/BaseProcess.cpp -# rm -rf code/BaseProcess.h -# rm -rf code/Bitmap.cpp -rm -rf code/BlenderBMesh.cpp -rm -rf code/BlenderBMesh.h -rm -rf code/BlenderCustomData.cpp -rm -rf code/BlenderCustomData.h -rm -rf code/BlenderIntermediate.h -rm -rf code/BlenderLoader.h -rm -rf code/BlenderModifier.h -rm -rf code/BlenderSceneGen.h -rm -rf code/BlenderTessellator.h -rm -rf code/BVHLoader.h -rm -rf code/C4DImporter.h -# rm -rf code/CalcTangentsProcess.h -# rm -rf code/CInterfaceIOWrapper.cpp -# rm -rf code/CInterfaceIOWrapper.h -rm -rf code/COBLoader.h -rm -rf code/COBScene.h -rm 
-rf code/ColladaExporter.h -rm -rf code/ColladaLoader.h -# rm -rf code/ComputeUVMappingProcess.h -# rm -rf code/ConvertToLHProcess.h -# rm -rf code/CreateAnimMesh.cpp -rm -rf code/CSMLoader.h -rm -rf code/D3MFExporter.h -rm -rf code/D3MFImporter.h -rm -rf code/D3MFOpcPackage.h -# rm -rf code/DeboneProcess.h -# rm -rf code/DefaultIOStream.cpp -# rm -rf code/DefaultIOSystem.cpp -# rm -rf code/DefaultProgressHandler.h -# rm -rf code/DropFaceNormalsProcess.cpp -# rm -rf code/DropFaceNormalsProcess.h -rm -rf code/DXFHelper.h -rm -rf code/DXFLoader.h -# rm -rf code/EmbedTexturesProcess.cpp -# rm -rf code/EmbedTexturesProcess.h -# rm -rf code/FBXCommon.h -# rm -rf code/FBXCompileConfig.h -# rm -rf code/FBXDeformer.cpp -# rm -rf code/FBXDocumentUtil.cpp -# rm -rf code/FBXDocumentUtil.h -# rm -rf code/FBXExporter.h -# rm -rf code/FBXExportNode.h -# rm -rf code/FBXExportProperty.h -# rm -rf code/FBXImporter.cpp -# rm -rf code/FBXImporter.h -# rm -rf code/FBXImportSettings.h -# rm -rf code/FBXMeshGeometry.h -# rm -rf code/FBXModel.cpp -# rm -rf code/FBXNodeAttribute.cpp -# rm -rf code/FBXParser.h -# rm -rf code/FBXProperties.cpp -# rm -rf code/FBXProperties.h -# rm -rf code/FBXTokenizer.cpp -# rm -rf code/FBXTokenizer.h -# rm -rf code/FBXUtil.cpp -# rm -rf code/FBXUtil.h -# rm -rf code/FileLogStream.h -# rm -rf code/FindDegenerates.h -# rm -rf code/FindInstancesProcess.h -# rm -rf code/FindInvalidDataProcess.h -rm -rf code/FIReader.hpp -# rm -rf code/FixNormalsStep.cpp -# rm -rf code/FixNormalsStep.h -# rm -rf code/GenFaceNormalsProcess.cpp -# rm -rf code/GenFaceNormalsProcess.h -# rm -rf code/GenVertexNormalsProcess.cpp -# rm -rf code/GenVertexNormalsProcess.h -rm -rf code/glTF2Asset.h -rm -rf code/glTF2Asset.inl -rm -rf code/glTF2AssetWriter.inl -rm -rf code/glTF2Exporter.cpp -rm -rf code/glTF2Importer.cpp -rm -rf code/glTF2AssetWriter.h -rm -rf code/glTFAsset.h -rm -rf code/glTFAsset.inl -rm -rf code/glTFAssetWriter.inl -rm -rf code/glTFExporter.cpp -rm -rf code/glTFImporter.cpp -rm -rf code/glTF2Exporter.h -rm -rf code/glTF2Importer.h -rm -rf code/glTFAssetWriter.h -rm -rf code/glTFExporter.h -rm -rf code/glTFImporter.h -rm -rf code/HalfLifeFileData.h -rm -rf code/HMPFileData.h -rm -rf code/HMPLoader.h -rm -rf code/HMPLoader.cpp -rm -rf code/IFF.h -# rm -rf code/Importer.h -# rm -rf code/ImproveCacheLocality.h -rm -rf code/IRRLoader.h -rm -rf code/IRRMeshLoader.h -rm -rf code/IRRShared.h -# rm -rf code/JoinVerticesProcess.h -# rm -rf code/LimitBoneWeightsProcess.cpp -# rm -rf code/LimitBoneWeightsProcess.h -rm -rf code/LWSLoader.h -rm -rf code/makefile.mingw -# rm -rf code/MakeVerboseFormat.cpp -# rm -rf code/MakeVerboseFormat.h -# rm -rf code/MaterialSystem.h -rm -rf code/MD2FileData.h -rm -rf code/MD2Loader.h -rm -rf code/MD2NormalTable.h -rm -rf code/MD3FileData.h -rm -rf code/MD3Loader.h -rm -rf code/MD4FileData.h -rm -rf code/MD5Loader.h -rm -rf code/MD5Parser.cpp -rm -rf code/MDCFileData.h -rm -rf code/MDCLoader.h -rm -rf code/MDLDefaultColorMap.h -# rm -rf code/MMDCpp14.h -# rm -rf code/MMDImporter.h -rm -rf code/MS3DLoader.h -rm -rf code/NDOLoader.h -rm -rf code/NFFLoader.h -rm -rf code/ObjExporter.h -rm -rf code/ObjFileImporter.h -rm -rf code/ObjFileMtlImporter.h -rm -rf code/ObjFileParser.h -rm -rf code/ObjTools.h -rm -rf code/ObjExporter.cpp -rm -rf code/ObjFileImporter.cpp -rm -rf code/ObjFileMtlImporter.cpp -rm -rf code/ObjFileParser.cpp -rm -rf code/OFFLoader.h -rm -rf code/OFFLoader.cpp -rm -rf code/OgreImporter.cpp -rm -rf code/OgreImporter.h -rm -rf code/OgreParsingUtils.h -rm 
-rf code/OgreXmlSerializer.h -rm -rf code/OgreXmlSerializer.cpp -rm -rf code/OgreBinarySerializer.cpp -rm -rf code/OpenGEXExporter.cpp -rm -rf code/OpenGEXExporter.h -rm -rf code/OpenGEXImporter.h -rm -rf code/OpenGEXStructs.h -rm -rf code/OpenGEXImporter.cpp -# rm -rf code/OptimizeGraph.h -# rm -rf code/OptimizeMeshes.cpp -# rm -rf code/OptimizeMeshes.h -rm -rf code/PlyExporter.h -rm -rf code/PlyLoader.h -# rm -rf code/PolyTools.h -# rm -rf code/PostStepRegistry.cpp -# rm -rf code/PretransformVertices.h -rm -rf code/Q3BSPFileData.h -rm -rf code/Q3BSPFileImporter.h -rm -rf code/Q3BSPFileParser.cpp -rm -rf code/Q3BSPFileParser.h -rm -rf code/Q3BSPZipArchive.cpp -rm -rf code/Q3BSPZipArchive.h -rm -rf code/Q3DLoader.h -rm -rf code/Q3DLoader.cpp -rm -rf code/Q3BSPFileImporter.cpp -rm -rf code/RawLoader.h -# rm -rf code/RemoveComments.cpp -# rm -rf code/RemoveRedundantMaterials.cpp -# rm -rf code/RemoveRedundantMaterials.h -# rm -rf code/RemoveVCProcess.h -# rm -rf code/ScaleProcess.cpp -# rm -rf code/ScaleProcess.h -# rm -rf code/scene.cpp -# rm -rf code/ScenePreprocessor.cpp -# rm -rf code/ScenePreprocessor.h -# rm -rf code/ScenePrivate.h -# rm -rf code/SGSpatialSort.cpp -rm -rf code/SIBImporter.h -rm -rf code/SMDLoader.cpp -# rm -rf code/simd.cpp -# rm -rf code/simd.h -# rm -rf code/SortByPTypeProcess.h -# rm -rf code/SplitByBoneCountProcess.h -# rm -rf code/SplitLargeMeshes.h -# rm -rf code/StdOStreamLogStream.h -rm -rf code/StepExporter.h -rm -rf code/StepExporter.cpp -rm -rf code/STLExporter.cpp -rm -rf code/STLExporter.h -rm -rf code/STLLoader.h -rm -rf code/STLLoader.cpp -# rm -rf code/TargetAnimation.cpp -# rm -rf code/TargetAnimation.h -rm -rf code/TerragenLoader.h -rm -rf code/TerragenLoader.cpp -# rm -rf code/TextureTransform.h -# rm -rf code/TriangulateProcess.h -rm -rf code/UnrealLoader.h -# rm -rf code/ValidateDataStructure.h -# rm -rf code/Version.cpp -# rm -rf code/VertexTriangleAdjacency.cpp -# rm -rf code/VertexTriangleAdjacency.h -# rm -rf code/Win32DebugLogStream.h -rm -rf code/X3DImporter_Macro.hpp -rm -rf code/X3DImporter_Metadata.cpp -rm -rf code/X3DImporter_Networking.cpp -rm -rf code/X3DImporter_Texturing.cpp -rm -rf code/X3DImporter_Shape.cpp -rm -rf code/X3DImporter_Rendering.cpp -rm -rf code/X3DImporter_Postprocess.cpp -rm -rf code/X3DImporter_Light.cpp -rm -rf code/X3DImporter_Group.cpp -rm -rf code/X3DImporter_Geometry3D.cpp -rm -rf code/X3DImporter_Geometry2D.cpp -rm -rf code/X3DImporter.cpp -rm -rf code/X3DExporter.cpp -rm -rf code/X3DVocabulary.cpp -rm -rf code/XFileExporter.h -rm -rf code/XFileExporter.cpp -rm -rf code/XFileHelper.h -rm -rf code/XFileHelper.cpp -rm -rf code/XFileImporter.h -rm -rf code/XFileImporter.cpp -rm -rf code/XFileParser.h -rm -rf code/XFileParser.cpp -rm -rf code/XGLLoader.h -rm -rf code/XGLLoader.cpp -rm -rf code/Importer -rm -rf .git -rm -rf cmake-modules -rm -rf doc -rm -rf packaging -rm -rf port -rm -rf samples -rm -rf scripts -rm -rf test -rm -rf tools -rm -rf contrib/zlib -rm -rf contrib/android-cmake -rm -rf contrib/gtest -rm -rf contrib/clipper -rm -rf contrib/irrXML -rm -rf contrib/Open3DGC -rm -rf contrib/openddlparser -rm -rf contrib/poly2tri -#rm -rf contrib/rapidjson -rm -rf contrib/unzip -rm -rf contrib/zip -rm -rf contrib/stb_image -rm .travis* - diff --git a/modules/assimp/import_utils.h b/modules/assimp/import_utils.h deleted file mode 100644 index dc85d06fed..0000000000 --- a/modules/assimp/import_utils.h +++ /dev/null @@ -1,463 +0,0 @@ -/*************************************************************************/ -/* 
import_utils.h */ -/*************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -/*************************************************************************/ - -#ifndef IMPORT_UTILS_IMPORTER_ASSIMP_H -#define IMPORT_UTILS_IMPORTER_ASSIMP_H - -#include "core/io/image_loader.h" -#include "import_state.h" - -#include <assimp/SceneCombiner.h> -#include <assimp/cexport.h> -#include <assimp/cimport.h> -#include <assimp/matrix4x4.h> -#include <assimp/pbrmaterial.h> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/Importer.hpp> -#include <assimp/LogStream.hpp> -#include <assimp/Logger.hpp> -#include <string> - -using namespace AssimpImporter; - -#define AI_PROPERTIES aiTextureType_UNKNOWN, 0 -#define AI_NULL 0, 0 -#define AI_MATKEY_FBX_MAYA_BASE_COLOR_FACTOR "$raw.Maya|baseColor" -#define AI_MATKEY_FBX_MAYA_METALNESS_FACTOR "$raw.Maya|metalness" -#define AI_MATKEY_FBX_MAYA_DIFFUSE_ROUGHNESS_FACTOR "$raw.Maya|diffuseRoughness" - -#define AI_MATKEY_FBX_MAYA_EMISSION_TEXTURE "$raw.Maya|emissionColor|file" -#define AI_MATKEY_FBX_MAYA_EMISSIVE_FACTOR "$raw.Maya|emission" -#define AI_MATKEY_FBX_MAYA_METALNESS_TEXTURE "$raw.Maya|metalness|file" -#define AI_MATKEY_FBX_MAYA_METALNESS_UV_XFORM "$raw.Maya|metalness|uvtrafo" -#define AI_MATKEY_FBX_MAYA_DIFFUSE_ROUGHNESS_TEXTURE "$raw.Maya|diffuseRoughness|file" -#define AI_MATKEY_FBX_MAYA_DIFFUSE_ROUGHNESS_UV_XFORM "$raw.Maya|diffuseRoughness|uvtrafo" -#define AI_MATKEY_FBX_MAYA_BASE_COLOR_TEXTURE "$raw.Maya|baseColor|file" -#define AI_MATKEY_FBX_MAYA_BASE_COLOR_UV_XFORM "$raw.Maya|baseColor|uvtrafo" -#define AI_MATKEY_FBX_MAYA_NORMAL_TEXTURE "$raw.Maya|normalCamera|file" -#define AI_MATKEY_FBX_MAYA_NORMAL_UV_XFORM "$raw.Maya|normalCamera|uvtrafo" - -#define AI_MATKEY_FBX_NORMAL_TEXTURE "$raw.Maya|normalCamera|file" -#define AI_MATKEY_FBX_NORMAL_UV_XFORM "$raw.Maya|normalCamera|uvtrafo" - -#define AI_MATKEY_FBX_MAYA_STINGRAY_DISPLACEMENT_SCALING_FACTOR "$raw.Maya|displacementscaling" -#define AI_MATKEY_FBX_MAYA_STINGRAY_BASE_COLOR_FACTOR "$raw.Maya|base_color" 
-#define AI_MATKEY_FBX_MAYA_STINGRAY_EMISSIVE_FACTOR "$raw.Maya|emissive" -#define AI_MATKEY_FBX_MAYA_STINGRAY_METALLIC_FACTOR "$raw.Maya|metallic" -#define AI_MATKEY_FBX_MAYA_STINGRAY_ROUGHNESS_FACTOR "$raw.Maya|roughness" -#define AI_MATKEY_FBX_MAYA_STINGRAY_EMISSIVE_INTENSITY_FACTOR "$raw.Maya|emissive_intensity" - -#define AI_MATKEY_FBX_MAYA_STINGRAY_NORMAL_TEXTURE "$raw.Maya|TEX_normal_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_NORMAL_UV_XFORM "$raw.Maya|TEX_normal_map|uvtrafo" -#define AI_MATKEY_FBX_MAYA_STINGRAY_COLOR_TEXTURE "$raw.Maya|TEX_color_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_COLOR_UV_XFORM "$raw.Maya|TEX_color_map|uvtrafo" -#define AI_MATKEY_FBX_MAYA_STINGRAY_METALLIC_TEXTURE "$raw.Maya|TEX_metallic_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_METALLIC_UV_XFORM "$raw.Maya|TEX_metallic_map|uvtrafo" -#define AI_MATKEY_FBX_MAYA_STINGRAY_ROUGHNESS_TEXTURE "$raw.Maya|TEX_roughness_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_ROUGHNESS_UV_XFORM "$raw.Maya|TEX_roughness_map|uvtrafo" -#define AI_MATKEY_FBX_MAYA_STINGRAY_EMISSIVE_TEXTURE "$raw.Maya|TEX_emissive_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_EMISSIVE_UV_XFORM "$raw.Maya|TEX_emissive_map|uvtrafo" -#define AI_MATKEY_FBX_MAYA_STINGRAY_AO_TEXTURE "$raw.Maya|TEX_ao_map|file" -#define AI_MATKEY_FBX_MAYA_STINGRAY_AO_UV_XFORM "$raw.Maya|TEX_ao_map|uvtrafo" - -/** - * Assimp Utils - * Conversion tools / glue code to convert from assimp to godot -*/ -class AssimpUtils { -public: - /** - * calculate tangents for mesh data from assimp data - */ - static void calc_tangent_from_mesh(const aiMesh *ai_mesh, int i, int tri_index, int index, Color *w) { - const aiVector3D normals = ai_mesh->mAnimMeshes[i]->mNormals[tri_index]; - const Vector3 godot_normal = Vector3(normals.x, normals.y, normals.z); - const aiVector3D tangent = ai_mesh->mAnimMeshes[i]->mTangents[tri_index]; - const Vector3 godot_tangent = Vector3(tangent.x, tangent.y, tangent.z); - const aiVector3D bitangent = ai_mesh->mAnimMeshes[i]->mBitangents[tri_index]; - const Vector3 godot_bitangent = Vector3(bitangent.x, bitangent.y, bitangent.z); - float d = godot_normal.cross(godot_tangent).dot(godot_bitangent) > 0.0f ? 
1.0f : -1.0f; - Color plane_tangent = Color(tangent.x, tangent.y, tangent.z, d); - w[index] = plane_tangent; - } - - struct AssetImportFbx { - enum ETimeMode { - TIME_MODE_DEFAULT = 0, - TIME_MODE_120 = 1, - TIME_MODE_100 = 2, - TIME_MODE_60 = 3, - TIME_MODE_50 = 4, - TIME_MODE_48 = 5, - TIME_MODE_30 = 6, - TIME_MODE_30_DROP = 7, - TIME_MODE_NTSC_DROP_FRAME = 8, - TIME_MODE_NTSC_FULL_FRAME = 9, - TIME_MODE_PAL = 10, - TIME_MODE_CINEMA = 11, - TIME_MODE_1000 = 12, - TIME_MODE_CINEMA_ND = 13, - TIME_MODE_CUSTOM = 14, - TIME_MODE_TIME_MODE_COUNT = 15 - }; - enum UpAxis { - UP_VECTOR_AXIS_X = 1, - UP_VECTOR_AXIS_Y = 2, - UP_VECTOR_AXIS_Z = 3 - }; - enum FrontAxis { - FRONT_PARITY_EVEN = 1, - FRONT_PARITY_ODD = 2, - }; - - enum CoordAxis { - COORD_RIGHT = 0, - COORD_LEFT = 1 - }; - }; - - /** Get assimp string - * automatically filters the string data - */ - static String get_assimp_string(const aiString &p_string) { - //convert an assimp String to a Godot String - String name; - name.parse_utf8(p_string.C_Str() /*,p_string.length*/); - if (name.find(":") != -1) { - String replaced_name = name.split(":")[1]; - print_verbose("Replacing " + name + " containing : with " + replaced_name); - name = replaced_name; - } - - return name; - } - - static String get_anim_string_from_assimp(const aiString &p_string) { - String name; - name.parse_utf8(p_string.C_Str() /*,p_string.length*/); - if (name.find(":") != -1) { - String replaced_name = name.split(":")[1]; - print_verbose("Replacing " + name + " containing : with " + replaced_name); - name = replaced_name; - } - return name; - } - - /** - * No filter logic get_raw_string_from_assimp - * This just convers the aiString to a parsed utf8 string - * Without removing special chars etc - */ - static String get_raw_string_from_assimp(const aiString &p_string) { - String name; - name.parse_utf8(p_string.C_Str() /*,p_string.length*/); - return name; - } - - static Ref<Animation> import_animation(const String &p_path, uint32_t p_flags, int p_bake_fps) { - return Ref<Animation>(); - } - - /** - * Converts aiMatrix4x4 to godot Transform - */ - static const Transform assimp_matrix_transform(const aiMatrix4x4 p_matrix) { - aiMatrix4x4 matrix = p_matrix; - Transform xform; - xform.set(matrix.a1, matrix.a2, matrix.a3, matrix.b1, matrix.b2, matrix.b3, matrix.c1, matrix.c2, matrix.c3, matrix.a4, matrix.b4, matrix.c4); - return xform; - } - - /** Get fbx fps for time mode meta data - */ - static float get_fbx_fps(int32_t time_mode, const aiScene *p_scene) { - switch (time_mode) { - case AssetImportFbx::TIME_MODE_DEFAULT: - return 24; //hack - case AssetImportFbx::TIME_MODE_120: - return 120; - case AssetImportFbx::TIME_MODE_100: - return 100; - case AssetImportFbx::TIME_MODE_60: - return 60; - case AssetImportFbx::TIME_MODE_50: - return 50; - case AssetImportFbx::TIME_MODE_48: - return 48; - case AssetImportFbx::TIME_MODE_30: - return 30; - case AssetImportFbx::TIME_MODE_30_DROP: - return 30; - case AssetImportFbx::TIME_MODE_NTSC_DROP_FRAME: - return 29.9700262f; - case AssetImportFbx::TIME_MODE_NTSC_FULL_FRAME: - return 29.9700262f; - case AssetImportFbx::TIME_MODE_PAL: - return 25; - case AssetImportFbx::TIME_MODE_CINEMA: - return 24; - case AssetImportFbx::TIME_MODE_1000: - return 1000; - case AssetImportFbx::TIME_MODE_CINEMA_ND: - return 23.976f; - case AssetImportFbx::TIME_MODE_CUSTOM: - int32_t frame_rate = -1; - p_scene->mMetaData->Get("FrameRate", frame_rate); - return frame_rate; - } - return 0; - } - - /** - * Get global transform for the current node - so we 
can use world space rather than - * local space coordinates - * useful if you need global - although recommend using local wherever possible over global - * as you could break fbx scaling :) - */ - static Transform _get_global_assimp_node_transform(const aiNode *p_current_node) { - aiNode const *current_node = p_current_node; - Transform xform; - while (current_node != nullptr) { - xform = assimp_matrix_transform(current_node->mTransformation) * xform; - current_node = current_node->mParent; - } - return xform; - } - - /** - * Find hardcoded textures from assimp which could be in many different directories - */ - static void find_texture_path(const String &p_path, _Directory &dir, String &path, bool &found, String extension) { - Vector<String> paths; - paths.push_back(path.get_basename() + extension); - paths.push_back(path + extension); - paths.push_back(path); - paths.push_back(p_path.get_base_dir().plus_file(path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file(path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file(path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("textures/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("textures/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("textures/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("Textures/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("Textures/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("Textures/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("../Textures/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../Textures/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../Textures/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("../textures/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../textures/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../textures/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("texture/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("texture/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("texture/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("Texture/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("Texture/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("Texture/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("../Texture/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../Texture/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../Texture/" + path.get_file())); - paths.push_back(p_path.get_base_dir().plus_file("../texture/" + path.get_file().get_basename() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../texture/" + path.get_file() + extension)); - paths.push_back(p_path.get_base_dir().plus_file("../texture/" + path.get_file())); - for (int i = 0; i < paths.size(); i++) { - if (dir.file_exists(paths[i])) { - found = true; - path = 
paths[i]; - return; - } - } - } - - /** find the texture path for the supplied fbx path inside godot - * very simple lookup for subfolders etc for a texture which may or may not be in a directory - */ - static void find_texture_path(const String &r_p_path, String &r_path, bool &r_found) { - _Directory dir; - - List<String> exts; - ImageLoader::get_recognized_extensions(&exts); - - Vector<String> split_path = r_path.get_basename().split("*"); - if (split_path.size() == 2) { - r_found = true; - return; - } - - if (dir.file_exists(r_p_path.get_base_dir() + r_path.get_file())) { - r_path = r_p_path.get_base_dir() + r_path.get_file(); - r_found = true; - return; - } - - for (int32_t i = 0; i < exts.size(); i++) { - if (r_found) { - return; - } - find_texture_path(r_p_path, dir, r_path, r_found, "." + exts[i]); - } - } - - /** - * set_texture_mapping_mode - * Helper to check the mapping mode of the texture (repeat, clamp and mirror) - */ - static void set_texture_mapping_mode(aiTextureMapMode *map_mode, Ref<ImageTexture> texture) { - ERR_FAIL_COND(texture.is_null()); - ERR_FAIL_COND(map_mode == nullptr); - // FIXME: Commented out during Vulkan port. - /* - aiTextureMapMode tex_mode = map_mode[0]; - - int32_t flags = Texture2D::FLAGS_DEFAULT; - if (tex_mode == aiTextureMapMode_Wrap) { - //Default - } else if (tex_mode == aiTextureMapMode_Clamp) { - flags = flags & ~Texture2D::FLAG_REPEAT; - } else if (tex_mode == aiTextureMapMode_Mirror) { - flags = flags | Texture2D::FLAG_MIRRORED_REPEAT; - } - texture->set_flags(flags); - */ - } - - /** - * Load or load from cache image :) - */ - static Ref<Image> load_image(ImportState &state, const aiScene *p_scene, String p_path) { - Map<String, Ref<Image>>::Element *match = state.path_to_image_cache.find(p_path); - - // if our cache contains this image then don't bother - if (match) { - return match->get(); - } - - Vector<String> split_path = p_path.get_basename().split("*"); - if (split_path.size() == 2) { - size_t texture_idx = split_path[1].to_int(); - ERR_FAIL_COND_V(texture_idx >= p_scene->mNumTextures, Ref<Image>()); - aiTexture *tex = p_scene->mTextures[texture_idx]; - String filename = AssimpUtils::get_raw_string_from_assimp(tex->mFilename); - filename = filename.get_file(); - print_verbose("Open Asset Import: Loading embedded texture " + filename); - if (tex->mHeight == 0) { - if (tex->CheckFormat("png")) { - ERR_FAIL_COND_V(Image::_png_mem_loader_func == nullptr, Ref<Image>()); - Ref<Image> img = Image::_png_mem_loader_func((uint8_t *)tex->pcData, tex->mWidth); - ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); - state.path_to_image_cache.insert(p_path, img); - return img; - } else if (tex->CheckFormat("jpg")) { - ERR_FAIL_COND_V(Image::_jpg_mem_loader_func == nullptr, Ref<Image>()); - Ref<Image> img = Image::_jpg_mem_loader_func((uint8_t *)tex->pcData, tex->mWidth); - ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); - state.path_to_image_cache.insert(p_path, img); - return img; - } else if (tex->CheckFormat("dds")) { - ERR_FAIL_COND_V_MSG(true, Ref<Image>(), "Open Asset Import: Embedded dds not implemented"); - } - } else { - Ref<Image> img; - img.instance(); - PackedByteArray arr; - uint32_t size = tex->mWidth * tex->mHeight; - arr.resize(size); - memcpy(arr.ptrw(), tex->pcData, size); - ERR_FAIL_COND_V(arr.size() % 4 != 0, Ref<Image>()); - //ARGB8888 to RGBA8888 - for (int32_t i = 0; i < arr.size() / 4; i++) { - arr.ptrw()[(4 * i) + 3] = arr[(4 * i) + 0]; - arr.ptrw()[(4 * i) + 0] = arr[(4 * i) + 1]; - arr.ptrw()[(4 * i) + 1] = arr[(4 * i) + 2]; - 
arr.ptrw()[(4 * i) + 2] = arr[(4 * i) + 3]; - } - img->create(tex->mWidth, tex->mHeight, true, Image::FORMAT_RGBA8, arr); - ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); - state.path_to_image_cache.insert(p_path, img); - return img; - } - return Ref<Image>(); - } else { - Ref<Texture2D> texture = ResourceLoader::load(p_path); - ERR_FAIL_COND_V(texture.is_null(), Ref<Image>()); - Ref<Image> image = texture->get_data(); - ERR_FAIL_COND_V(image.is_null(), Ref<Image>()); - state.path_to_image_cache.insert(p_path, image); - return image; - } - - return Ref<Image>(); - } - - /* create texture from assimp data, if found in path */ - static bool CreateAssimpTexture( - AssimpImporter::ImportState &state, - aiString texture_path, - String &filename, - String &path, - AssimpImageData &image_state) { - filename = get_raw_string_from_assimp(texture_path); - path = state.path.get_base_dir().plus_file(filename.replace("\\", "/")); - bool found = false; - find_texture_path(state.path, path, found); - if (found) { - image_state.raw_image = AssimpUtils::load_image(state, state.assimp_scene, path); - if (image_state.raw_image.is_valid()) { - image_state.texture.instance(); - image_state.texture->create_from_image(image_state.raw_image); - // FIXME: Commented out during Vulkan port. - //image_state.texture->set_storage(ImageTexture::STORAGE_COMPRESS_LOSSY); - return true; - } - } - - return false; - } - /** GetAssimpTexture - * Designed to retrieve textures for you - */ - static bool GetAssimpTexture( - AssimpImporter::ImportState &state, - aiMaterial *ai_material, - aiTextureType texture_type, - String &filename, - String &path, - AssimpImageData &image_state) { - aiString ai_filename = aiString(); - if (AI_SUCCESS == ai_material->GetTexture(texture_type, 0, &ai_filename, nullptr, nullptr, nullptr, nullptr, image_state.map_mode)) { - return CreateAssimpTexture(state, ai_filename, filename, path, image_state); - } - - return false; - } -}; - -#endif // IMPORT_UTILS_IMPORTER_ASSIMP_H diff --git a/modules/fbx/README.md b/modules/fbx/README.md new file mode 100644 index 0000000000..2a2f186463 --- /dev/null +++ b/modules/fbx/README.md @@ -0,0 +1,196 @@ +# Open Source FBX Specification for the Importer + +The goal of this document is to make everything in FBX clearly stated, any errors will be corrected over time this +is a first draft. + +## fbx parser - originally from assimp + +- Folder: /modules/fbx/fbx_parser +- Upstream: assimp +- Original Version: git (308db73d0b3c2d1870cd3e465eaa283692a4cf23, 2019) +- License: BSD-3-Clause + +This can never be updated from upstream, we have heavily modified the parser to provide memory safety and add some +functionality. If anything we should give this parser back to assimp at some point as it has a lot of new features. + +# Updating assimp fbx parser + +Don't. it's not possible the code is rewritten in many areas to remove thirdparty deps and various bugs are fixed. + +Many days were put into rewriting the parser to use safe code and safe memory accessors. + +# File Headers + +FBX Binaries start with the header "Kaydara FBX Binary" + +FBX ASCII documents contain a larger header, sometimes with copyright information for a file. + +Detecting these is pretty simple. + +# What is an OP link? +It's an object to property link. It lists the properties for that object in some cases. Source and destination based by +ID. + +# What is a OO link? +Its an object to object link, it contains the ID source and destination ID. 
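+
+For illustration, both kinds of links live in the `Connections` section of the document. Below is a hand-written ASCII sketch (the IDs and object types are made up, not taken from a real file):
+
+```
+Connections:  {
+	; OO link: object 105 (e.g. a Geometry) is connected to object 203 (e.g. a Model)
+	C: "OO",105,203
+	; OP link: object 301 (e.g. a Texture) is connected to the "DiffuseColor" property of object 404 (a Material)
+	C: "OP",301,404, "DiffuseColor"
+}
+```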
+
+# FBX Node connections
+
+Nodes in FBX are connected using OO links; this means Object to Object.
+
+FBX has a single other kind of link, the OP (Object to Property) link, which can carry
+extra attributes, defaults, or even some simple settings.
+
+# Bones / Joints / Locators
+
+Bones in FBX are nodes. They initially have the Model:: type and link to a SubDeformer; the sub-deformer
+is part of the skin. There is also an explicit Skin link, which then links to the geometry using OO links in the
+document.
+
+# Rotation Order in FBX compared to Godot
+
+**Godot uses the rotation order:** YXZ
+
+**FBX has a dynamic rotation order to prevent gimbal lock with complex animations**
+
+```cpp
+enum RotOrder {
+	RotOrder_EulerXYZ = 0,
+	RotOrder_EulerXZY,
+	RotOrder_EulerYZX,
+	RotOrder_EulerYXZ,
+	RotOrder_EulerZXY,
+	RotOrder_EulerZYX,
+	RotOrder_SphericXYZ // nobody uses this - as far as we can tell
+};
+```
+
+# Pivot transforms
+
+### Pivot description:
+- Maya and 3ds Max consider everything to be in node space (bones, joints, skins, lights, cameras, etc.)
+- Everything is a node, which essentially means nodes behave like auto or variant types.
+- They are local to the node in the tree.
+- They are used to calculate where a node is in space.
+```c++
+// For a better reference you can check editor_scene_importer_fbx.h
+// references: GenFBXTransform / read the data in
+// references: ComputePivotTransform / run the calculation
+// This is the local pivot transform for the node, not the global transform
+Transform ComputePivotTransform(
+		Transform chain[TransformationComp_MAXIMUM],
+		Transform &geometric_transform) {
+	// Maya pivots
+	Transform T = chain[TransformationComp_Translation];
+	Transform Roff = chain[TransformationComp_RotationOffset];
+	Transform Rp = chain[TransformationComp_RotationPivot];
+	Transform Rpre = chain[TransformationComp_PreRotation];
+	Transform R = chain[TransformationComp_Rotation];
+	Transform Rpost = chain[TransformationComp_PostRotation];
+	Transform Soff = chain[TransformationComp_ScalingOffset];
+	Transform Sp = chain[TransformationComp_ScalingPivot];
+	Transform S = chain[TransformationComp_Scaling];
+
+	// 3ds Max pivots
+	Transform OT = chain[TransformationComp_GeometricTranslation];
+	Transform OR = chain[TransformationComp_GeometricRotation];
+	Transform OS = chain[TransformationComp_GeometricScaling];
+
+	// Calculate the 3ds Max pivot transform - it uses geometric space
+	// (i.e. it doesn't affect child nodes, only the current node)
+	geometric_transform = OT * OR * OS;
+	// Calculate the standard Maya pivot transform
+	return T * Roff * Rp * Rpre * R * Rpost.inverse() * Rp.inverse() * Soff * Sp * S * Sp.inverse();
+}
+```
+
+# Transform inheritance for FBX Nodes
+
+The goal of the section below is to explain why FBX implements this in the first place.
+
+The use case is to give nodes an option to override their local scaling, or to make scaling influenced by orientation, which I would imagine would be useful when you need to rotate a node and have the child scale based on the orientation rather than setting it on the rotation matrix planes.
+```cpp
+// kept verbose on purpose, this code must remain clear
+enum TransformInheritance {
+	// Parent Rotation * Local Rotation * Parent Scale * Local Scale -- Parent Rotation Offset * Parent ScalingOffset
+	// (local scaling is offset by the rotation of the parent node)
+	Transform_RrSs = 0,
+	// Parent Rotation * Parent Scale * Local Rotation * Local Scale -- Parent * Local (normal mode)
+	Transform_RSrs = 1,
+	// Parent Rotation * Local Rotation * Local Scale -- node transform scale is the only relevant component
+	Transform_Rrs = 2,
+	TransformInheritance_MAX // end-of-enum sentinel
+};
+```
+
+# Axis in FBX
+
+Godot has one fixed format for the declared axes:
+
+AXIS X, AXIS Y, -AXIS Z
+
+FBX supports any axis configuration you can think of, since it has to support both Maya and 3ds Max.
+
+#### FBX File Header
+```json
+GlobalSettings: {
+	Version: 1000
+	Properties70: {
+		P: "UpAxis", "int", "Integer", "",1
+		P: "UpAxisSign", "int", "Integer", "",1
+		P: "FrontAxis", "int", "Integer", "",2
+		P: "FrontAxisSign", "int", "Integer", "",1
+		P: "CoordAxis", "int", "Integer", "",0
+		P: "CoordAxisSign", "int", "Integer", "",1
+		P: "OriginalUpAxis", "int", "Integer", "",1
+		P: "OriginalUpAxisSign", "int", "Integer", "",1
+		P: "UnitScaleFactor", "double", "Number", "",1
+		P: "OriginalUnitScaleFactor", "double", "Number", "",1
+		P: "AmbientColor", "ColorRGB", "Color", "",0,0,0
+		P: "DefaultCamera", "KString", "", "", "Producer Perspective"
+		P: "TimeMode", "enum", "", "",6
+		P: "TimeProtocol", "enum", "", "",2
+		P: "SnapOnFrameMode", "enum", "", "",0
+		P: "TimeSpanStart", "KTime", "Time", "",0
+		P: "TimeSpanStop", "KTime", "Time", "",92372316000
+		P: "CustomFrameRate", "double", "Number", "",-1
+		P: "TimeMarker", "Compound", "", ""
+		P: "CurrentTimeMarker", "int", "Integer", "",-1
+	}
+}
+```
+
+#### FBX FILE - the axes are declared dynamically in the FBX header above
+Coord is X
+Up is Y
+Front is Z
+
+#### GODOT - constant reference point
+Coord is X positive,
+Y is up positive,
+Front is -Z negative
+
+### Explaining MeshGeometry indexing
+
+Reference types declared (a small sketch of how these are resolved follows this list):
+- Direct (directly related to the mapping information type)
+- IndexToDirect (a map with key and value; the meaning depends on the MappingInformationType)
+
+A ControlPoint is a vertex. Mapping information types:
+* None - The mapping is undetermined.
+* ByVertex - There will be one mapping coordinate for each surface control point/vertex.
+	* If you have the Direct reference type, the data is addressed by the vertex index [x].
+	* If you have the IndexToDirect reference type, the data (e.g. the UV) is looked up through the index array.
+* ByPolygonVertex - There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part. (Sorted by polygon, referencing vertex.)
+* ByPolygon - There can be only one mapping coordinate for the whole polygon.
+	* One mapping per polygon: polygon x has this normal x.
+	* For each vertex of the polygon, the normal is then set to x.
+* ByEdge - There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements. (The mapping references the edge id.)
+* AllSame - There can be only one mapping coordinate for the whole surface.
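+
+The following is a minimal, self-contained sketch of how the Direct and IndexToDirect reference types
+resolve per-polygon-vertex data such as normals. It is not the importer's real API - the struct and
+function names here are made up purely for illustration:
+
+```cpp
+#include <cstdio>
+#include <vector>
+
+struct Vec3 {
+	float x, y, z;
+};
+
+enum class RefType {
+	Direct, // one data entry per polygon vertex, addressed directly
+	IndexToDirect // an index array maps each polygon vertex to a data entry
+};
+
+// p_corner is the position in the flattened polygon-vertex list (ByPolygonVertex mapping).
+Vec3 get_normal_for_corner(const std::vector<Vec3> &p_data, const std::vector<int> &p_index,
+		RefType p_ref, int p_corner) {
+	if (p_ref == RefType::Direct) {
+		// Direct: the layer data already has one entry per polygon vertex.
+		return p_data[p_corner];
+	}
+	// IndexToDirect: look the entry up through the index array first.
+	return p_data[p_index[p_corner]];
+}
+
+int main() {
+	std::vector<Vec3> data = { { 0, 1, 0 }, { 1, 0, 0 } };
+	std::vector<int> index = { 0, 0, 1, 1 }; // 4 polygon corners sharing 2 normals
+	Vec3 n = get_normal_for_corner(data, index, RefType::IndexToDirect, 2);
+	printf("normal: %f %f %f\n", n.x, n.y, n.z);
+	return 0;
+}
+```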
diff --git a/modules/fbx/SCsub b/modules/fbx/SCsub new file mode 100644 index 0000000000..84220a66fa --- /dev/null +++ b/modules/fbx/SCsub @@ -0,0 +1,15 @@ +#!/usr/bin/env python + +Import("env") +Import("env_modules") + +env_fbx = env_modules.Clone() + +# Make includes relative to the folder path specified here so our includes are clean +env_fbx.Prepend(CPPPATH=["#modules/fbx/"]) + +# Godot's own source files +env_fbx.add_source_files(env.modules_sources, "tools/*.cpp") +env_fbx.add_source_files(env.modules_sources, "data/*.cpp") +env_fbx.add_source_files(env.modules_sources, "fbx_parser/*.cpp") +env_fbx.add_source_files(env.modules_sources, "*.cpp") diff --git a/modules/fbx/config.py b/modules/fbx/config.py new file mode 100644 index 0000000000..78929800b3 --- /dev/null +++ b/modules/fbx/config.py @@ -0,0 +1,16 @@ +def can_build(env, platform): + return env["tools"] + + +def configure(env): + pass + + +def get_doc_classes(): + return [ + "EditorSceneImporterFBX", + ] + + +def get_doc_path(): + return "doc_classes" diff --git a/modules/fbx/data/fbx_anim_container.h b/modules/fbx/data/fbx_anim_container.h new file mode 100644 index 0000000000..e45dd84d6a --- /dev/null +++ b/modules/fbx/data/fbx_anim_container.h @@ -0,0 +1,46 @@ +/*************************************************************************/ +/* fbx_anim_container.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef FBX_ANIM_CONTAINER_H +#define FBX_ANIM_CONTAINER_H + +#include "core/math/vector3.h" + +// Generic keyframes 99.99 percent of files will be vector3, except if quat interp is used, or visibility tracks +// FBXTrack is used in a map in the implementation in fbx/editor_scene_importer_fbx.cpp +// to avoid having to rewrite the entire logic I refactored this into the code instead. 
+// once it works I can rewrite so we can add the fun misc features / small features +struct FBXTrack { + bool has_default = false; + Vector3 default_value; + std::map<int64_t, Vector3> keyframes; +}; + +#endif //MODEL_ABSTRACTION_ANIM_CONTAINER_H diff --git a/modules/fbx/data/fbx_bone.cpp b/modules/fbx/data/fbx_bone.cpp new file mode 100644 index 0000000000..7ccb0b3717 --- /dev/null +++ b/modules/fbx/data/fbx_bone.cpp @@ -0,0 +1,56 @@ +/*************************************************************************/ +/* fbx_bone.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "fbx_bone.h" + +#include "fbx_node.h" +#include "import_state.h" + +Ref<FBXNode> FBXSkinDeformer::get_link(const ImportState &state) const { + print_verbose("bone name: " + bone->bone_name); + + // safe for when deformers must be polyfilled when skin has different count of binds to bones in the scene ;) + if (!cluster) { + return nullptr; + } + + ERR_FAIL_COND_V_MSG(cluster->TargetNode() == nullptr, nullptr, "bone has invalid target node"); + + Ref<FBXNode> link_node; + uint64_t id = cluster->TargetNode()->ID(); + if (state.fbx_target_map.has(id)) { + link_node = state.fbx_target_map[id]; + } else { + print_error("link node not found for " + itos(id)); + } + + // the node in space this is for, like if it's FOR a target. + return link_node; +} diff --git a/modules/fbx/data/fbx_bone.h b/modules/fbx/data/fbx_bone.h new file mode 100644 index 0000000000..5d797da24f --- /dev/null +++ b/modules/fbx/data/fbx_bone.h @@ -0,0 +1,90 @@ +/*************************************************************************/ +/* fbx_bone.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef FBX_BONE_H +#define FBX_BONE_H + +#include "fbx_node.h" +#include "import_state.h" + +#include "fbx_parser/FBXDocument.h" + +struct PivotTransform; + +struct FBXBone : public Reference { + uint64_t parent_bone_id = 0; + uint64_t bone_id = 0; + + bool valid_parent = false; // if the parent bone id is set up. + String bone_name = String(); // bone name + + bool is_root_bone() const { + return !valid_parent; + } + + // Godot specific data + int godot_bone_id = -2; // godot internal bone id assigned after import + + // if a bone / armature is the root then FBX skeleton will contain the bone not any other skeleton. + // this is to support joints by themselves in scenes + bool valid_armature_id = false; + uint64_t armature_id = 0; + + /* link node is the parent bone */ + mutable const FBXDocParser::Geometry *geometry = nullptr; + mutable const FBXDocParser::ModelLimbNode *limb_node = nullptr; + + void set_node(Ref<FBXNode> p_node) { + node = p_node; + } + + // Stores the pivot xform for this bone + + Ref<FBXNode> node = nullptr; + Ref<FBXBone> parent_bone = nullptr; + Ref<FBXSkeleton> fbx_skeleton = nullptr; +}; + +struct FBXSkinDeformer { + FBXSkinDeformer(Ref<FBXBone> p_bone, const FBXDocParser::Cluster *p_cluster) : + cluster(p_cluster), bone(p_bone) {} + ~FBXSkinDeformer() {} + const FBXDocParser::Cluster *cluster; + Ref<FBXBone> bone; + + /* get associate model - the model can be invalid sometimes */ + Ref<FBXBone> get_associate_model() const { + return bone->parent_bone; + } + + Ref<FBXNode> get_link(const ImportState &state) const; +}; + +#endif // FBX_BONE_H diff --git a/modules/fbx/data/fbx_material.cpp b/modules/fbx/data/fbx_material.cpp new file mode 100644 index 0000000000..b00ac2083e --- /dev/null +++ b/modules/fbx/data/fbx_material.cpp @@ -0,0 +1,464 @@ +/*************************************************************************/ +/* fbx_material.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "fbx_material.h" +#include "scene/resources/material.h" +#include "scene/resources/texture.h" +#include "tools/validation_tools.h" + +String FBXMaterial::get_material_name() const { + return material_name; +} + +void FBXMaterial::set_imported_material(FBXDocParser::Material *p_material) { + material = p_material; +} + +void FBXMaterial::add_search_string(String p_filename, String p_current_directory, String search_directory, Vector<String> &texture_search_paths) { + if (search_directory.empty()) { + texture_search_paths.push_back(p_current_directory.get_base_dir().plus_file(p_filename)); + } else { + texture_search_paths.push_back(p_current_directory.get_base_dir().plus_file(search_directory + "/" + p_filename)); + texture_search_paths.push_back(p_current_directory.get_base_dir().plus_file("../" + search_directory + "/" + p_filename)); + } +} + +String find_file(const String &p_base, const String &p_file_to_find) { + _Directory dir; + dir.open(p_base); + + dir.list_dir_begin(); + String n = dir.get_next(); + while (n != String()) { + if (n == "." || n == "..") { + n = dir.get_next(); + continue; + } + if (dir.current_is_dir()) { + // Don't use `path_to` or the returned path will be wrong. + const String f = find_file(p_base + "/" + n, p_file_to_find); + if (f != "") { + return f; + } + } else if (n == p_file_to_find) { + return p_base + "/" + n; + } + n = dir.get_next(); + } + dir.list_dir_end(); + + return String(); +} + +// fbx will not give us good path information and let's not regex them to fix them +// no relative paths are in fbx generally they have a rel field but it's populated incorrectly by the SDK. 
+String FBXMaterial::find_texture_path_by_filename(const String p_filename, const String p_current_directory) { + _Directory dir; + Vector<String> paths; + add_search_string(p_filename, p_current_directory, "", paths); + add_search_string(p_filename, p_current_directory, "texture", paths); + add_search_string(p_filename, p_current_directory, "textures", paths); + add_search_string(p_filename, p_current_directory, "Textures", paths); + add_search_string(p_filename, p_current_directory, "materials", paths); + add_search_string(p_filename, p_current_directory, "mats", paths); + add_search_string(p_filename, p_current_directory, "pictures", paths); + add_search_string(p_filename, p_current_directory, "images", paths); + + for (int i = 0; i < paths.size(); i++) { + if (dir.file_exists(paths[i])) { + return paths[i]; + } + } + + // We were not able to find the texture in the common locations, + // try to find it into the project globally. + // The common textures can be stored into one of those folders: + // res://asset + // res://texture + // res://material + // res://mat + // res://image + // res://picture + // + // Note the folders can also be called with custom names, like: + // res://my_assets + // since the keyword `asset` is into the directory name the textures will be + // searched there too. + + dir.open("res://"); + dir.list_dir_begin(); + String n = dir.get_next(); + while (n != String()) { + if (n == "." || n == "..") { + n = dir.get_next(); + continue; + } + if (dir.current_is_dir()) { + const String lower_n = n.to_lower(); + if ( + // Don't need to use plural. + lower_n.find("asset") >= 0 || + lower_n.find("texture") >= 0 || + lower_n.find("material") >= 0 || + lower_n.find("mat") >= 0 || + lower_n.find("image") >= 0 || + lower_n.find("picture") >= 0) { + // Don't use `path_to` or the returned path will be wrong. + const String f = find_file(String("res://") + n, p_filename); + if (f != "") { + return f; + } + } + } + n = dir.get_next(); + } + dir.list_dir_end(); + + return ""; +} + +template <class T> +T extract_from_prop(FBXDocParser::PropertyPtr prop, const T &p_default, const std::string &p_name, const String &p_type) { + ERR_FAIL_COND_V_MSG(prop == nullptr, p_default, "invalid property passed to extractor"); + const FBXDocParser::TypedProperty<T> *val = dynamic_cast<const FBXDocParser::TypedProperty<T> *>(prop); + + ERR_FAIL_COND_V_MSG(val == nullptr, p_default, "The FBX is corrupted, the property `" + String(p_name.c_str()) + "` is a `" + String(typeid(*prop).name()) + "` but should be a " + p_type); + // Make sure to not lost any eventual opacity. 
+ return val->Value(); +} + +Ref<StandardMaterial3D> FBXMaterial::import_material(ImportState &state) { + ERR_FAIL_COND_V(material == nullptr, nullptr); + + const String p_fbx_current_directory = state.path; + + Ref<StandardMaterial3D> spatial_material; + spatial_material.instance(); + + // read the material file + // is material two sided + // read material name + print_verbose("[material] material name: " + ImportUtils::FBXNodeToName(material->Name())); + + material_name = ImportUtils::FBXNodeToName(material->Name()); + + for (const std::pair<std::string, const FBXDocParser::Texture *> iter : material->Textures()) { + const uint64_t texture_id = iter.second->ID(); + const std::string &fbx_mapping_name = iter.first; + const FBXDocParser::Texture *fbx_texture_data = iter.second; + const String absolute_texture_path = iter.second->FileName().c_str(); + const String texture_name = absolute_texture_path.get_file(); + const String file_extension = absolute_texture_path.get_extension().to_upper(); + + const String debug_string = "texture id: " + itos(texture_id) + " texture name: " + String(iter.second->Name().c_str()) + " mapping name: " + String(fbx_mapping_name.c_str()); + // remember errors STILL need this string at the end for when you aren't in verbose debug mode :) they need context for when you're not verbose-ing. + print_verbose(debug_string); + + const String file_extension_uppercase = file_extension.to_upper(); + + if (fbx_transparency_flags.count(fbx_mapping_name) > 0) { + // just enable it later let's make this fine-tuned. + spatial_material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA); + } + + ERR_CONTINUE_MSG(file_extension.empty(), "your texture has no file extension so we had to ignore it, let us know if you think this is wrong file an issue on github! " + debug_string); + ERR_CONTINUE_MSG(fbx_texture_map.count(fbx_mapping_name) <= 0, "This material has a texture with mapping name: " + String(fbx_mapping_name.c_str()) + " which is not yet supported by this importer. Consider opening an issue so we can support it."); + ERR_CONTINUE_MSG( + file_extension_uppercase != "PNG" && + file_extension_uppercase != "JPEG" && + file_extension_uppercase != "JPG" && + file_extension_uppercase != "TGA" && + file_extension_uppercase != "WEBP" && + file_extension_uppercase != "DDS", + "The FBX file contains a texture with an unrecognized extension: " + file_extension_uppercase); + + print_verbose("Getting FBX mapping mode for " + String(fbx_mapping_name.c_str())); + // get the texture map type + const StandardMaterial3D::TextureParam mapping_mode = fbx_texture_map.at(fbx_mapping_name); + print_verbose("Set FBX mapping mode to " + get_texture_param_name(mapping_mode)); + + Ref<Texture> texture; + print_verbose("texture mapping name: " + texture_name); + + if (state.cached_image_searches.has(texture_name)) { + texture = state.cached_image_searches[texture_name]; + } else { + String path = find_texture_path_by_filename(texture_name, p_fbx_current_directory); + if (!path.empty()) { + Ref<Texture2D> image_texture = ResourceLoader::load(path); + + ERR_CONTINUE(image_texture.is_null()); + + texture = image_texture; + state.cached_image_searches.insert(texture_name, texture); + print_verbose("Created texture from loaded image file."); + + } else if (fbx_texture_data != nullptr && fbx_texture_data->Media() != nullptr && fbx_texture_data->Media()->IsEmbedded()) { + // This is an embedded texture. Extract it. + Ref<Image> image; + //image.instance(); // oooo double instance bug? 
why make Image::_png_blah call + + const String extension = texture_name.get_extension().to_upper(); + if (extension == "PNG") { + // The stored file is a PNG. + image = Image::_png_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength()); + ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded PNG image load fail."); + + } else if ( + extension == "JPEG" || + extension == "JPG") { + // The stored file is a JPEG. + image = Image::_jpg_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength()); + ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded JPEG image load fail."); + + } else if (extension == "TGA") { + // The stored file is a TGA. + image = Image::_tga_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength()); + ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded TGA image load fail."); + + } else if (extension == "WEBP") { + // The stored file is a WEBP. + image = Image::_webp_mem_loader_func(fbx_texture_data->Media()->Content(), fbx_texture_data->Media()->ContentLength()); + ERR_CONTINUE_MSG(image.is_valid() == false, "FBX Embedded WEBP image load fail."); + + // } else if (extension == "DDS") { + // // In this moment is not possible to extract a DDS from a buffer, TODO consider add it to godot. See `textureloader_dds.cpp::load(). + // // The stored file is a DDS. + } else { + ERR_CONTINUE_MSG(true, "The embedded image with extension: " + extension + " is not yet supported. Open an issue please."); + } + + Ref<ImageTexture> image_texture; + image_texture.instance(); + image_texture->create_from_image(image); + + texture = image_texture; + + // TODO: this is potentially making something with the same name have a match incorrectly USE FBX ID as Hash. #fuck it later. + state.cached_image_searches[texture_name] = texture; + print_verbose("Created texture from embedded image."); + } else { + ERR_CONTINUE_MSG(true, "The FBX texture, with name: `" + texture_name + "`, is not found into the project nor is stored as embedded file. Make sure to insert the texture as embedded file or into the project, then reimport."); + } + } + + spatial_material->set_texture(mapping_mode, texture); + } + + if (spatial_material.is_valid()) { + spatial_material->set_name(material_name); + } + + /// ALL below is related to properties + for (FBXDocParser::LazyPropertyMap::value_type iter : material->Props()->GetLazyProperties()) { + const std::string name = iter.first; + + if (name.empty()) { + continue; + } + + PropertyDesc desc = PROPERTY_DESC_NOT_FOUND; + if (fbx_properties_desc.count(name) > 0) { + desc = fbx_properties_desc.at(name); + } + + // check if we can ignore this it will be done at the next phase + if (desc == PROPERTY_DESC_NOT_FOUND || desc == PROPERTY_DESC_IGNORE) { + // count the texture mapping references. Skip this one if it's found and we can't look up a property value. + if (fbx_texture_map.count(name) > 0) { + continue; // safe to ignore it's a texture mapping. 
+ } + } + + if (desc == PROPERTY_DESC_IGNORE) { + //WARN_PRINT("[Ignored] The FBX material parameter: `" + String(name.c_str()) + "` is ignored."); + continue; + } else { + print_verbose("FBX Material parameter: " + String(name.c_str())); + + // Check for Diffuse material system / lambert materials / legacy basically + if (name == "Diffuse" && !warning_non_pbr_material) { + ValidationTracker::get_singleton()->add_validation_error(state.path, "Invalid material settings change to Ai Standard Surface shader, mat name: " + material_name.c_escape()); + warning_non_pbr_material = true; + } + } + + // DISABLE when adding support for all weird and wonderful material formats + if (desc == PROPERTY_DESC_NOT_FOUND) { + continue; + } + + ERR_CONTINUE_MSG(desc == PROPERTY_DESC_NOT_FOUND, "The FBX material parameter: `" + String(name.c_str()) + "` was not recognized. Please open an issue so we can add the support to it."); + + const FBXDocParser::PropertyTable *tbl = material->Props(); + FBXDocParser::PropertyPtr prop = tbl->Get(name); + + ERR_CONTINUE_MSG(prop == nullptr, "This file may be corrupted because is not possible to extract the material parameter: " + String(name.c_str())); + + if (spatial_material.is_null()) { + // Done here so if no data no material is created. + spatial_material.instance(); + } + + const FBXDocParser::TypedProperty<real_t> *real_value = dynamic_cast<const FBXDocParser::TypedProperty<real_t> *>(prop); + const FBXDocParser::TypedProperty<Vector3> *vector_value = dynamic_cast<const FBXDocParser::TypedProperty<Vector3> *>(prop); + + if (!real_value && !vector_value) { + //WARN_PRINT("unsupported datatype in property: " + String(name.c_str())); + continue; + } + + if (vector_value && !real_value) { + if (vector_value->Value() == Vector3(0, 0, 0) && !real_value) { + continue; + } + } + + switch (desc) { + case PROPERTY_DESC_ALBEDO_COLOR: { + if (vector_value) { + const Vector3 &color = vector_value->Value(); + // Make sure to not lost any eventual opacity. 
+ if (color != Vector3(0, 0, 0)) { + Color c = Color(); + c[0] = color[0]; + c[1] = color[1]; + c[2] = color[2]; + spatial_material->set_albedo(c); + } + + } else if (real_value) { + print_error("albedo is unsupported format?"); + } + } break; + case PROPERTY_DESC_TRANSPARENT: { + if (real_value) { + const real_t opacity = real_value->Value(); + if (opacity < (1.0 - CMP_EPSILON)) { + Color c = spatial_material->get_albedo(); + c.a = opacity; + spatial_material->set_albedo(c); + + spatial_material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA); + spatial_material->set_depth_draw_mode(BaseMaterial3D::DEPTH_DRAW_OPAQUE_ONLY); + } + } else if (vector_value) { + print_error("unsupported transparent desc type vector!"); + } + } break; + case PROPERTY_DESC_SPECULAR: { + if (real_value) { + print_verbose("specular real value: " + rtos(real_value->Value())); + spatial_material->set_specular(MIN(1.0, real_value->Value())); + } + + if (vector_value) { + print_error("unsupported specular vector value: " + vector_value->Value()); + } + } break; + + case PROPERTY_DESC_SPECULAR_COLOR: { + if (vector_value) { + print_error("unsupported specular color: " + vector_value->Value()); + } + } break; + case PROPERTY_DESC_SHINYNESS: { + if (real_value) { + print_error("unsupported shinyness:" + rtos(real_value->Value())); + } + } break; + case PROPERTY_DESC_METALLIC: { + if (real_value) { + print_verbose("metallic real value: " + rtos(real_value->Value())); + spatial_material->set_metallic(MIN(1.0f, real_value->Value())); + } else { + print_error("unsupported value type for metallic"); + } + } break; + case PROPERTY_DESC_ROUGHNESS: { + if (real_value) { + print_verbose("roughness real value: " + rtos(real_value->Value())); + spatial_material->set_roughness(MIN(1.0f, real_value->Value())); + } else { + print_error("unsupported value type for roughness"); + } + } break; + case PROPERTY_DESC_COAT: { + if (real_value) { + print_verbose("clearcoat real value: " + rtos(real_value->Value())); + spatial_material->set_clearcoat(MIN(1.0f, real_value->Value())); + } else { + print_error("unsupported value type for clearcoat"); + } + } break; + case PROPERTY_DESC_COAT_ROUGHNESS: { + // meaning is that approx equal to zero is disabled not actually zero. 
;) + if (real_value && Math::is_equal_approx(real_value->Value(), 0.0f)) { + print_verbose("clearcoat real value: " + rtos(real_value->Value())); + spatial_material->set_clearcoat_gloss(1.0 - real_value->Value()); + } else { + print_error("unsupported value type for clearcoat gloss"); + } + } break; + case PROPERTY_DESC_EMISSIVE: { + if (real_value && Math::is_equal_approx(real_value->Value(), 0.0f)) { + print_verbose("Emissive real value: " + rtos(real_value->Value())); + spatial_material->set_emission_energy(real_value->Value()); + } else if (vector_value && !vector_value->Value().is_equal_approx(Vector3(0, 0, 0))) { + const Vector3 &color = vector_value->Value(); + Color c; + c[0] = color[0]; + c[1] = color[1]; + c[2] = color[2]; + spatial_material->set_emission(c); + } + } break; + case PROPERTY_DESC_EMISSIVE_COLOR: { + if (vector_value && !vector_value->Value().is_equal_approx(Vector3(0, 0, 0))) { + const Vector3 &color = vector_value->Value(); + Color c; + c[0] = color[0]; + c[1] = color[1]; + c[2] = color[2]; + spatial_material->set_emission(c); + } else { + print_error("unsupported value type for emissive color"); + } + } break; + case PROPERTY_DESC_NOT_FOUND: + case PROPERTY_DESC_IGNORE: + break; + default: + break; + } + } + + return spatial_material; +} diff --git a/modules/fbx/data/fbx_material.h b/modules/fbx/data/fbx_material.h new file mode 100644 index 0000000000..08b93ac01b --- /dev/null +++ b/modules/fbx/data/fbx_material.h @@ -0,0 +1,286 @@ +/*************************************************************************/ +/* fbx_material.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef FBX_MATERIAL_H +#define FBX_MATERIAL_H + +#include "tools/import_utils.h" + +#include "core/object/reference.h" +#include "core/string/ustring.h" + +struct FBXMaterial : public Reference { + String material_name = String(); + bool warning_non_pbr_material = false; + FBXDocParser::Material *material = nullptr; + + /* Godot materials + *** Texture Maps: + * Albedo - color, texture + * Metallic - specular, metallic, texture + * Roughness - roughness, texture + * Emission - color, texture + * Normal Map - scale, texture + * Ambient Occlusion - texture + * Refraction - scale, texture + *** Has Settings for: + * UV1 - SCALE, OFFSET + * UV2 - SCALE, OFFSET + *** Flags for + * Transparent + * Cull Mode + */ + + enum class MapMode { + AlbedoM = 0, + MetallicM, + SpecularM, + EmissionM, + RoughnessM, + NormalM, + AmbientOcclusionM, + RefractionM, + ReflectionM, + }; + + /* Returns the string representation of the TextureParam enum */ + static String get_texture_param_name(StandardMaterial3D::TextureParam param) { + switch (param) { + case StandardMaterial3D::TEXTURE_ALBEDO: + return "TEXTURE_ALBEDO"; + case StandardMaterial3D::TEXTURE_METALLIC: + return "TEXTURE_METALLIC"; + case StandardMaterial3D::TEXTURE_ROUGHNESS: + return "TEXTURE_ROUGHNESS"; + case StandardMaterial3D::TEXTURE_EMISSION: + return "TEXTURE_EMISSION"; + case StandardMaterial3D::TEXTURE_NORMAL: + return "TEXTURE_NORMAL"; + case StandardMaterial3D::TEXTURE_RIM: + return "TEXTURE_RIM"; + case StandardMaterial3D::TEXTURE_CLEARCOAT: + return "TEXTURE_CLEARCOAT"; + case StandardMaterial3D::TEXTURE_FLOWMAP: + return "TEXTURE_FLOWMAP"; + case StandardMaterial3D::TEXTURE_AMBIENT_OCCLUSION: + return "TEXTURE_AMBIENT_OCCLUSION"; + // case StandardMaterial3D::TEXTURE_DEPTH: // TODO: work out how to make this function again! + // return "TEXTURE_DEPTH"; + case StandardMaterial3D::TEXTURE_SUBSURFACE_SCATTERING: + return "TEXTURE_SUBSURFACE_SCATTERING"; + // case StandardMaterial3D::TEXTURE_TRANSMISSION: // TODO: work out how to make this function again! + // return "TEXTURE_TRANSMISSION"; + case StandardMaterial3D::TEXTURE_REFRACTION: + return "TEXTURE_REFRACTION"; + case StandardMaterial3D::TEXTURE_DETAIL_MASK: + return "TEXTURE_DETAIL_MASK"; + case StandardMaterial3D::TEXTURE_DETAIL_ALBEDO: + return "TEXTURE_DETAIL_ALBEDO"; + case StandardMaterial3D::TEXTURE_DETAIL_NORMAL: + return "TEXTURE_DETAIL_NORMAL"; + case StandardMaterial3D::TEXTURE_MAX: + return "TEXTURE_MAX"; + default: + return "broken horribly"; + } + }; + + // TODO make this static? + const std::map<std::string, bool> fbx_transparency_flags = { + /* Transparent */ + { "TransparentColor", true }, + { "Maya|opacity", true } + }; + + // TODO make this static? 
+ const std::map<std::string, StandardMaterial3D::TextureParam> fbx_texture_map = { + /* Diffuse */ + { "Maya|base", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "DiffuseColor", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "Maya|DiffuseTexture", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "Maya|baseColor", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "Maya|baseColor|file", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "3dsMax|Parameters|base_color_map", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "Maya|TEX_color_map|file", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + { "Maya|TEX_color_map", StandardMaterial3D::TextureParam::TEXTURE_ALBEDO }, + /* Emission */ + { "EmissiveColor", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "EmissiveFactor", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "Maya|emissionColor", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "Maya|emissionColor|file", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "3dsMax|Parameters|emission_map", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "Maya|TEX_emissive_map", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + { "Maya|TEX_emissive_map|file", StandardMaterial3D::TextureParam::TEXTURE_EMISSION }, + /* Metallic */ + { "Maya|metalness", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + { "Maya|metalness|file", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + { "3dsMax|Parameters|metalness_map", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + { "Maya|TEX_metallic_map", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + { "Maya|TEX_metallic_map|file", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + + /* Roughness */ + // Arnold Roughness Map + { "Maya|specularRoughness", StandardMaterial3D::TextureParam::TEXTURE_ROUGHNESS }, + + { "3dsMax|Parameters|roughness_map", StandardMaterial3D::TextureParam::TEXTURE_ROUGHNESS }, + { "Maya|TEX_roughness_map", StandardMaterial3D::TextureParam::TEXTURE_ROUGHNESS }, + { "Maya|TEX_roughness_map|file", StandardMaterial3D::TextureParam::TEXTURE_ROUGHNESS }, + + /* Normal */ + { "NormalMap", StandardMaterial3D::TextureParam::TEXTURE_NORMAL }, + //{ "Bump", Material::TextureParam::TEXTURE_NORMAL }, + //{ "3dsMax|Parameters|bump_map", Material::TextureParam::TEXTURE_NORMAL }, + { "Maya|NormalTexture", StandardMaterial3D::TextureParam::TEXTURE_NORMAL }, + //{ "Maya|normalCamera", Material::TextureParam::TEXTURE_NORMAL }, + //{ "Maya|normalCamera|file", Material::TextureParam::TEXTURE_NORMAL }, + { "Maya|TEX_normal_map", StandardMaterial3D::TextureParam::TEXTURE_NORMAL }, + { "Maya|TEX_normal_map|file", StandardMaterial3D::TextureParam::TEXTURE_NORMAL }, + /* AO */ + { "Maya|TEX_ao_map", StandardMaterial3D::TextureParam::TEXTURE_AMBIENT_OCCLUSION }, + { "Maya|TEX_ao_map|file", StandardMaterial3D::TextureParam::TEXTURE_AMBIENT_OCCLUSION }, + + // TODO: specular workflow conversion + // { "SpecularColor", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + // { "Maya|specularColor", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + // { "Maya|SpecularTexture", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + // { "Maya|SpecularTexture|file", StandardMaterial3D::TextureParam::TEXTURE_METALLIC }, + // { "ShininessExponent", SpatialMaterial::TextureParam::UNSUPPORTED }, + // { "ReflectionFactor", SpatialMaterial::TextureParam::UNSUPPORTED }, + + //{ 
"TransparentColor",SpatialMaterial::TextureParam::TEXTURE_CHANNEL_ALPHA }, + //{ "TransparencyFactor",SpatialMaterial::TextureParam::TEXTURE_CHANNEL_ALPHA } + + // TODO: diffuse roughness + //{ "Maya|diffuseRoughness", SpatialMaterial::TextureParam::UNSUPPORTED }, + //{ "Maya|diffuseRoughness|file", SpatialMaterial::TextureParam::UNSUPPORTED }, + + }; + + // TODO make this static? + enum PropertyDesc { + PROPERTY_DESC_NOT_FOUND, + PROPERTY_DESC_ALBEDO_COLOR, + PROPERTY_DESC_TRANSPARENT, + PROPERTY_DESC_METALLIC, + PROPERTY_DESC_ROUGHNESS, + PROPERTY_DESC_SPECULAR, + PROPERTY_DESC_SPECULAR_COLOR, + PROPERTY_DESC_SHINYNESS, + PROPERTY_DESC_COAT, + PROPERTY_DESC_COAT_ROUGHNESS, + PROPERTY_DESC_EMISSIVE, + PROPERTY_DESC_EMISSIVE_COLOR, + PROPERTY_DESC_IGNORE + }; + + const std::map<std::string, PropertyDesc> fbx_properties_desc = { + /* Albedo */ + { "DiffuseColor", PROPERTY_DESC_ALBEDO_COLOR }, + { "Maya|baseColor", PROPERTY_DESC_ALBEDO_COLOR }, + + /* Specular */ + { "Maya|specular", PROPERTY_DESC_SPECULAR }, + { "Maya|specularColor", PROPERTY_DESC_SPECULAR_COLOR }, + + /* Specular roughness - arnold roughness map */ + { "Maya|specularRoughness", PROPERTY_DESC_ROUGHNESS }, + + /* Transparent */ + { "Opacity", PROPERTY_DESC_TRANSPARENT }, + { "TransparencyFactor", PROPERTY_DESC_TRANSPARENT }, + { "Maya|opacity", PROPERTY_DESC_TRANSPARENT }, + + /* Metallic */ + { "Shininess", PROPERTY_DESC_METALLIC }, + { "Reflectivity", PROPERTY_DESC_METALLIC }, + { "Maya|metalness", PROPERTY_DESC_METALLIC }, + { "Maya|metallic", PROPERTY_DESC_METALLIC }, + + /* Roughness */ + { "Maya|roughness", PROPERTY_DESC_ROUGHNESS }, + + /* Coat */ + //{ "Maya|coat", PROPERTY_DESC_COAT }, + + /* Coat roughness */ + //{ "Maya|coatRoughness", PROPERTY_DESC_COAT_ROUGHNESS }, + + /* Emissive */ + { "Maya|emission", PROPERTY_DESC_EMISSIVE }, + { "Maya|emissive", PROPERTY_DESC_EMISSIVE }, + + /* Emissive color */ + { "EmissiveColor", PROPERTY_DESC_EMISSIVE_COLOR }, + { "Maya|emissionColor", PROPERTY_DESC_EMISSIVE_COLOR }, + + /* Ignore */ + { "Maya|diffuseRoughness", PROPERTY_DESC_IGNORE }, + { "Maya", PROPERTY_DESC_IGNORE }, + { "Diffuse", PROPERTY_DESC_ALBEDO_COLOR }, + { "Maya|TypeId", PROPERTY_DESC_IGNORE }, + { "Ambient", PROPERTY_DESC_IGNORE }, + { "AmbientColor", PROPERTY_DESC_IGNORE }, + { "ShininessExponent", PROPERTY_DESC_IGNORE }, + { "Specular", PROPERTY_DESC_IGNORE }, + { "SpecularColor", PROPERTY_DESC_IGNORE }, + { "SpecularFactor", PROPERTY_DESC_IGNORE }, + //{ "BumpFactor", PROPERTY_DESC_IGNORE }, + { "Maya|exitToBackground", PROPERTY_DESC_IGNORE }, + { "Maya|indirectDiffuse", PROPERTY_DESC_IGNORE }, + { "Maya|indirectSpecular", PROPERTY_DESC_IGNORE }, + { "Maya|internalReflections", PROPERTY_DESC_IGNORE }, + { "DiffuseFactor", PROPERTY_DESC_IGNORE }, + { "AmbientFactor", PROPERTY_DESC_IGNORE }, + { "ReflectionColor", PROPERTY_DESC_IGNORE }, + { "Emissive", PROPERTY_DESC_IGNORE }, + { "Maya|coatColor", PROPERTY_DESC_IGNORE }, + { "Maya|coatNormal", PROPERTY_DESC_IGNORE }, + { "Maya|coatIOR", PROPERTY_DESC_IGNORE }, + }; + + /* storing the texture properties like color */ + template <class T> + struct TexturePropertyMapping : Reference { + StandardMaterial3D::TextureParam map_mode = StandardMaterial3D::TextureParam::TEXTURE_ALBEDO; + const T property = T(); + }; + + static void add_search_string(String p_filename, String p_current_directory, String search_directory, Vector<String> &texture_search_paths); + + static String find_texture_path_by_filename(const String p_filename, const String p_current_directory); 
+ + String get_material_name() const; + + void set_imported_material(FBXDocParser::Material *p_material); + + Ref<StandardMaterial3D> import_material(ImportState &state); +}; + +#endif // FBX_MATERIAL_H diff --git a/modules/fbx/data/fbx_mesh_data.cpp b/modules/fbx/data/fbx_mesh_data.cpp new file mode 100644 index 0000000000..0728866adb --- /dev/null +++ b/modules/fbx/data/fbx_mesh_data.cpp @@ -0,0 +1,1461 @@ +/*************************************************************************/ +/* fbx_mesh_data.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "fbx_mesh_data.h" + +#include "core/templates/local_vector.h" +#include "scene/resources/mesh.h" +#include "scene/resources/surface_tool.h" + +#include "thirdparty/misc/triangulator.h" + +template <class T> +T collect_first(const Vector<VertexData<T>> *p_data, T p_fall_back) { + if (p_data->empty()) { + return p_fall_back; + } + + return (*p_data)[0].data; +} + +template <class T> +HashMap<int, T> collect_all(const Vector<VertexData<T>> *p_data, HashMap<int, T> p_fall_back) { + if (p_data->empty()) { + return p_fall_back; + } + + HashMap<int, T> collection; + for (int i = 0; i < p_data->size(); i += 1) { + const VertexData<T> &vd = (*p_data)[i]; + collection[vd.polygon_index] = vd.data; + } + return collection; +} + +template <class T> +T collect_average(const Vector<VertexData<T>> *p_data, T p_fall_back) { + if (p_data->empty()) { + return p_fall_back; + } + + T combined = (*p_data)[0].data; // Make sure the data is always correctly initialized. 
+ print_verbose("size of data: " + itos(p_data->size())); + for (int i = 1; i < p_data->size(); i += 1) { + combined += (*p_data)[i].data; + } + combined = combined / real_t(p_data->size()); + + return combined.normalized(); +} + +HashMap<int, Vector3> collect_normal(const Vector<VertexData<Vector3>> *p_data, HashMap<int, Vector3> p_fall_back) { + if (p_data->empty()) { + return p_fall_back; + } + + HashMap<int, Vector3> collection; + for (int i = 0; i < p_data->size(); i += 1) { + const VertexData<Vector3> &vd = (*p_data)[i]; + collection[vd.polygon_index] = vd.data; + } + return collection; +} + +HashMap<int, Vector2> collect_uv(const Vector<VertexData<Vector2>> *p_data, HashMap<int, Vector2> p_fall_back) { + if (p_data->empty()) { + return p_fall_back; + } + + HashMap<int, Vector2> collection; + for (int i = 0; i < p_data->size(); i += 1) { + const VertexData<Vector2> &vd = (*p_data)[i]; + collection[vd.polygon_index] = vd.data; + } + return collection; +} + +typedef int Vertex; +typedef int SurfaceId; +typedef int PolygonId; +typedef int DataIndex; + +struct SurfaceData { + Ref<SurfaceTool> surface_tool; + OrderedHashMap<Vertex, int> lookup_table; // proposed fix is to replace lookup_table[vertex_id] to give the position of the vertices_map[int] index. + LocalVector<Vertex> vertices_map; // this must be ordered the same as insertion <-- slow to do find() operation. + Ref<Material> material; + HashMap<PolygonId, Vector<DataIndex>> surface_polygon_vertex; + Array morphs; +}; + +EditorSceneImporterMeshNode3D *FBXMeshData::create_fbx_mesh(const ImportState &state, const FBXDocParser::MeshGeometry *p_mesh_geometry, const FBXDocParser::Model *model, bool use_compression) { + mesh_geometry = p_mesh_geometry; + // todo: make this just use a uint64_t FBX ID this is a copy of our original materials unfortunately. + const std::vector<const FBXDocParser::Material *> &material_lookup = model->GetMaterials(); + + // TODO: perf hotspot on large files + // this can be a very large copy + std::vector<int> polygon_indices = mesh_geometry->get_polygon_indices(); + std::vector<Vector3> vertices = mesh_geometry->get_vertices(); + + // Phase 1. Parse all FBX data. 
+ HashMap<int, Vector3> normals; + HashMap<int, HashMap<int, Vector3>> normals_raw = extract_per_vertex_data( + vertices.size(), + mesh_geometry->get_edge_map(), + polygon_indices, + mesh_geometry->get_normals(), + &collect_all, + HashMap<int, Vector3>()); + + // List<int> keys; + // normals.get_key_list(&keys); + // + // const std::vector<Assimp::FBX::MeshGeometry::Edge>& edges = mesh_geometry->get_edge_map(); + // for (int index = 0; index < keys.size(); index++) { + // const int key = keys[index]; + // const int v1 = edges[key].vertex_0; + // const int v2 = edges[key].vertex_1; + // const Vector3& n1 = normals.get(v1); + // const Vector3& n2 = normals.get(v2); + // print_verbose("[" + itos(v1) + "] n1: " + n1 + "\n[" + itos(v2) + "] n2: " + n2); + // //print_verbose("[" + itos(key) + "] n1: " + n1 + ", n2: " + n2) ; + // //print_verbose("vindex: " + itos(edges[key].vertex_0) + ", vindex2: " + itos(edges[key].vertex_1)); + // //Vector3 ver1 = vertices[edges[key].vertex_0]; + // //Vector3 ver2 = vertices[edges[key].vertex_1]; + // /*real_t angle1 = Math::rad2deg(n1.angle_to(n2)); + // real_t angle2 = Math::rad2deg(n2.angle_to(n1)); + // print_verbose("angle of normals: " + rtos(angle1) + " angle 2" + rtos(angle2));*/ + // } + + HashMap<int, Vector2> uvs_0; + HashMap<int, HashMap<int, Vector2>> uvs_0_raw = extract_per_vertex_data( + vertices.size(), + mesh_geometry->get_edge_map(), + polygon_indices, + mesh_geometry->get_uv_0(), + &collect_all, + HashMap<int, Vector2>()); + + HashMap<int, Vector2> uvs_1; + HashMap<int, HashMap<int, Vector2>> uvs_1_raw = extract_per_vertex_data( + vertices.size(), + mesh_geometry->get_edge_map(), + polygon_indices, + mesh_geometry->get_uv_1(), + &collect_all, + HashMap<int, Vector2>()); + + HashMap<int, Color> colors; + HashMap<int, HashMap<int, Color>> colors_raw = extract_per_vertex_data( + vertices.size(), + mesh_geometry->get_edge_map(), + polygon_indices, + mesh_geometry->get_colors(), + &collect_all, + HashMap<int, Color>()); + + // TODO what about tangents? + // TODO what about bi-nomials? + // TODO there is other? + + HashMap<int, SurfaceId> polygon_surfaces = extract_per_polygon( + vertices.size(), + polygon_indices, + mesh_geometry->get_material_allocation_id(), + -1); + + HashMap<String, MorphVertexData> morphs; + extract_morphs(mesh_geometry, morphs); + + // TODO please add skinning. + //mesh_id = mesh_geometry->ID(); + + sanitize_vertex_weights(state); + + // Re organize polygon vertices to to correctly take into account strange + // UVs. + reorganize_vertices( + polygon_indices, + vertices, + normals, + uvs_0, + uvs_1, + colors, + morphs, + normals_raw, + colors_raw, + uvs_0_raw, + uvs_1_raw); + + const int color_count = colors.size(); + print_verbose("Vertex color count: " + itos(color_count)); + + // Make sure that from this moment on the mesh_geometry is no used anymore. + // This is a safety step, because the mesh_geometry data are no more valid + // at this point. + + const int vertex_count = vertices.size(); + + print_verbose("Vertex count: " + itos(vertex_count)); + + // The map key is the material allocator id that is also used as surface id. + HashMap<SurfaceId, SurfaceData> surfaces; + + // Phase 2. For each material create a surface tool (So a different mesh). + { + if (polygon_surfaces.empty()) { + // No material, just use the default one with index -1. + // Set -1 to all polygons. 
+ const int polygon_count = count_polygons(polygon_indices); + for (int p = 0; p < polygon_count; p += 1) { + polygon_surfaces[p] = -1; + } + } + + // Create the surface now. + for (const int *polygon_id = polygon_surfaces.next(nullptr); polygon_id != nullptr; polygon_id = polygon_surfaces.next(polygon_id)) { + const int surface_id = polygon_surfaces[*polygon_id]; + if (surfaces.has(surface_id) == false) { + SurfaceData sd; + sd.surface_tool.instance(); + sd.surface_tool->begin(Mesh::PRIMITIVE_TRIANGLES); + + if (surface_id < 0) { + // nothing to do + } else if (surface_id < (int)material_lookup.size()) { + const FBXDocParser::Material *mat_mapping = material_lookup.at(surface_id); + const uint64_t mapping_id = mat_mapping->ID(); + if (state.cached_materials.has(mapping_id)) { + sd.material = state.cached_materials[mapping_id]; + } + } else { + WARN_PRINT("out of bounds surface detected, FBX file has corrupt material data"); + } + + surfaces.set(surface_id, sd); + } + } + } + + // Phase 3. Map the vertices relative to each surface, in this way we can + // just insert the vertices that we need per each surface. + { + PolygonId polygon_index = -1; + SurfaceId surface_id = -1; + SurfaceData *surface_data = nullptr; + + for (size_t polygon_vertex = 0; polygon_vertex < polygon_indices.size(); polygon_vertex += 1) { + if (is_start_of_polygon(polygon_indices, polygon_vertex)) { + polygon_index += 1; + ERR_FAIL_COND_V_MSG(polygon_surfaces.has(polygon_index) == false, nullptr, "The FBX file is corrupted, This surface_index is not expected."); + surface_id = polygon_surfaces[polygon_index]; + surface_data = surfaces.getptr(surface_id); + CRASH_COND(surface_data == nullptr); // Can't be null. + } + + const int vertex = get_vertex_from_polygon_vertex(polygon_indices, polygon_vertex); + + // The vertex position in the surface + // Uses a lookup table for speed with large scenes + bool has_polygon_vertex_index = surface_data->lookup_table.has(vertex); + int surface_polygon_vertex_index = -1; + + if (has_polygon_vertex_index) { + surface_polygon_vertex_index = surface_data->lookup_table[vertex]; + } else { + surface_polygon_vertex_index = surface_data->vertices_map.size(); + surface_data->lookup_table[vertex] = surface_polygon_vertex_index; + surface_data->vertices_map.push_back(vertex); + } + + surface_data->surface_polygon_vertex[polygon_index].push_back(surface_polygon_vertex_index); + } + } + + //print_verbose("[debug UV 1] UV1: " + itos(uvs_0.size())); + //print_verbose("[debug UV 2] UV2: " + itos(uvs_1.size())); + + // Phase 4. Per each surface just insert the vertices and add the indices. + for (const SurfaceId *surface_id = surfaces.next(nullptr); surface_id != nullptr; surface_id = surfaces.next(surface_id)) { + SurfaceData *surface = surfaces.getptr(*surface_id); + + // Just add the vertices data. + for (unsigned int i = 0; i < surface->vertices_map.size(); i += 1) { + const Vertex vertex = surface->vertices_map[i]; + + // This must be done before add_vertex because the surface tool is + // expecting this before the st->add_vertex() call + add_vertex(state, + surface->surface_tool, + state.scale, + vertex, + vertices, + normals, + uvs_0, + uvs_1, + colors); + } + + // Triangulate the various polygons and add the indices. 
+ for (const PolygonId *polygon_id = surface->surface_polygon_vertex.next(nullptr); polygon_id != nullptr; polygon_id = surface->surface_polygon_vertex.next(polygon_id)) { + const Vector<DataIndex> *indices = surface->surface_polygon_vertex.getptr(*polygon_id); + + triangulate_polygon( + surface->surface_tool, + *indices, + surface->vertices_map, + vertices); + } + } + + // Phase 5. Compose the morphs if any. + for (const SurfaceId *surface_id = surfaces.next(nullptr); surface_id != nullptr; surface_id = surfaces.next(surface_id)) { + SurfaceData *surface = surfaces.getptr(*surface_id); + + for (const String *morph_name = morphs.next(nullptr); morph_name != nullptr; morph_name = morphs.next(morph_name)) { + MorphVertexData *morph_data = morphs.getptr(*morph_name); + + // As said by the docs, this is not supposed to be different than + // vertex_count. + CRASH_COND(morph_data->vertices.size() != vertex_count); + CRASH_COND(morph_data->normals.size() != vertex_count); + + Vector3 *vertices_ptr = morph_data->vertices.ptrw(); + Vector3 *normals_ptr = morph_data->normals.ptrw(); + + Ref<SurfaceTool> morph_st; + morph_st.instance(); + morph_st->begin(Mesh::PRIMITIVE_TRIANGLES); + + for (unsigned int vi = 0; vi < surface->vertices_map.size(); vi += 1) { + const Vertex vertex = surface->vertices_map[vi]; + add_vertex( + state, + morph_st, + state.scale, + vertex, + vertices, + normals, + uvs_0, + uvs_1, + colors, + vertices_ptr[vertex], + normals_ptr[vertex]); + } + + morph_st->generate_tangents(); + surface->morphs.push_back(morph_st->commit_to_arrays()); + } + } + + // Phase 6. Compose the mesh and return it. + Ref<EditorSceneImporterMesh> mesh; + mesh.instance(); + + // Add blend shape info. + for (const String *morph_name = morphs.next(nullptr); morph_name != nullptr; morph_name = morphs.next(morph_name)) { + mesh->add_blend_shape(*morph_name); + } + + // TODO always normalized, Why? + mesh->set_blend_shape_mode(Mesh::BLEND_SHAPE_MODE_NORMALIZED); + + // Add surfaces. + int in_mesh_surface_id = 0; + for (const SurfaceId *surface_id = surfaces.next(nullptr); surface_id != nullptr; surface_id = surfaces.next(surface_id)) { + SurfaceData *surface = surfaces.getptr(*surface_id); + + // you can't generate them without a valid uv map. 
+ if (uvs_0_raw.size() > 0) { + surface->surface_tool->generate_tangents(); + } + + Array mesh_array = surface->surface_tool->commit_to_arrays(); + Array blend_shapes = surface->morphs; + + if (surface->material.is_valid()) { + mesh->add_surface(Mesh::PRIMITIVE_TRIANGLES, mesh_array, blend_shapes, Dictionary(), surface->material, surface->material->get_name()); + } else { + mesh->add_surface(Mesh::PRIMITIVE_TRIANGLES, mesh_array, blend_shapes); + } + + in_mesh_surface_id += 1; + } + + EditorSceneImporterMeshNode3D *godot_mesh = memnew(EditorSceneImporterMeshNode3D); + godot_mesh->set_mesh(mesh); + return godot_mesh; +} + +void FBXMeshData::sanitize_vertex_weights(const ImportState &state) { + const int max_vertex_influence_count = RS::ARRAY_WEIGHTS_SIZE; + Map<int, int> skeleton_to_skin_bind_id; + // TODO: error's need added + const FBXDocParser::Skin *fbx_skin = mesh_geometry->DeformerSkin(); + + if (fbx_skin == nullptr || fbx_skin->Clusters().size() == 0) { + return; // do nothing + } + + // + // Precalculate the skin cluster mapping + // + + int bind_id = 0; + for (const FBXDocParser::Cluster *cluster : fbx_skin->Clusters()) { + Ref<FBXBone> bone = state.fbx_bone_map[cluster->TargetNode()->ID()]; + skeleton_to_skin_bind_id.insert(bone->godot_bone_id, bind_id); + bind_id++; + } + + for (const Vertex *v = vertex_weights.next(nullptr); v != nullptr; v = vertex_weights.next(v)) { + VertexWeightMapping *vm = vertex_weights.getptr(*v); + ERR_CONTINUE(vm->bones.size() != vm->weights.size()); // No message, already checked. + ERR_CONTINUE(vm->bones_ref.size() != vm->weights.size()); // No message, already checked. + + const int initial_size = vm->weights.size(); + { + // Init bone id + int *bones_ptr = vm->bones.ptrw(); + Ref<FBXBone> *bones_ref_ptr = vm->bones_ref.ptrw(); + + for (int i = 0; i < vm->weights.size(); i += 1) { + // At this point this is not possible because the skeleton is already initialized. + CRASH_COND(bones_ref_ptr[i]->godot_bone_id == -2); + bones_ptr[i] = skeleton_to_skin_bind_id[bones_ref_ptr[i]->godot_bone_id]; + } + + // From this point on the data is no more valid. 
+ vm->bones_ref.clear(); + } + + { + // Sort + real_t *weights_ptr = vm->weights.ptrw(); + int *bones_ptr = vm->bones.ptrw(); + for (int i = 0; i < vm->weights.size(); i += 1) { + for (int x = i + 1; x < vm->weights.size(); x += 1) { + if (weights_ptr[i] < weights_ptr[x]) { + SWAP(weights_ptr[i], weights_ptr[x]); + SWAP(bones_ptr[i], bones_ptr[x]); + } + } + } + } + + { + // Resize + vm->weights.resize(max_vertex_influence_count); + vm->bones.resize(max_vertex_influence_count); + real_t *weights_ptr = vm->weights.ptrw(); + int *bones_ptr = vm->bones.ptrw(); + for (int i = initial_size; i < max_vertex_influence_count; i += 1) { + weights_ptr[i] = 0.0; + bones_ptr[i] = 0; + } + + // Normalize + real_t sum = 0.0; + for (int i = 0; i < max_vertex_influence_count; i += 1) { + sum += weights_ptr[i]; + } + if (sum > 0.0) { + for (int i = 0; i < vm->weights.size(); i += 1) { + weights_ptr[i] = weights_ptr[i] / sum; + } + } + } + } +} + +void FBXMeshData::reorganize_vertices( + // TODO: perf hotspot on insane files + std::vector<int> &r_polygon_indices, + std::vector<Vector3> &r_vertices, + HashMap<int, Vector3> &r_normals, + HashMap<int, Vector2> &r_uv_1, + HashMap<int, Vector2> &r_uv_2, + HashMap<int, Color> &r_color, + HashMap<String, MorphVertexData> &r_morphs, + HashMap<int, HashMap<int, Vector3>> &r_normals_raw, + HashMap<int, HashMap<int, Color>> &r_colors_raw, + HashMap<int, HashMap<int, Vector2>> &r_uv_1_raw, + HashMap<int, HashMap<int, Vector2>> &r_uv_2_raw) { + // Key: OldVertex; Value: [New vertices]; + HashMap<int, Vector<int>> duplicated_vertices; + + PolygonId polygon_index = -1; + for (int pv = 0; pv < (int)r_polygon_indices.size(); pv += 1) { + if (is_start_of_polygon(r_polygon_indices, pv)) { + polygon_index += 1; + } + const Vertex index = get_vertex_from_polygon_vertex(r_polygon_indices, pv); + + bool need_duplication = false; + Vector2 this_vert_poly_uv1 = Vector2(); + Vector2 this_vert_poly_uv2 = Vector2(); + Vector3 this_vert_poly_normal = Vector3(); + Color this_vert_poly_color = Color(); + + // Take the normal and see if we need to duplicate this polygon. + if (r_normals_raw.has(index)) { + const HashMap<PolygonId, Vector3> *nrml_arr = r_normals_raw.getptr(index); + + if (nrml_arr->has(polygon_index)) { + this_vert_poly_normal = nrml_arr->get(polygon_index); + } else if (nrml_arr->has(-1)) { + this_vert_poly_normal = nrml_arr->get(-1); + } else { + print_error("invalid normal detected: " + itos(index) + " polygon index: " + itos(polygon_index)); + for (const PolygonId *pid = nrml_arr->next(nullptr); pid != nullptr; pid = nrml_arr->next(pid)) { + print_verbose("debug contents key: " + itos(*pid)); + + if (nrml_arr->has(*pid)) { + print_verbose("contents valid: " + nrml_arr->get(*pid)); + } + } + } + + // Now, check if we need to duplicate it. + for (const PolygonId *pid = nrml_arr->next(nullptr); pid != nullptr; pid = nrml_arr->next(pid)) { + if (*pid == polygon_index) { + continue; + } + + const Vector3 vert_poly_normal = *nrml_arr->getptr(*pid); + if ((this_vert_poly_normal - vert_poly_normal).length_squared() > CMP_EPSILON) { + // Yes this polygon need duplication. + need_duplication = true; + break; + } + } + } + + // TODO: make me vertex color + // Take the normal and see if we need to duplicate this polygon. 
+ if (r_colors_raw.has(index)) { + const HashMap<PolygonId, Color> *color_arr = r_colors_raw.getptr(index); + + if (color_arr->has(polygon_index)) { + this_vert_poly_color = color_arr->get(polygon_index); + } else if (color_arr->has(-1)) { + this_vert_poly_color = color_arr->get(-1); + } else { + print_error("invalid color detected: " + itos(index) + " polygon index: " + itos(polygon_index)); + for (const PolygonId *pid = color_arr->next(nullptr); pid != nullptr; pid = color_arr->next(pid)) { + print_verbose("debug contents key: " + itos(*pid)); + + if (color_arr->has(*pid)) { + print_verbose("contents valid: " + color_arr->get(*pid)); + } + } + } + + // Now, check if we need to duplicate it. + for (const PolygonId *pid = color_arr->next(nullptr); pid != nullptr; pid = color_arr->next(pid)) { + if (*pid == polygon_index) { + continue; + } + + const Color vert_poly_color = *color_arr->getptr(*pid); + if (!this_vert_poly_color.is_equal_approx(vert_poly_color)) { + // Yes this polygon need duplication. + need_duplication = true; + break; + } + } + } + + // Take the UV1 and UV2 and see if we need to duplicate this polygon. + { + HashMap<int, HashMap<int, Vector2>> *uv_raw = &r_uv_1_raw; + Vector2 *this_vert_poly_uv = &this_vert_poly_uv1; + for (int kk = 0; kk < 2; kk++) { + if (uv_raw->has(index)) { + const HashMap<PolygonId, Vector2> *uvs = uv_raw->getptr(index); + + if (uvs->has(polygon_index)) { + // This Polygon has its own uv. + (*this_vert_poly_uv) = *uvs->getptr(polygon_index); + + // Check if we need to duplicate it. + for (const PolygonId *pid = uvs->next(nullptr); pid != nullptr; pid = uvs->next(pid)) { + if (*pid == polygon_index) { + continue; + } + const Vector2 vert_poly_uv = *uvs->getptr(*pid); + if (((*this_vert_poly_uv) - vert_poly_uv).length_squared() > CMP_EPSILON) { + // Yes this polygon need duplication. + need_duplication = true; + break; + } + } + } else if (uvs->has(-1)) { + // It has the default UV. + (*this_vert_poly_uv) = *uvs->getptr(-1); + } else if (uvs->size() > 0) { + // No uv, this is strange, just take the first and duplicate. + (*this_vert_poly_uv) = *uvs->getptr(*uvs->next(nullptr)); + WARN_PRINT("No UVs for this polygon, while there is no default and some other polygons have it. This FBX file may be corrupted."); + } + } + uv_raw = &r_uv_2_raw; + this_vert_poly_uv = &this_vert_poly_uv2; + } + } + + // If we want to duplicate it, Let's see if we already duplicated this + // vertex. + if (need_duplication) { + if (duplicated_vertices.has(index)) { + Vertex similar_vertex = -1; + // Let's see if one of the new vertices has the same data of this. 
+ const Vector<int> *new_vertices = duplicated_vertices.getptr(index);
+ for (int j = 0; j < new_vertices->size(); j += 1) {
+ const Vertex new_vertex = (*new_vertices)[j];
+ bool same_uv1 = false;
+ bool same_uv2 = false;
+ bool same_normal = false;
+ bool same_color = false;
+
+ if (r_uv_1.has(new_vertex)) {
+ if ((this_vert_poly_uv1 - (*r_uv_1.getptr(new_vertex))).length_squared() <= CMP_EPSILON) {
+ same_uv1 = true;
+ }
+ }
+
+ if (r_uv_2.has(new_vertex)) {
+ if ((this_vert_poly_uv2 - (*r_uv_2.getptr(new_vertex))).length_squared() <= CMP_EPSILON) {
+ same_uv2 = true;
+ }
+ }
+
+ if (r_color.has(new_vertex)) {
+ if (this_vert_poly_color.is_equal_approx((*r_color.getptr(new_vertex)))) {
+ same_color = true;
+ }
+ }
+
+ if (r_normals.has(new_vertex)) {
+ if ((this_vert_poly_normal - (*r_normals.getptr(new_vertex))).length_squared() <= CMP_EPSILON) {
+ same_normal = true;
+ }
+ }
+
+ if (same_uv1 && same_uv2 && same_normal && same_color) {
+ similar_vertex = new_vertex;
+ break;
+ }
+ }
+
+ if (similar_vertex != -1) {
+ // Update polygon.
+ if (is_end_of_polygon(r_polygon_indices, pv)) {
+ r_polygon_indices[pv] = ~similar_vertex;
+ } else {
+ r_polygon_indices[pv] = similar_vertex;
+ }
+ need_duplication = false;
+ }
+ }
+ }
+
+ if (need_duplication) {
+ const Vertex old_index = index;
+ const Vertex new_index = r_vertices.size();
+
+ // Polygon index.
+ if (is_end_of_polygon(r_polygon_indices, pv)) {
+ r_polygon_indices[pv] = ~new_index;
+ } else {
+ r_polygon_indices[pv] = new_index;
+ }
+
+ // Vertex position.
+ r_vertices.push_back(r_vertices[old_index]);
+
+ // Normals
+ if (r_normals_raw.has(old_index)) {
+ r_normals.set(new_index, this_vert_poly_normal);
+ r_normals_raw.getptr(old_index)->erase(polygon_index);
+ r_normals_raw[new_index][polygon_index] = this_vert_poly_normal;
+ }
+
+ // Vertex Color
+ if (r_colors_raw.has(old_index)) {
+ r_color.set(new_index, this_vert_poly_color);
+ r_colors_raw.getptr(old_index)->erase(polygon_index);
+ r_colors_raw[new_index][polygon_index] = this_vert_poly_color;
+ }
+
+ // UV 0
+ if (r_uv_1_raw.has(old_index)) {
+ r_uv_1.set(new_index, this_vert_poly_uv1);
+ r_uv_1_raw.getptr(old_index)->erase(polygon_index);
+ r_uv_1_raw[new_index][polygon_index] = this_vert_poly_uv1;
+ }
+
+ // UV 1
+ if (r_uv_2_raw.has(old_index)) {
+ r_uv_2.set(new_index, this_vert_poly_uv2);
+ r_uv_2_raw.getptr(old_index)->erase(polygon_index);
+ r_uv_2_raw[new_index][polygon_index] = this_vert_poly_uv2;
+ }
+
+ // Morphs
+ for (const String *mname = r_morphs.next(nullptr); mname != nullptr; mname = r_morphs.next(mname)) {
+ MorphVertexData *d = r_morphs.getptr(*mname);
+ // This can never happen.
+ CRASH_COND(d == nullptr); + if (d->vertices.size() > old_index) { + d->vertices.push_back(d->vertices[old_index]); + } + if (d->normals.size() > old_index) { + d->normals.push_back(d->normals[old_index]); + } + } + + if (vertex_weights.has(old_index)) { + vertex_weights.set(new_index, vertex_weights[old_index]); + } + + duplicated_vertices[old_index].push_back(new_index); + } else { + if (r_normals_raw.has(index) && + r_normals.has(index) == false) { + r_normals.set(index, this_vert_poly_normal); + } + + if (r_colors_raw.has(index) && r_color.has(index) == false) { + r_color.set(index, this_vert_poly_color); + } + + if (r_uv_1_raw.has(index) && + r_uv_1.has(index) == false) { + r_uv_1.set(index, this_vert_poly_uv1); + } + + if (r_uv_2_raw.has(index) && + r_uv_2.has(index) == false) { + r_uv_2.set(index, this_vert_poly_uv2); + } + } + } +} + +void FBXMeshData::add_vertex( + const ImportState &state, + Ref<SurfaceTool> p_surface_tool, + real_t p_scale, + Vertex p_vertex, + const std::vector<Vector3> &p_vertices_position, + const HashMap<int, Vector3> &p_normals, + const HashMap<int, Vector2> &p_uvs_0, + const HashMap<int, Vector2> &p_uvs_1, + const HashMap<int, Color> &p_colors, + const Vector3 &p_morph_value, + const Vector3 &p_morph_normal) { + ERR_FAIL_INDEX_MSG(p_vertex, (Vertex)p_vertices_position.size(), "FBX file is corrupted, the position of the vertex can't be retrieved."); + + if (p_normals.has(p_vertex)) { + p_surface_tool->set_normal(p_normals[p_vertex] + p_morph_normal); + } + + if (p_uvs_0.has(p_vertex)) { + //print_verbose("uv1: [" + itos(p_vertex) + "] " + p_uvs_0[p_vertex]); + // Inverts Y UV. + p_surface_tool->set_uv(Vector2(p_uvs_0[p_vertex].x, 1 - p_uvs_0[p_vertex].y)); + } + + if (p_uvs_1.has(p_vertex)) { + //print_verbose("uv2: [" + itos(p_vertex) + "] " + p_uvs_1[p_vertex]); + // Inverts Y UV. + p_surface_tool->set_uv2(Vector2(p_uvs_1[p_vertex].x, 1 - p_uvs_1[p_vertex].y)); + } + + if (p_colors.has(p_vertex)) { + p_surface_tool->set_color(p_colors[p_vertex]); + } + + // TODO what about binormals? + // TODO there is other? + + if (vertex_weights.has(p_vertex)) { + // Let's extract the weight info. + const VertexWeightMapping *vm = vertex_weights.getptr(p_vertex); + const Vector<int> &bones = vm->bones; + + // the bug is that the bone idx is wrong because it is not ref'ing the skin. + + if (bones.size() > RS::ARRAY_WEIGHTS_SIZE) { + print_error("[weight overflow detected]"); + } + + p_surface_tool->set_weights(vm->weights); + // 0 1 2 3 4 5 6 7 < local skeleton / skin for mesh + // 0 1 2 3 4 5 6 7 8 9 10 < actual skeleton with all joints + p_surface_tool->set_bones(bones); + } + + // The surface tool want the vertex position as last thing. 
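+ // (SurfaceTool attaches the most recently set normal/UV/color/bones/weights
+ // to the vertex created by add_vertex(), so all attributes are set above
+ // before the position is committed here.)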
+ p_surface_tool->add_vertex((p_vertices_position[p_vertex] + p_morph_value) * p_scale); +} + +void FBXMeshData::triangulate_polygon(Ref<SurfaceTool> st, Vector<int> p_polygon_vertex, const Vector<Vertex> p_surface_vertex_map, const std::vector<Vector3> &p_vertices) const { + const int polygon_vertex_count = p_polygon_vertex.size(); + if (polygon_vertex_count == 1) { + // point to triangle + st->add_index(p_polygon_vertex[0]); + st->add_index(p_polygon_vertex[0]); + st->add_index(p_polygon_vertex[0]); + return; + } else if (polygon_vertex_count == 2) { + // line to triangle + st->add_index(p_polygon_vertex[1]); + st->add_index(p_polygon_vertex[1]); + st->add_index(p_polygon_vertex[0]); + return; + } else if (polygon_vertex_count == 3) { + // triangle to triangle + st->add_index(p_polygon_vertex[0]); + st->add_index(p_polygon_vertex[2]); + st->add_index(p_polygon_vertex[1]); + return; + } else if (polygon_vertex_count == 4) { + // quad to triangle - this code is awesome for import times + // it prevents triangles being generated slowly + st->add_index(p_polygon_vertex[0]); + st->add_index(p_polygon_vertex[2]); + st->add_index(p_polygon_vertex[1]); + st->add_index(p_polygon_vertex[2]); + st->add_index(p_polygon_vertex[0]); + st->add_index(p_polygon_vertex[3]); + return; + } else { + // non triangulated - we must run the triangulation algorithm + bool is_simple_convex = false; + // this code is 'slow' but required it triangulates all the unsupported geometry. + // Doesn't allow for bigger polygons because those are unlikely be convex + if (polygon_vertex_count <= 6) { + // Start from true, check if it's false. + is_simple_convex = true; + Vector3 first_vec; + for (int i = 0; i < polygon_vertex_count; i += 1) { + const Vector3 p1 = p_vertices[p_surface_vertex_map[p_polygon_vertex[i]]]; + const Vector3 p2 = p_vertices[p_surface_vertex_map[p_polygon_vertex[(i + 1) % polygon_vertex_count]]]; + const Vector3 p3 = p_vertices[p_surface_vertex_map[p_polygon_vertex[(i + 2) % polygon_vertex_count]]]; + + const Vector3 edge1 = p1 - p2; + const Vector3 edge2 = p3 - p2; + + const Vector3 res = edge1.normalized().cross(edge2.normalized()).normalized(); + if (i == 0) { + first_vec = res; + } else { + if (first_vec.dot(res) < 0.0) { + // Ok we found an angle that is not the same dir of the + // others. + is_simple_convex = false; + break; + } + } + } + } + + if (is_simple_convex) { + // This is a convex polygon, so just triangulate it. + for (int i = 0; i < (polygon_vertex_count - 2); i += 1) { + st->add_index(p_polygon_vertex[2 + i]); + st->add_index(p_polygon_vertex[1 + i]); + st->add_index(p_polygon_vertex[0]); + } + return; + } + } + + { + // This is a concave polygon. + + std::vector<Vector3> poly_vertices(polygon_vertex_count); + for (int i = 0; i < polygon_vertex_count; i += 1) { + poly_vertices[i] = p_vertices[p_surface_vertex_map[p_polygon_vertex[i]]]; + } + + const Vector3 poly_norm = get_poly_normal(poly_vertices); + if (poly_norm.length_squared() <= CMP_EPSILON) { + ERR_FAIL_COND_MSG(poly_norm.length_squared() <= CMP_EPSILON, "The normal of this poly was not computed. Is this FBX file corrupted."); + } + + // Select the plan coordinate. + int axis_1_coord = 0; + int axis_2_coord = 1; + { + real_t inv = poly_norm.z; + + const real_t axis_x = ABS(poly_norm.x); + const real_t axis_y = ABS(poly_norm.y); + const real_t axis_z = ABS(poly_norm.z); + + if (axis_x > axis_y) { + if (axis_x > axis_z) { + // For the most part the normal point toward X. 
+ axis_1_coord = 1; + axis_2_coord = 2; + inv = poly_norm.x; + } + } else if (axis_y > axis_z) { + // For the most part the normal point toward Y. + axis_1_coord = 2; + axis_2_coord = 0; + inv = poly_norm.y; + } + + // Swap projection axes to take the negated projection vector into account + if (inv < 0.0f) { + SWAP(axis_1_coord, axis_2_coord); + } + } + + TriangulatorPoly triangulator_poly; + triangulator_poly.Init(polygon_vertex_count); + std::vector<Vector2> projected_vertices(polygon_vertex_count); + for (int i = 0; i < polygon_vertex_count; i += 1) { + const Vector2 pv(poly_vertices[i][axis_1_coord], poly_vertices[i][axis_2_coord]); + projected_vertices[i] = pv; + triangulator_poly.GetPoint(i) = pv; + } + triangulator_poly.SetOrientation(TRIANGULATOR_CCW); + + List<TriangulatorPoly> out_poly; + + TriangulatorPartition triangulator_partition; + if (triangulator_partition.Triangulate_OPT(&triangulator_poly, &out_poly) == 0) { // Good result. + if (triangulator_partition.Triangulate_EC(&triangulator_poly, &out_poly) == 0) { // Medium result. + if (triangulator_partition.Triangulate_MONO(&triangulator_poly, &out_poly) == 0) { // Really poor result. + ERR_FAIL_MSG("The triangulation of this polygon failed, please try to triangulate your mesh or check if it has broken polygons."); + } + } + } + + std::vector<Vector2> tris(out_poly.size()); + for (List<TriangulatorPoly>::Element *I = out_poly.front(); I; I = I->next()) { + TriangulatorPoly &tp = I->get(); + + ERR_FAIL_COND_MSG(tp.GetNumPoints() != 3, "The triangulator retuned more points, how this is possible?"); + // Find Index + for (int i = 2; i >= 0; i -= 1) { + const Vector2 vertex = tp.GetPoint(i); + bool done = false; + // Find Index + for (int y = 0; y < polygon_vertex_count; y += 1) { + if ((projected_vertices[y] - vertex).length_squared() <= CMP_EPSILON) { + // This seems the right vertex + st->add_index(p_polygon_vertex[y]); + done = true; + break; + } + } + ERR_FAIL_COND(done == false); + } + } + } +} + +void FBXMeshData::gen_weight_info(Ref<SurfaceTool> st, Vertex vertex_id) const { + if (vertex_weights.empty()) { + return; + } + + if (vertex_weights.has(vertex_id)) { + // Let's extract the weight info. + const VertexWeightMapping *vm = vertex_weights.getptr(vertex_id); + st->set_weights(vm->weights); + st->set_bones(vm->bones); + } +} + +int FBXMeshData::get_vertex_from_polygon_vertex(const std::vector<int> &p_polygon_indices, int p_index) const { + if (p_index < 0 || p_index >= (int)p_polygon_indices.size()) { + return -1; + } + + const int vertex = p_polygon_indices[p_index]; + if (vertex >= 0) { + return vertex; + } else { + // Negative numbers are the end of the face, reversing the bits is + // possible to obtain the positive correct vertex number. + return ~vertex; + } +} + +bool FBXMeshData::is_end_of_polygon(const std::vector<int> &p_polygon_indices, int p_index) const { + if (p_index < 0 || p_index >= (int)p_polygon_indices.size()) { + return false; + } + + const int vertex = p_polygon_indices[p_index]; + + // If the index is negative this is the end of the Polygon. + return vertex < 0; +} + +bool FBXMeshData::is_start_of_polygon(const std::vector<int> &p_polygon_indices, int p_index) const { + if (p_index < 0 || p_index >= (int)p_polygon_indices.size()) { + return false; + } + + if (p_index == 0) { + return true; + } + + // If the previous indices is negative this is the begin of a new Polygon. 
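+ // Example: in the polygon vertex array [0, 2, -2, 3, -5] the negative values
+ // close a polygon (decode them with `~value`: -2 -> 1, -5 -> 4), so the entry
+ // right after each of them (here index 3) starts a new polygon.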
+ return p_polygon_indices[p_index - 1] < 0; +} + +int FBXMeshData::count_polygons(const std::vector<int> &p_polygon_indices) const { + // The negative numbers define the end of the polygon. Counting the amount of + // negatives the numbers of polygons are obtained. + int count = 0; + for (size_t i = 0; i < p_polygon_indices.size(); i += 1) { + if (p_polygon_indices[i] < 0) { + count += 1; + } + } + return count; +} + +template <class R, class T> +HashMap<int, R> FBXMeshData::extract_per_vertex_data( + int p_vertex_count, + const std::vector<FBXDocParser::MeshGeometry::Edge> &p_edge_map, + const std::vector<int> &p_mesh_indices, + const FBXDocParser::MeshGeometry::MappingData<T> &p_mapping_data, + R (*collector_function)(const Vector<VertexData<T>> *p_vertex_data, R p_fall_back), + R p_fall_back) const { + /* When index_to_direct is set + * index size is 184 ( contains index for the data array [values 0, 96] ) + * data size is 96 (contains uv coordinates) + * this means index is simple data reduction basically + */ + //// + if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct && p_mapping_data.index.size() == 0) { + print_verbose("debug count: index size: " + itos(p_mapping_data.index.size()) + ", data size: " + itos(p_mapping_data.data.size())); + print_verbose("vertex indices count: " + itos(p_mesh_indices.size())); + print_verbose("Edge map size: " + itos(p_edge_map.size())); + } + + ERR_FAIL_COND_V_MSG(p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct && p_mapping_data.index.size() == 0, (HashMap<int, R>()), "FBX importer needs to map correctly to this field, please specify the override index name to fix this problem!"); + ERR_FAIL_COND_V_MSG(p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index && p_mapping_data.index.size() == 0, (HashMap<int, R>()), "The FBX seems corrupted"); + + // Aggregate vertex data. + HashMap<Vertex, Vector<VertexData<T>>> aggregate_vertex_data; + + switch (p_mapping_data.map_type) { + case FBXDocParser::MeshGeometry::MapType::none: { + // No data nothing to do. + return (HashMap<int, R>()); + } + case FBXDocParser::MeshGeometry::MapType::vertex: { + ERR_FAIL_COND_V_MSG(p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct, (HashMap<int, R>()), "We will support in future"); + + if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::direct) { + // The data is mapped per vertex directly. + ERR_FAIL_COND_V_MSG((int)p_mapping_data.data.size() != p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR01"); + for (size_t vertex_index = 0; vertex_index < p_mapping_data.data.size(); vertex_index += 1) { + aggregate_vertex_data[vertex_index].push_back({ -1, p_mapping_data.data[vertex_index] }); + } + } else { + // The data is mapped per vertex using a reference. + // The indices array, contains a *reference_id for each vertex. + // * Note that the reference_id is the id of data into the data array. 
+ // + // https://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_class_fbx_layer_element_html + ERR_FAIL_COND_V_MSG((int)p_mapping_data.index.size() != p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR02"); + for (size_t vertex_index = 0; vertex_index < p_mapping_data.index.size(); vertex_index += 1) { + ERR_FAIL_INDEX_V_MSG(p_mapping_data.index[vertex_index], (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR03."); + aggregate_vertex_data[vertex_index].push_back({ -1, p_mapping_data.data[p_mapping_data.index[vertex_index]] }); + } + } + } break; + case FBXDocParser::MeshGeometry::MapType::polygon_vertex: { + if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct) { + // The data is mapped using each index from the indexes array then direct to the data (data reduction algorithm) + ERR_FAIL_COND_V_MSG((int)p_mesh_indices.size() != (int)p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR04"); + int polygon_id = -1; + for (size_t polygon_vertex_index = 0; polygon_vertex_index < p_mapping_data.index.size(); polygon_vertex_index += 1) { + if (is_start_of_polygon(p_mesh_indices, polygon_vertex_index)) { + polygon_id += 1; + } + const int vertex_index = get_vertex_from_polygon_vertex(p_mesh_indices, polygon_vertex_index); + ERR_FAIL_COND_V_MSG(vertex_index < 0, (HashMap<int, R>()), "FBX file corrupted: #ERR05"); + ERR_FAIL_COND_V_MSG(vertex_index >= p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR06"); + const int index_to_direct = p_mapping_data.index[polygon_vertex_index]; + T value = p_mapping_data.data[index_to_direct]; + aggregate_vertex_data[vertex_index].push_back({ polygon_id, value }); + } + } else if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::direct) { + // The data are mapped per polygon vertex directly. + ERR_FAIL_COND_V_MSG((int)p_mesh_indices.size() != (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR04"); + int polygon_id = -1; + for (size_t polygon_vertex_index = 0; polygon_vertex_index < p_mapping_data.data.size(); polygon_vertex_index += 1) { + if (is_start_of_polygon(p_mesh_indices, polygon_vertex_index)) { + polygon_id += 1; + } + const int vertex_index = get_vertex_from_polygon_vertex(p_mesh_indices, polygon_vertex_index); + ERR_FAIL_COND_V_MSG(vertex_index < 0, (HashMap<int, R>()), "FBX file corrupted: #ERR05"); + ERR_FAIL_COND_V_MSG(vertex_index >= p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR06"); + + aggregate_vertex_data[vertex_index].push_back({ polygon_id, p_mapping_data.data[polygon_vertex_index] }); + } + } else { + // The data is mapped per polygon_vertex using a reference. + // The indices array, contains a *reference_id for each polygon_vertex. + // * Note that the reference_id is the id of data into the data array. 
+ // + // https://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_class_fbx_layer_element_html + ERR_FAIL_COND_V_MSG(p_mesh_indices.size() != p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR7"); + int polygon_id = -1; + for (size_t polygon_vertex_index = 0; polygon_vertex_index < p_mapping_data.index.size(); polygon_vertex_index += 1) { + if (is_start_of_polygon(p_mesh_indices, polygon_vertex_index)) { + polygon_id += 1; + } + const int vertex_index = get_vertex_from_polygon_vertex(p_mesh_indices, polygon_vertex_index); + ERR_FAIL_COND_V_MSG(vertex_index < 0, (HashMap<int, R>()), "FBX file corrupted: #ERR8"); + ERR_FAIL_COND_V_MSG(vertex_index >= p_vertex_count, (HashMap<int, R>()), "FBX file seems corrupted: #ERR9."); + ERR_FAIL_COND_V_MSG(p_mapping_data.index[polygon_vertex_index] < 0, (HashMap<int, R>()), "FBX file seems corrupted: #ERR10."); + ERR_FAIL_COND_V_MSG(p_mapping_data.index[polygon_vertex_index] >= (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR11."); + aggregate_vertex_data[vertex_index].push_back({ polygon_id, p_mapping_data.data[p_mapping_data.index[polygon_vertex_index]] }); + } + } + } break; + case FBXDocParser::MeshGeometry::MapType::polygon: { + if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::direct) { + // The data are mapped per polygon directly. + const int polygon_count = count_polygons(p_mesh_indices); + ERR_FAIL_COND_V_MSG(polygon_count != (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR12"); + + // Advance each polygon vertex, each new polygon advance the polygon index. + int polygon_index = -1; + for (size_t polygon_vertex_index = 0; + polygon_vertex_index < p_mesh_indices.size(); + polygon_vertex_index += 1) { + if (is_start_of_polygon(p_mesh_indices, polygon_vertex_index)) { + polygon_index += 1; + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR13"); + } + + const int vertex_index = get_vertex_from_polygon_vertex(p_mesh_indices, polygon_vertex_index); + ERR_FAIL_INDEX_V_MSG(vertex_index, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR14"); + + aggregate_vertex_data[vertex_index].push_back({ polygon_index, p_mapping_data.data[polygon_index] }); + } + ERR_FAIL_COND_V_MSG((polygon_index + 1) != polygon_count, (HashMap<int, R>()), "FBX file seems corrupted: #ERR16. Not all Polygons are present in the file."); + } else { + // The data is mapped per polygon using a reference. + // The indices array, contains a *reference_id for each polygon. + // * Note that the reference_id is the id of data into the data array. + // + // https://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_class_fbx_layer_element_html + const int polygon_count = count_polygons(p_mesh_indices); + ERR_FAIL_COND_V_MSG(polygon_count != (int)p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR17"); + + // Advance each polygon vertex, each new polygon advance the polygon index. 
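+ // (The per-polygon value is pushed onto every vertex of that polygon, so the
+ // later per-vertex aggregation can treat it like any other vertex attribute.)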
+ int polygon_index = -1; + for (size_t polygon_vertex_index = 0; + polygon_vertex_index < p_mesh_indices.size(); + polygon_vertex_index += 1) { + if (is_start_of_polygon(p_mesh_indices, polygon_vertex_index)) { + polygon_index += 1; + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR18"); + ERR_FAIL_INDEX_V_MSG(p_mapping_data.index[polygon_index], (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR19"); + } + + const int vertex_index = get_vertex_from_polygon_vertex(p_mesh_indices, polygon_vertex_index); + ERR_FAIL_INDEX_V_MSG(vertex_index, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR20"); + + aggregate_vertex_data[vertex_index].push_back({ polygon_index, p_mapping_data.data[p_mapping_data.index[polygon_index]] }); + } + ERR_FAIL_COND_V_MSG((polygon_index + 1) != polygon_count, (HashMap<int, R>()), "FBX file seems corrupted: #ERR22. Not all Polygons are present in the file."); + } + } break; + case FBXDocParser::MeshGeometry::MapType::edge: { + if (p_mapping_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::direct) { + // The data are mapped per edge directly. + ERR_FAIL_COND_V_MSG(p_edge_map.size() != p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR23"); + for (size_t edge_index = 0; edge_index < p_mapping_data.data.size(); edge_index += 1) { + const FBXDocParser::MeshGeometry::Edge edge = FBXDocParser::MeshGeometry::get_edge(p_edge_map, edge_index); + ERR_FAIL_INDEX_V_MSG(edge.vertex_0, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR24"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_1, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR25"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_0, (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR26"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_1, (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR27"); + aggregate_vertex_data[edge.vertex_0].push_back({ -1, p_mapping_data.data[edge_index] }); + aggregate_vertex_data[edge.vertex_1].push_back({ -1, p_mapping_data.data[edge_index] }); + } + } else { + // The data is mapped per edge using a reference. + // The indices array, contains a *reference_id for each polygon. + // * Note that the reference_id is the id of data into the data array. 
+ // + // https://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_class_fbx_layer_element_html + ERR_FAIL_COND_V_MSG(p_edge_map.size() != p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file seems corrupted: #ERR28"); + for (size_t edge_index = 0; edge_index < p_mapping_data.data.size(); edge_index += 1) { + const FBXDocParser::MeshGeometry::Edge edge = FBXDocParser::MeshGeometry::get_edge(p_edge_map, edge_index); + ERR_FAIL_INDEX_V_MSG(edge.vertex_0, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR29"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_1, p_vertex_count, (HashMap<int, R>()), "FBX file corrupted: #ERR30"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_0, (int)p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR31"); + ERR_FAIL_INDEX_V_MSG(edge.vertex_1, (int)p_mapping_data.index.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR32"); + ERR_FAIL_INDEX_V_MSG(p_mapping_data.index[edge.vertex_0], (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR33"); + ERR_FAIL_INDEX_V_MSG(p_mapping_data.index[edge.vertex_1], (int)p_mapping_data.data.size(), (HashMap<int, R>()), "FBX file corrupted: #ERR34"); + aggregate_vertex_data[edge.vertex_0].push_back({ -1, p_mapping_data.data[p_mapping_data.index[edge_index]] }); + aggregate_vertex_data[edge.vertex_1].push_back({ -1, p_mapping_data.data[p_mapping_data.index[edge_index]] }); + } + } + } break; + case FBXDocParser::MeshGeometry::MapType::all_the_same: { + // No matter the mode, no matter the data size; The first always win + // and is set to all the vertices. + ERR_FAIL_COND_V_MSG(p_mapping_data.data.size() <= 0, (HashMap<int, R>()), "FBX file seems corrupted: #ERR35"); + if (p_mapping_data.data.size() > 0) { + for (int vertex_index = 0; vertex_index < p_vertex_count; vertex_index += 1) { + aggregate_vertex_data[vertex_index].push_back({ -1, p_mapping_data.data[0] }); + } + } + } break; + } + + if (aggregate_vertex_data.size() == 0) { + return (HashMap<int, R>()); + } + + // A map is used because turns out that the some FBX file are not well organized + // with vertices well compacted. Using a map allows avoid those issues. + HashMap<Vertex, R> result; + + // Aggregate the collected data. + for (const Vertex *index = aggregate_vertex_data.next(nullptr); index != nullptr; index = aggregate_vertex_data.next(index)) { + Vector<VertexData<T>> *aggregated_vertex = aggregate_vertex_data.getptr(*index); + // This can't be null because we are just iterating. + CRASH_COND(aggregated_vertex == nullptr); + + ERR_FAIL_INDEX_V_MSG(0, aggregated_vertex->size(), (HashMap<int, R>()), "The FBX file is corrupted, No valid data for this vertex index."); + result[*index] = collector_function(aggregated_vertex, p_fall_back); + } + + // Sanitize the data now, if the file is broken we can try import it anyway. 
+ bool problem_found = false; + for (size_t i = 0; i < p_mesh_indices.size(); i += 1) { + const Vertex vertex = get_vertex_from_polygon_vertex(p_mesh_indices, i); + if (result.has(vertex) == false) { + result[vertex] = p_fall_back; + problem_found = true; + } + } + if (problem_found) { + WARN_PRINT("Some data is missing, this FBX file may be corrupted: #WARN0."); + } + + return result; +} + +template <class T> +HashMap<int, T> FBXMeshData::extract_per_polygon( + int p_vertex_count, + const std::vector<int> &p_polygon_indices, + const FBXDocParser::MeshGeometry::MappingData<T> &p_fbx_data, + T p_fallback_value) const { + ERR_FAIL_COND_V_MSG(p_fbx_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct && p_fbx_data.data.size() == 0, (HashMap<int, T>()), "invalid index to direct array"); + ERR_FAIL_COND_V_MSG(p_fbx_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index && p_fbx_data.index.size() == 0, (HashMap<int, T>()), "The FBX seems corrupted"); + + const int polygon_count = count_polygons(p_polygon_indices); + + // Aggregate vertex data. + HashMap<int, Vector<T>> aggregate_polygon_data; + + switch (p_fbx_data.map_type) { + case FBXDocParser::MeshGeometry::MapType::none: { + // No data nothing to do. + return (HashMap<int, T>()); + } + case FBXDocParser::MeshGeometry::MapType::vertex: { + ERR_FAIL_V_MSG((HashMap<int, T>()), "This data can't be extracted and organized per polygon, since into the FBX is mapped per vertex. This should not happen."); + } break; + case FBXDocParser::MeshGeometry::MapType::polygon_vertex: { + ERR_FAIL_V_MSG((HashMap<int, T>()), "This data can't be extracted and organized per polygon, since into the FBX is mapped per polygon vertex. This should not happen."); + } break; + case FBXDocParser::MeshGeometry::MapType::polygon: { + if (p_fbx_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::index_to_direct) { + // The data is stored efficiently index_to_direct allows less data in the FBX file. + for (int polygon_index = 0; + polygon_index < polygon_count; + polygon_index += 1) { + if (p_fbx_data.index.size() == 0) { + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_fbx_data.data.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR62"); + aggregate_polygon_data[polygon_index].push_back(p_fbx_data.data[polygon_index]); + } else { + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_fbx_data.index.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR62"); + + const int index_to_direct = p_fbx_data.index[polygon_index]; + T value = p_fbx_data.data[index_to_direct]; + aggregate_polygon_data[polygon_index].push_back(value); + } + } + } else if (p_fbx_data.ref_type == FBXDocParser::MeshGeometry::ReferenceType::direct) { + // The data are mapped per polygon directly. + ERR_FAIL_COND_V_MSG(polygon_count != (int)p_fbx_data.data.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR51"); + + // Advance each polygon vertex, each new polygon advance the polygon index. + for (int polygon_index = 0; + polygon_index < polygon_count; + polygon_index += 1) { + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_fbx_data.data.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR52"); + aggregate_polygon_data[polygon_index].push_back(p_fbx_data.data[polygon_index]); + } + } else { + // The data is mapped per polygon using a reference. + // The indices array, contains a *reference_id for each polygon. + // * Note that the reference_id is the id of data into the data array. 
+ // + // https://help.autodesk.com/view/FBX/2017/ENU/?guid=__cpp_ref_class_fbx_layer_element_html + ERR_FAIL_COND_V_MSG(polygon_count != (int)p_fbx_data.index.size(), (HashMap<int, T>()), "FBX file seems corrupted: #ERR52"); + + // Advance each polygon vertex, each new polygon advance the polygon index. + for (int polygon_index = 0; + polygon_index < polygon_count; + polygon_index += 1) { + ERR_FAIL_INDEX_V_MSG(polygon_index, (int)p_fbx_data.index.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR53"); + ERR_FAIL_INDEX_V_MSG(p_fbx_data.index[polygon_index], (int)p_fbx_data.data.size(), (HashMap<int, T>()), "FBX file is corrupted: #ERR54"); + aggregate_polygon_data[polygon_index].push_back(p_fbx_data.data[p_fbx_data.index[polygon_index]]); + } + } + } break; + case FBXDocParser::MeshGeometry::MapType::edge: { + ERR_FAIL_V_MSG((HashMap<int, T>()), "This data can't be extracted and organized per polygon, since into the FBX is mapped per edge. This should not happen."); + } break; + case FBXDocParser::MeshGeometry::MapType::all_the_same: { + // No matter the mode, no matter the data size; The first always win + // and is set to all the vertices. + ERR_FAIL_COND_V_MSG(p_fbx_data.data.size() <= 0, (HashMap<int, T>()), "FBX file seems corrupted: #ERR55"); + if (p_fbx_data.data.size() > 0) { + for (int polygon_index = 0; polygon_index < polygon_count; polygon_index += 1) { + aggregate_polygon_data[polygon_index].push_back(p_fbx_data.data[0]); + } + } + } break; + } + + if (aggregate_polygon_data.size() == 0) { + return (HashMap<int, T>()); + } + + // A map is used because turns out that the some FBX file are not well organized + // with vertices well compacted. Using a map allows avoid those issues. + HashMap<int, T> polygons; + + // Take the first value for each vertex. + for (const Vertex *index = aggregate_polygon_data.next(nullptr); index != nullptr; index = aggregate_polygon_data.next(index)) { + Vector<T> *aggregated_polygon = aggregate_polygon_data.getptr(*index); + // This can't be null because we are just iterating. + CRASH_COND(aggregated_polygon == nullptr); + + ERR_FAIL_INDEX_V_MSG(0, (int)aggregated_polygon->size(), (HashMap<int, T>()), "The FBX file is corrupted, No valid data for this polygon index."); + + // Validate the final value. + polygons[*index] = (*aggregated_polygon)[0]; + } + + // Sanitize the data now, if the file is broken we can try import it anyway. + bool problem_found = false; + for (int polygon_i = 0; polygon_i < polygon_count; polygon_i += 1) { + if (polygons.has(polygon_i) == false) { + polygons[polygon_i] = p_fallback_value; + problem_found = true; + } + } + if (problem_found) { + WARN_PRINT("Some data is missing, this FBX file may be corrupted: #WARN1."); + } + + return polygons; +} + +void FBXMeshData::extract_morphs(const FBXDocParser::MeshGeometry *mesh_geometry, HashMap<String, MorphVertexData> &r_data) { + r_data.clear(); + + const int vertex_count = mesh_geometry->get_vertices().size(); + + for (const FBXDocParser::BlendShape *blend_shape : mesh_geometry->get_blend_shapes()) { + for (const FBXDocParser::BlendShapeChannel *blend_shape_channel : blend_shape->BlendShapeChannels()) { + const std::vector<const FBXDocParser::ShapeGeometry *> &shape_geometries = blend_shape_channel->GetShapeGeometries(); + for (const FBXDocParser::ShapeGeometry *shape_geometry : shape_geometries) { + String morph_name = ImportUtils::FBXAnimMeshName(shape_geometry->Name()).c_str(); + if (morph_name.empty()) { + morph_name = "morph"; + } + + // TODO we have only these?? 
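+ // Shape geometries are sparse: GetIndices() lists only the affected vertices,
+ // and GetVertices()/GetNormals() hold one offset per listed vertex. They are
+ // scattered below into full vertex_count-sized arrays so add_vertex() can
+ // apply them as per-vertex morph deltas.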
+ const std::vector<unsigned int> &morphs_vertex_indices = shape_geometry->GetIndices(); + const std::vector<Vector3> &morphs_vertices = shape_geometry->GetVertices(); + const std::vector<Vector3> &morphs_normals = shape_geometry->GetNormals(); + + ERR_FAIL_COND_MSG((int)morphs_vertex_indices.size() > vertex_count, "The FBX file is corrupted: #ERR103"); + ERR_FAIL_COND_MSG(morphs_vertex_indices.size() != morphs_vertices.size(), "The FBX file is corrupted: #ERR104"); + ERR_FAIL_COND_MSG((int)morphs_vertices.size() > vertex_count, "The FBX file is corrupted: #ERR105"); + ERR_FAIL_COND_MSG(morphs_normals.size() != 0 && morphs_normals.size() != morphs_vertices.size(), "The FBX file is corrupted: #ERR106"); + + if (r_data.has(morph_name) == false) { + // This morph doesn't exist yet. + // Create it. + MorphVertexData md; + md.vertices.resize(vertex_count); + md.normals.resize(vertex_count); + r_data.set(morph_name, md); + } + + MorphVertexData *data = r_data.getptr(morph_name); + Vector3 *data_vertices_ptr = data->vertices.ptrw(); + Vector3 *data_normals_ptr = data->normals.ptrw(); + + for (int i = 0; i < (int)morphs_vertex_indices.size(); i += 1) { + const Vertex vertex = morphs_vertex_indices[i]; + + ERR_FAIL_INDEX_MSG(vertex, vertex_count, "The blend shapes of this FBX file are corrupted. It has a not valid vertex."); + + data_vertices_ptr[vertex] = morphs_vertices[i]; + + if (morphs_normals.size() != 0) { + data_normals_ptr[vertex] = morphs_normals[i]; + } + } + } + } + } +} diff --git a/modules/fbx/data/fbx_mesh_data.h b/modules/fbx/data/fbx_mesh_data.h new file mode 100644 index 0000000000..b6e87bb9e2 --- /dev/null +++ b/modules/fbx/data/fbx_mesh_data.h @@ -0,0 +1,184 @@ +/*************************************************************************/ +/* fbx_mesh_data.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
+/*************************************************************************/
+
+#ifndef FBX_MESH_DATA_H
+#define FBX_MESH_DATA_H
+
+#include "core/templates/hash_map.h"
+#include "editor/import/resource_importer_scene.h"
+#include "editor/import/scene_importer_mesh_node_3d.h"
+#include "scene/3d/mesh_instance_3d.h"
+#include "scene/resources/surface_tool.h"
+
+#include "fbx_bone.h"
+#include "fbx_parser/FBXMeshGeometry.h"
+#include "import_state.h"
+#include "tools/import_utils.h"
+
+struct FBXNode;
+struct FBXMeshData;
+struct FBXBone;
+struct ImportState;
+
+struct VertexWeightMapping {
+ Vector<real_t> weights;
+ Vector<int> bones;
+ // This extra vector is used because the bone id is computed in a second step.
+ // TODO: It would be good to get rid of this extra step.
+ Vector<Ref<FBXBone>> bones_ref;
+};
+
+template <class T>
+struct VertexData {
+ int polygon_index;
+ T data;
+};
+
+// Caches mesh information and instantiates meshes for you using helper functions.
+struct FBXMeshData : Reference {
+ struct MorphVertexData {
+ // TODO we have only these??
+ /// Each element is a vertex. Not supposed to be empty.
+ Vector<Vector3> vertices;
+ /// Each element is a vertex. Not supposed to be empty.
+ Vector<Vector3> normals;
+ };
+
+ // FIXME: Remove this; it is a hack for testing only.
+ mutable const FBXDocParser::MeshGeometry *mesh_geometry = nullptr;
+
+ Ref<FBXNode> mesh_node = nullptr;
+ /// Maps vertex id to weight info.
+ /// later (perf): we could use an array here.
+ HashMap<int, VertexWeightMapping> vertex_weights;
+
+ // translate fbx mesh data from document context to FBX Mesh Geometry Context
+ bool valid_weight_indexes = false;
+
+ EditorSceneImporterMeshNode3D *create_fbx_mesh(const ImportState &state, const FBXDocParser::MeshGeometry *p_mesh_geometry, const FBXDocParser::Model *model, bool use_compression);
+
+ void gen_weight_info(Ref<SurfaceTool> st, int vertex_id) const;
+
+ /* mesh maximum weight count */
+ bool valid_weight_count = false;
+ int max_weight_count = 0;
+ uint64_t armature_id = 0;
+ bool valid_armature_id = false;
+ EditorSceneImporterMeshNode3D *godot_mesh_instance = nullptr;
+
+private:
+ void sanitize_vertex_weights(const ImportState &state);
+
+ /// Reorganizes the vertices so that the correct UV is used.
+ /// This step is needed because, unlike normals (which can be combined), a UV
+ /// may need its own vertex, since the same vertex can have very different UVs
+ /// in different polygons.
+ /// This function adds another vertex for those UVs.
+ void reorganize_vertices(
+ std::vector<int> &r_polygon_indices,
+ std::vector<Vector3> &r_vertices,
+ HashMap<int, Vector3> &r_normals,
+ HashMap<int, Vector2> &r_uv_1,
+ HashMap<int, Vector2> &r_uv_2,
+ HashMap<int, Color> &r_color,
+ HashMap<String, MorphVertexData> &r_morphs,
+ HashMap<int, HashMap<int, Vector3>> &r_normals_raw,
+ HashMap<int, HashMap<int, Color>> &r_colors_raw,
+ HashMap<int, HashMap<int, Vector2>> &r_uv_1_raw,
+ HashMap<int, HashMap<int, Vector2>> &r_uv_2_raw);
+
+ void add_vertex(
+ const ImportState &state,
+ Ref<SurfaceTool> p_surface_tool,
+ real_t p_scale,
+ int p_vertex,
+ const std::vector<Vector3> &p_vertices_position,
+ const HashMap<int, Vector3> &p_normals,
+ const HashMap<int, Vector2> &p_uvs_0,
+ const HashMap<int, Vector2> &p_uvs_1,
+ const HashMap<int, Color> &p_colors,
+ const Vector3 &p_morph_value = Vector3(),
+ const Vector3 &p_morph_normal = Vector3());
+
+ void triangulate_polygon(Ref<SurfaceTool> st, Vector<int> p_polygon_vertex, Vector<int> p_surface_vertex_map, const std::vector<Vector3> &p_vertices) const;
+
+ /// This function is responsible for converting an FBX polygon vertex to a
+ /// vertex index.
+ /// The polygon vertices are stored in an array with some negative
+ /// values: a negative value marks the last vertex of a face.
+ /// For example the following `face_array` contains two faces, the former
+ /// with 3 vertices and the latter with 2 (a line):
+ /// [0,2,-2,3,-5]
+ /// Parsed as:
+ /// [0, 2, 1, 3, 4]
+ /// A negative value is decoded using this formula: `(-value) - 1`
+ ///
+ /// Returns the vertex index from the polygon vertex.
+ /// Returns -1 if `p_index` is invalid.
+ int get_vertex_from_polygon_vertex(const std::vector<int> &p_face_indices, int p_index) const;
+
+ /// Returns true if this polygon_vertex_index is the end of a polygon.
+ bool is_end_of_polygon(const std::vector<int> &p_face_indices, int p_index) const;
+
+ /// Returns true if this polygon_vertex_index is the beginning of a new polygon.
+ bool is_start_of_polygon(const std::vector<int> &p_face_indices, int p_index) const;
+
+ /// Returns the number of polygons.
+ int count_polygons(const std::vector<int> &p_face_indices) const;
+
+ /// Used to extract data from the `MappingData` aligned per vertex.
+ /// Useful to extract normals/UVs/colors/tangents/etc.
+ /// If the function fails somehow, it returns an empty map and prints an error.
+ template <class R, class T>
+ HashMap<int, R> extract_per_vertex_data(
+ int p_vertex_count,
+ const std::vector<FBXDocParser::MeshGeometry::Edge> &p_edges,
+ const std::vector<int> &p_mesh_indices,
+ const FBXDocParser::MeshGeometry::MappingData<T> &p_mapping_data,
+ R (*collector_function)(const Vector<VertexData<T>> *p_vertex_data, R p_fall_back),
+ R p_fall_back) const;
+
+ /// Used to extract data from the `MappingData` organized per polygon.
+ /// Useful to extract the material.
+ /// If the function fails somehow, it returns an empty map and prints an error.
+ template <class T>
+ HashMap<int, T> extract_per_polygon(
+ int p_vertex_count,
+ const std::vector<int> &p_face_indices,
+ const FBXDocParser::MeshGeometry::MappingData<T> &p_fbx_data,
+ T p_fallback_value) const;
+
+ /// Extracts the morph data and organizes it per vertex.
+ /// The returned `MorphVertexData` arrays always have exactly `vertex_count`
+ /// elements.
+ void extract_morphs(const FBXDocParser::MeshGeometry *mesh_geometry, HashMap<String, MorphVertexData> &r_data); +}; + +#endif // FBX_MESH_DATA_H diff --git a/modules/fbx/data/fbx_node.h b/modules/fbx/data/fbx_node.h new file mode 100644 index 0000000000..d8efcaa982 --- /dev/null +++ b/modules/fbx/data/fbx_node.h @@ -0,0 +1,63 @@ +/*************************************************************************/ +/* fbx_node.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef FBX_NODE_H +#define FBX_NODE_H + +#include "fbx_skeleton.h" +#include "model_abstraction.h" +#include "pivot_transform.h" + +#include "fbx_parser/FBXDocument.h" + +class Node3D; +struct PivotTransform; + +struct FBXNode : Reference, ModelAbstraction { + uint64_t current_node_id = 0; + String node_name = String(); + Node3D *godot_node = nullptr; + + // used to parent the skeleton once the tree is built. + Ref<FBXSkeleton> skeleton_node = Ref<FBXSkeleton>(); + + void set_parent(Ref<FBXNode> p_parent) { + fbx_parent = p_parent; + } + + void set_pivot_transform(Ref<PivotTransform> p_pivot_transform) { + pivot_transform = p_pivot_transform; + } + + Ref<PivotTransform> pivot_transform = Ref<PivotTransform>(); // local and global xform data + Ref<FBXNode> fbx_parent = Ref<FBXNode>(); // parent node +}; + +#endif // FBX_NODE_H diff --git a/modules/fbx/data/fbx_skeleton.cpp b/modules/fbx/data/fbx_skeleton.cpp new file mode 100644 index 0000000000..93aeffc132 --- /dev/null +++ b/modules/fbx/data/fbx_skeleton.cpp @@ -0,0 +1,123 @@ +/*************************************************************************/ +/* fbx_skeleton.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "fbx_skeleton.h" + +#include "import_state.h" + +#include "tools/import_utils.h" + +void FBXSkeleton::init_skeleton(const ImportState &state) { + int skeleton_bone_count = skeleton_bones.size(); + + if (skeleton == nullptr && skeleton_bone_count > 0) { + skeleton = memnew(Skeleton3D); + + if (fbx_node.is_valid()) { + // cache skeleton attachment for later during node creation + // can't be done until after node hierarchy is built + if (fbx_node->godot_node != state.root) { + fbx_node->skeleton_node = Ref<FBXSkeleton>(this); + print_verbose("cached armature skeleton attachment for node " + fbx_node->node_name); + } else { + // root node must never be a skeleton to prevent cyclic skeletons from being allowed (skeleton in a skeleton) + fbx_node->godot_node->add_child(skeleton); + skeleton->set_owner(state.root_owner); + skeleton->set_name("Skeleton3D"); + print_verbose("created armature skeleton for root"); + } + } else { + memfree(skeleton); + skeleton = nullptr; + print_error("[doc] skeleton has no valid node to parent nodes to - erasing"); + skeleton_bones.clear(); + return; + } + } + + // Make the bone name uniques. + for (int x = 0; x < skeleton_bone_count; x++) { + Ref<FBXBone> bone = skeleton_bones[x]; + if (bone.is_valid()) { + // Make sure the bone name is unique. 
+ const String bone_name = bone->bone_name; + int same_name_count = 0; + for (int y = x; y < skeleton_bone_count; y++) { + Ref<FBXBone> other_bone = skeleton_bones[y]; + if (other_bone.is_valid()) { + if (other_bone->bone_name == bone_name) { + same_name_count += 1; + other_bone->bone_name += "_" + itos(same_name_count); + } + } + } + } + } + + Map<int, Ref<FBXBone>> bone_map; + // implement fbx cluster skin logic here this is where it goes + int bone_count = 0; + for (int x = 0; x < skeleton_bone_count; x++) { + Ref<FBXBone> bone = skeleton_bones[x]; + if (bone.is_valid()) { + skeleton->add_bone(bone->bone_name); + bone->godot_bone_id = bone_count; + bone->fbx_skeleton = Ref<FBXSkeleton>(this); + bone_map.insert(bone_count, bone); + print_verbose("added bone " + itos(bone->bone_id) + " " + bone->bone_name); + bone_count++; + } + } + + ERR_FAIL_COND_MSG(skeleton->get_bone_count() != bone_count, "Not all bones got added, is the file corrupted?"); + + for (Map<int, Ref<FBXBone>>::Element *bone_element = bone_map.front(); bone_element; bone_element = bone_element->next()) { + const Ref<FBXBone> bone = bone_element->value(); + int bone_index = bone_element->key(); + print_verbose("working on bone: " + itos(bone_index) + " bone name:" + bone->bone_name); + + skeleton->set_bone_rest(bone->godot_bone_id, get_unscaled_transform(bone->node->pivot_transform->LocalTransform, state.scale)); + + // lookup parent ID + if (bone->valid_parent && state.fbx_bone_map.has(bone->parent_bone_id)) { + Ref<FBXBone> parent_bone = state.fbx_bone_map[bone->parent_bone_id]; + int bone_id = skeleton->find_bone(parent_bone->bone_name); + if (bone_id != -1) { + skeleton->set_bone_parent(bone_index, bone_id); + } else { + print_error("invalid bone parent: " + parent_bone->bone_name); + } + } else { + if (bone->godot_bone_id != -1) { + skeleton->set_bone_parent(bone_index, -1); // no parent for this bone + } + } + } +} diff --git a/modules/fbx/data/fbx_skeleton.h b/modules/fbx/data/fbx_skeleton.h new file mode 100644 index 0000000000..a2e330114c --- /dev/null +++ b/modules/fbx/data/fbx_skeleton.h @@ -0,0 +1,53 @@ +/*************************************************************************/ +/* fbx_skeleton.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef FBX_SKELETON_H +#define FBX_SKELETON_H + +#include "fbx_bone.h" +#include "fbx_node.h" +#include "model_abstraction.h" + +#include "core/object/reference.h" +#include "scene/3d/skeleton_3d.h" + +struct FBXNode; +struct ImportState; +struct FBXBone; + +struct FBXSkeleton : Reference { + Ref<FBXNode> fbx_node = Ref<FBXNode>(); + Vector<Ref<FBXBone>> skeleton_bones = Vector<Ref<FBXBone>>(); + Skeleton3D *skeleton = nullptr; + + void init_skeleton(const ImportState &state); +}; + +#endif // FBX_SKELETON_H diff --git a/modules/assimp/import_state.h b/modules/fbx/data/import_state.h index a1cce6968b..34021ca7b1 100644 --- a/modules/assimp/import_state.h +++ b/modules/fbx/data/import_state.h @@ -28,8 +28,14 @@ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /*************************************************************************/ -#ifndef EDITOR_SCENE_IMPORT_STATE_H -#define EDITOR_SCENE_IMPORT_STATE_H +#ifndef IMPORT_STATE_H +#define IMPORT_STATE_H + +#include "fbx_mesh_data.h" +#include "tools/import_utils.h" +#include "tools/validation_tools.h" + +#include "pivot_transform.h" #include "core/core_bind.h" #include "core/io/resource_importer.h" @@ -43,90 +49,64 @@ #include "scene/resources/animation.h" #include "scene/resources/surface_tool.h" -#include <assimp/matrix4x4.h> -#include <assimp/scene.h> -#include <assimp/types.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/LogStream.hpp> -#include <assimp/Logger.hpp> - -namespace AssimpImporter { -/** Import state is for global scene import data - * This makes the code simpler and contains useful lookups. 
- */ +#include "modules/fbx/fbx_parser/FBXDocument.h" +#include "modules/fbx/fbx_parser/FBXImportSettings.h" +#include "modules/fbx/fbx_parser/FBXMeshGeometry.h" +#include "modules/fbx/fbx_parser/FBXParser.h" +#include "modules/fbx/fbx_parser/FBXTokenizer.h" +#include "modules/fbx/fbx_parser/FBXUtil.h" + +struct FBXBone; +struct FBXMeshData; +struct FBXNode; +struct FBXSkeleton; + struct ImportState { - String path; - Node3D *root; - const aiScene *assimp_scene; - uint32_t max_bone_weights; - - Map<String, Ref<Mesh>> mesh_cache; - Map<int, Ref<Material>> material_cache; - Map<String, int> light_cache; - Map<String, int> camera_cache; - - // very useful for when you need to ask assimp for the bone mesh - - Map<const aiNode *, Node *> assimp_node_map; - Map<String, Ref<Image>> path_to_image_cache; - - // Generation 3 - determinisitic iteration - // to lower potential recursion errors - List<const aiNode *> nodes; - Map<const aiNode *, Node3D *> flat_node_map; - AnimationPlayer *animation_player; - - // Generation 3 - deterministic armatures - // list of armature nodes - flat and simple to parse - // assimp node, node in godot - List<aiNode *> armature_nodes; - Map<const aiNode *, Skeleton3D *> armature_skeletons; - Map<aiBone *, Skeleton3D *> skeleton_bone_map; - // Generation 3 - deterministic bone handling - // bones from the stack are popped when found - // this means we can detect - // what bones are for other armatures - List<aiBone *> bone_stack; - - // EditorSceneImporter::ImportFlags - uint32_t import_flags; -}; + bool enable_material_import = true; + bool enable_animation_import = true; -struct AssimpImageData { - Ref<Image> raw_image; - Ref<ImageTexture> texture; - aiTextureMapMode map_mode[2]; -}; + Map<StringName, Ref<Texture>> cached_image_searches; + Map<uint64_t, Ref<Material>> cached_materials; + + String path = String(); + Node3D *root_owner = nullptr; + Node3D *root = nullptr; + real_t scale = 0.01; + Ref<FBXNode> fbx_root_node = Ref<FBXNode>(); + // skeleton map - merged automatically when they are on the same x node in the tree so we can merge them automatically. + Map<uint64_t, Ref<FBXSkeleton>> skeleton_map = Map<uint64_t, Ref<FBXSkeleton>>(); + + // nodes on the same level get merged automatically. + //Map<uint64_t, Skeleton3D *> armature_map; + AnimationPlayer *animation_player = nullptr; + + // Generation 4 - Raw document accessing for bone/skin/joint/kLocators + // joints are not necessarily bones but must be merged into the skeleton + // (bone id), bone + Map<uint64_t, Ref<FBXBone>> fbx_bone_map = Map<uint64_t, Ref<FBXBone>>(); // this is the bone name and setup information required for joints + // this will never contain joints only bones attached to a mesh. 
+ + // Generation 4 - Raw document for creating the nodes transforms in the scene + // this is a list of the nodes in the scene + // (id, node) + List<Ref<FBXNode>> fbx_node_list = List<Ref<FBXNode>>(); + + // All nodes which have been created in the scene + // this will not contain the root node of the scene + Map<uint64_t, Ref<FBXNode>> fbx_target_map = Map<uint64_t, Ref<FBXNode>>(); + + // mesh nodes which are created in node / mesh step - used for populating skin poses in MeshSkins + Map<uint64_t, Ref<FBXNode>> MeshNodes = Map<uint64_t, Ref<FBXNode>>(); + // mesh skin map + Map<uint64_t, Ref<Skin>> MeshSkins = Map<uint64_t, Ref<Skin>>(); -/** Recursive state is used to push state into functions instead of specifying them - * This makes the code easier to handle too and add extra arguments without breaking things - */ -struct RecursiveState { - RecursiveState() {} // do not construct :) - RecursiveState( - Transform &_node_transform, - Skeleton3D *_skeleton, - Node3D *_new_node, - String &_node_name, - aiNode *_assimp_node, - Node *_parent_node, - aiBone *_bone) : - node_transform(_node_transform), - skeleton(_skeleton), - new_node(_new_node), - node_name(_node_name), - assimp_node(_assimp_node), - parent_node(_parent_node), - bone(_bone) {} - - Transform node_transform; - Skeleton3D *skeleton = nullptr; - Node3D *new_node = nullptr; - String node_name; - aiNode *assimp_node = nullptr; - Node *parent_node = nullptr; - aiBone *bone = nullptr; + // this is the container for the mesh weight information and eventually + // any mesh data + // but not the skin, just stuff important for rendering + // skin is applied to mesh instance so not really required to be in here yet. + // maybe later + // fbx mesh id, FBXMeshData + Map<uint64_t, Ref<FBXMeshData>> renderer_mesh_data = Map<uint64_t, Ref<FBXMeshData>>(); }; -} // namespace AssimpImporter -#endif // EDITOR_SCENE_IMPORT_STATE_H +#endif // IMPORT_STATE_H diff --git a/modules/fbx/data/model_abstraction.h b/modules/fbx/data/model_abstraction.h new file mode 100644 index 0000000000..b21ab0badb --- /dev/null +++ b/modules/fbx/data/model_abstraction.h @@ -0,0 +1,52 @@ +/*************************************************************************/ +/* model_abstraction.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef MODEL_ABSTRACTION_H +#define MODEL_ABSTRACTION_H + +#include "modules/fbx/fbx_parser/FBXDocument.h" + +struct ModelAbstraction { + mutable const FBXDocParser::Model *fbx_model = nullptr; + + void set_model(const FBXDocParser::Model *p_model) { + fbx_model = p_model; + } + + bool has_model() const { + return fbx_model != nullptr; + } + + const FBXDocParser::Model *get_model() const { + return fbx_model; + } +}; + +#endif // MODEL_ABSTRACTION_H diff --git a/modules/fbx/data/pivot_transform.cpp b/modules/fbx/data/pivot_transform.cpp new file mode 100644 index 0000000000..c12e94097f --- /dev/null +++ b/modules/fbx/data/pivot_transform.cpp @@ -0,0 +1,294 @@ +/*************************************************************************/ +/* pivot_transform.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "pivot_transform.h" + +#include "tools/import_utils.h" + +void PivotTransform::ReadTransformChain() { + const FBXDocParser::PropertyTable *props = fbx_model->Props(); + const FBXDocParser::Model::RotOrder &rot = fbx_model->RotationOrder(); + const FBXDocParser::TransformInheritance &inheritType = fbx_model->InheritType(); + inherit_type = inheritType; // copy the inherit type we need it in the second step. 
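+ // Each property read below is optional: `PropertyGet` reports success through
+ // `ok`, and only values that were actually found are applied.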
+ print_verbose("Model: " + String(fbx_model->Name().c_str()) + " Has inherit type: " + itos(fbx_model->InheritType())); + bool ok = false; + raw_pre_rotation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "PreRotation", ok)); + if (ok) { + pre_rotation = ImportUtils::EulerToQuaternion(rot, ImportUtils::deg2rad(raw_pre_rotation)); + print_verbose("valid pre_rotation: " + raw_pre_rotation + " euler conversion: " + (pre_rotation.get_euler() * (180 / Math_PI))); + } + raw_post_rotation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "PostRotation", ok)); + if (ok) { + post_rotation = ImportUtils::EulerToQuaternion(FBXDocParser::Model::RotOrder_EulerXYZ, ImportUtils::deg2rad(raw_post_rotation)); + print_verbose("valid post_rotation: " + raw_post_rotation + " euler conversion: " + (pre_rotation.get_euler() * (180 / Math_PI))); + } + const Vector3 &RotationPivot = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "RotationPivot", ok)); + if (ok) { + rotation_pivot = ImportUtils::FixAxisConversions(RotationPivot); + } + const Vector3 &RotationOffset = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "RotationOffset", ok)); + if (ok) { + rotation_offset = ImportUtils::FixAxisConversions(RotationOffset); + } + const Vector3 &ScalingOffset = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "ScalingOffset", ok)); + if (ok) { + scaling_offset = ImportUtils::FixAxisConversions(ScalingOffset); + } + const Vector3 &ScalingPivot = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "ScalingPivot", ok)); + if (ok) { + scaling_pivot = ImportUtils::FixAxisConversions(ScalingPivot); + } + const Vector3 &Translation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "Lcl Translation", ok)); + if (ok) { + translation = ImportUtils::FixAxisConversions(Translation); + } + raw_rotation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "Lcl Rotation", ok)); + if (ok) { + rotation = ImportUtils::EulerToQuaternion(rot, ImportUtils::deg2rad(raw_rotation)); + } + const Vector3 &Scaling = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "Lcl Scaling", ok)); + if (ok) { + scaling = Scaling; + } + const Vector3 &GeometricScaling = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "GeometricScaling", ok)); + if (ok) { + geometric_scaling = GeometricScaling; + } else { + geometric_scaling = Vector3(0, 0, 0); + } + + const Vector3 &GeometricRotation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "GeometricRotation", ok)); + if (ok) { + geometric_rotation = ImportUtils::EulerToQuaternion(rot, ImportUtils::deg2rad(GeometricRotation)); + } else { + geometric_rotation = Quat(); + } + + const Vector3 &GeometricTranslation = ImportUtils::safe_import_vector3(FBXDocParser::PropertyGet<Vector3>(props, "GeometricTranslation", ok)); + if (ok) { + geometric_translation = ImportUtils::FixAxisConversions(GeometricTranslation); + } else { + geometric_translation = Vector3(0, 0, 0); + } + + if (geometric_rotation != Quat()) { + print_error("geometric rotation is unsupported!"); + //CRASH_COND(true); + } + + if (!geometric_scaling.is_equal_approx(Vector3(1, 1, 1))) { + print_error("geometric scaling is unsupported!"); + //CRASH_COND(true); + } + + if (!geometric_translation.is_equal_approx(Vector3(0, 0, 0))) { + print_error("geometric translation is 
unsupported."); + //CRASH_COND(true); + } +} + +Transform PivotTransform::ComputeLocalTransform(Vector3 p_translation, Quat p_rotation, Vector3 p_scaling) const { + Transform T, Roff, Rp, Soff, Sp, S; + + // Here I assume this is the operation which needs done. + // Its WorldTransform * V + + // Origin pivots + T.set_origin(p_translation); + Roff.set_origin(rotation_offset); + Rp.set_origin(rotation_pivot); + Soff.set_origin(scaling_offset); + Sp.set_origin(scaling_pivot); + + // Scaling node + S.scale(p_scaling); + // Rotation pivots + Transform Rpre = Transform(pre_rotation); + Transform R = Transform(p_rotation); + Transform Rpost = Transform(post_rotation); + + return T * Roff * Rp * Rpre * R * Rpost.affine_inverse() * Rp.affine_inverse() * Soff * Sp * S * Sp.affine_inverse(); +} + +Transform PivotTransform::ComputeGlobalTransform(Transform t) const { + Vector3 pos = t.origin; + Vector3 scale = t.basis.get_scale(); + Quat rot = t.basis.get_rotation_quat(); + return ComputeGlobalTransform(pos, rot, scale); +} + +Transform PivotTransform::ComputeLocalTransform(Transform t) const { + Vector3 pos = t.origin; + Vector3 scale = t.basis.get_scale(); + Quat rot = t.basis.get_rotation_quat(); + return ComputeLocalTransform(pos, rot, scale); +} + +Transform PivotTransform::ComputeGlobalTransform(Vector3 p_translation, Quat p_rotation, Vector3 p_scaling) const { + Transform T, Roff, Rp, Soff, Sp, S; + + // Here I assume this is the operation which needs done. + // Its WorldTransform * V + + // Origin pivots + T.set_origin(p_translation); + Roff.set_origin(rotation_offset); + Rp.set_origin(rotation_pivot); + Soff.set_origin(scaling_offset); + Sp.set_origin(scaling_pivot); + + // Scaling node + S.scale(p_scaling); + + // Rotation pivots + Transform Rpre = Transform(pre_rotation); + Transform R = Transform(p_rotation); + Transform Rpost = Transform(post_rotation); + + Transform parent_global_xform; + Transform parent_local_scaling_m; + + if (parent_transform.is_valid()) { + parent_global_xform = parent_transform->GlobalTransform; + parent_local_scaling_m = parent_transform->Local_Scaling_Matrix; + } + + Transform local_rotation_m, parent_global_rotation_m; + Quat parent_global_rotation = parent_global_xform.basis.get_rotation_quat(); + parent_global_rotation_m.basis.set_quat(parent_global_rotation); + local_rotation_m = Rpre * R * Rpost; + + //Basis parent_global_rotation = Basis(parent_global_xform.get_basis().get_rotation_quat().normalized()); + + Transform local_shear_scaling, parent_shear_scaling, parent_shear_rotation, parent_shear_translation; + Vector3 parent_translation = parent_global_xform.get_origin(); + parent_shear_translation.origin = parent_translation; + parent_shear_rotation = parent_shear_translation.affine_inverse() * parent_global_xform; + parent_shear_scaling = parent_global_rotation_m.affine_inverse() * parent_shear_rotation; + local_shear_scaling = S; + + // Inherit type handler - we don't care about T here, just reordering RSrs etc. 
+ Transform global_rotation_scale; + if (inherit_type == FBXDocParser::Transform_RrSs) { + global_rotation_scale = parent_global_rotation_m * local_rotation_m * parent_shear_scaling * local_shear_scaling; + } else if (inherit_type == FBXDocParser::Transform_RSrs) { + global_rotation_scale = parent_global_rotation_m * parent_shear_scaling * local_rotation_m * local_shear_scaling; + } else if (inherit_type == FBXDocParser::Transform_Rrs) { + Transform parent_global_shear_m_noLocal = parent_shear_scaling * parent_local_scaling_m.affine_inverse(); + global_rotation_scale = parent_global_rotation_m * local_rotation_m * parent_global_shear_m_noLocal * local_shear_scaling; + } + Transform local_transform = T * Roff * Rp * Rpre * R * Rpost.affine_inverse() * Rp.affine_inverse() * Soff * Sp * S * Sp.affine_inverse(); + //Transform local_translation_pivoted = Transform(Basis(), LocalTransform.origin); + + // manual hack to force SSC not to be compensated for - until we can handle it properly with tests + return parent_global_xform * local_transform; +} + +void PivotTransform::ComputePivotTransform() { + Transform T, Roff, Rp, Soff, Sp, S; + + // Here I assume this is the operation which needs done. + // Its WorldTransform * V + + // Origin pivots + T.set_origin(translation); + Roff.set_origin(rotation_offset); + Rp.set_origin(rotation_pivot); + Soff.set_origin(scaling_offset); + Sp.set_origin(scaling_pivot); + + // Scaling node + if (!scaling.is_equal_approx(Vector3())) { + S.scale(scaling); + } else { + S.scale(Vector3(1, 1, 1)); + } + Local_Scaling_Matrix = S; // copy for when node / child is looking for the value of this. + + // Rotation pivots + Transform Rpre = Transform(pre_rotation); + Transform R = Transform(rotation); + Transform Rpost = Transform(post_rotation); + + Transform parent_global_xform; + Transform parent_local_scaling_m; + + if (parent_transform.is_valid()) { + parent_global_xform = parent_transform->GlobalTransform; + parent_local_scaling_m = parent_transform->Local_Scaling_Matrix; + } + + Transform local_rotation_m, parent_global_rotation_m; + Quat parent_global_rotation = parent_global_xform.basis.get_rotation_quat(); + parent_global_rotation_m.basis.set_quat(parent_global_rotation); + local_rotation_m = Rpre * R * Rpost; + + //Basis parent_global_rotation = Basis(parent_global_xform.get_basis().get_rotation_quat().normalized()); + + Transform local_shear_scaling, parent_shear_scaling, parent_shear_rotation, parent_shear_translation; + Vector3 parent_translation = parent_global_xform.get_origin(); + parent_shear_translation.origin = parent_translation; + parent_shear_rotation = parent_shear_translation.affine_inverse() * parent_global_xform; + parent_shear_scaling = parent_global_rotation_m.affine_inverse() * parent_shear_rotation; + local_shear_scaling = S; + + // Inherit type handler - we don't care about T here, just reordering RSrs etc. 
+ Transform global_rotation_scale; + if (inherit_type == FBXDocParser::Transform_RrSs) { + global_rotation_scale = parent_global_rotation_m * local_rotation_m * parent_shear_scaling * local_shear_scaling; + } else if (inherit_type == FBXDocParser::Transform_RSrs) { + global_rotation_scale = parent_global_rotation_m * parent_shear_scaling * local_rotation_m * local_shear_scaling; + } else if (inherit_type == FBXDocParser::Transform_Rrs) { + Transform parent_global_shear_m_noLocal = parent_shear_scaling * parent_local_scaling_m.inverse(); + global_rotation_scale = parent_global_rotation_m * local_rotation_m * parent_global_shear_m_noLocal * local_shear_scaling; + } + LocalTransform = Transform(); + LocalTransform = T * Roff * Rp * Rpre * R * Rpost.affine_inverse() * Rp.affine_inverse() * Soff * Sp * S * Sp.affine_inverse(); + + ERR_FAIL_COND_MSG(LocalTransform.basis.determinant() == 0, "invalid scale reset"); + + Transform local_translation_pivoted = Transform(Basis(), LocalTransform.origin); + GlobalTransform = Transform(); + //GlobalTransform = parent_global_xform * LocalTransform; + Transform global_origin = Transform(Basis(), parent_translation); + GlobalTransform = (global_origin * local_translation_pivoted) * global_rotation_scale; + + ImportUtils::debug_xform("local xform calculation", LocalTransform); + print_verbose("scale of node: " + S.basis.get_scale_local()); + print_verbose("---------------------------------------------------------------"); +} + +void PivotTransform::Execute() { + ReadTransformChain(); + ComputePivotTransform(); + + ImportUtils::debug_xform("global xform: ", GlobalTransform); + computed_global_xform = true; +} diff --git a/modules/fbx/data/pivot_transform.h b/modules/fbx/data/pivot_transform.h new file mode 100644 index 0000000000..025c8dd58d --- /dev/null +++ b/modules/fbx/data/pivot_transform.h @@ -0,0 +1,115 @@ +/*************************************************************************/ +/* pivot_transform.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef PIVOT_TRANSFORM_H +#define PIVOT_TRANSFORM_H + +#include "core/math/transform.h" +#include "core/object/reference.h" + +#include "model_abstraction.h" + +#include "fbx_parser/FBXDocument.h" +#include "tools/import_utils.h" + +enum TransformationComp { + TransformationComp_Translation, + TransformationComp_Scaling, + TransformationComp_Rotation, + TransformationComp_RotationOffset, + TransformationComp_RotationPivot, + TransformationComp_PreRotation, + TransformationComp_PostRotation, + TransformationComp_ScalingOffset, + TransformationComp_ScalingPivot, + TransformationComp_GeometricTranslation, + TransformationComp_GeometricRotation, + TransformationComp_GeometricScaling, + TransformationComp_MAXIMUM +}; +// Abstract away pivot data so its simpler to handle +struct PivotTransform : Reference, ModelAbstraction { + // at the end we want to keep geometric_ everything, post and pre rotation + // these are used during animation data processing / keyframe ingestion the rest can be simplified down / out. + Quat pre_rotation = Quat(); + Quat post_rotation = Quat(); + Quat rotation = Quat(); + Quat geometric_rotation = Quat(); + Vector3 rotation_pivot = Vector3(); + Vector3 rotation_offset = Vector3(); + Vector3 scaling_offset = Vector3(1.0, 1.0, 1.0); + Vector3 scaling_pivot = Vector3(1.0, 1.0, 1.0); + Vector3 translation = Vector3(); + Vector3 scaling = Vector3(1.0, 1.0, 1.0); + Vector3 geometric_scaling = Vector3(1.0, 1.0, 1.0); + Vector3 geometric_translation = Vector3(); + + Vector3 raw_rotation = Vector3(); + Vector3 raw_post_rotation = Vector3(); + Vector3 raw_pre_rotation = Vector3(); + + /* Read pivots from the document */ + void ReadTransformChain(); + + void debug_pivot_xform(String p_name) { + print_verbose("debugging node name: " + p_name); + print_verbose("raw rotation: " + raw_rotation * (180 / Math_PI)); + print_verbose("raw pre_rotation " + raw_pre_rotation * (180 / Math_PI)); + print_verbose("raw post_rotation " + raw_post_rotation * (180 / Math_PI)); + } + + Transform ComputeGlobalTransform(Transform t) const; + Transform ComputeLocalTransform(Transform t) const; + Transform ComputeGlobalTransform(Vector3 p_translation, Quat p_rotation, Vector3 p_scaling) const; + Transform ComputeLocalTransform(Vector3 p_translation, Quat p_rotation, Vector3 p_scaling) const; + + /* Extract into xforms and calculate once */ + void ComputePivotTransform(); + + /* Execute the command for the pivot generation */ + void Execute(); + + void set_parent(Ref<PivotTransform> p_parent) { + parent_transform = p_parent; + } + + bool computed_global_xform = false; + Ref<PivotTransform> parent_transform = Ref<PivotTransform>(); + //Transform chain[TransformationComp_MAXIMUM]; + + // cached for later use + Transform GlobalTransform = Transform(); + Transform LocalTransform = Transform(); + Transform Local_Scaling_Matrix = Transform(); // used for inherit type. 
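+ // These cached transforms are filled in by Execute(). A typical usage sketch:
+ // instance a PivotTransform, call set_model() and set_parent(), then Execute(),
+ // and read LocalTransform / GlobalTransform afterwards.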
+ Transform GeometricTransform = Transform(); // 3DS max only
+ FBXDocParser::TransformInheritance inherit_type = FBXDocParser::TransformInheritance_MAX; // maya fbx requires this - sorry <3
+};
+
+#endif // PIVOT_TRANSFORM_H
diff --git a/modules/fbx/doc_classes/EditorSceneImporterFBX.xml b/modules/fbx/doc_classes/EditorSceneImporterFBX.xml
new file mode 100644
index 0000000000..72b6e0f5fd
--- /dev/null
+++ b/modules/fbx/doc_classes/EditorSceneImporterFBX.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<class name="EditorSceneImporterFBX" inherits="EditorSceneImporter" version="3.2">
+ <brief_description>
+ FBX 3D asset importer.
+ </brief_description>
+ <description>
+ This is an FBX 3D asset importer with full support for most FBX features.
+ If exporting an FBX scene from Autodesk Maya, use these FBX export settings:
+ [codeblock]
+ - Smoothing Groups
+ - Smooth Mesh
+ - Triangulate (for meshes with blend shapes)
+ - Bake Animation
+ - Resample All
+ - Deformed Models
+ - Skins
+ - Blend Shapes
+ - Curve Filters
+ - Constant Key Reducer
+ - Auto Tangents Only
+ - *Do not check* Constraints (as it will break the file)
+ - Can check Embed Media (embeds textures into the exported FBX file)
+ - Note that when importing embedded media, the texture and mesh will be a single immutable file.
+ - You will have to re-export and then re-import the FBX if the texture has changed.
+ - Units: Centimeters
+ - Up Axis: Y
+ - Binary format in FBX 2017
+ [/codeblock]
+ </description>
+ <tutorials>
+ </tutorials>
+ <methods>
+ </methods>
+ <constants>
+ </constants>
+</class>
diff --git a/modules/fbx/editor_scene_importer_fbx.cpp b/modules/fbx/editor_scene_importer_fbx.cpp
new file mode 100644
index 0000000000..d37befbbfd
--- /dev/null
+++ b/modules/fbx/editor_scene_importer_fbx.cpp
@@ -0,0 +1,1424 @@
+/*************************************************************************/
+/* editor_scene_importer_fbx.cpp */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ +/*************************************************************************/ + +#include "editor_scene_importer_fbx.h" + +#include "data/fbx_anim_container.h" +#include "data/fbx_material.h" +#include "data/fbx_mesh_data.h" +#include "data/fbx_skeleton.h" +#include "tools/import_utils.h" + +#include "core/io/image_loader.h" +#include "editor/editor_log.h" +#include "editor/editor_node.h" +#include "editor/import/resource_importer_scene.h" +#include "editor/import/scene_importer_mesh_node_3d.h" +#include "scene/3d/bone_attachment_3d.h" +#include "scene/3d/camera_3d.h" +#include "scene/3d/light_3d.h" +#include "scene/3d/mesh_instance_3d.h" +#include "scene/main/node.h" +#include "scene/resources/material.h" + +#include "fbx_parser/FBXDocument.h" +#include "fbx_parser/FBXImportSettings.h" +#include "fbx_parser/FBXMeshGeometry.h" +#include "fbx_parser/FBXParser.h" +#include "fbx_parser/FBXProperties.h" +#include "fbx_parser/FBXTokenizer.h" + +#include <string> + +void EditorSceneImporterFBX::get_extensions(List<String> *r_extensions) const { + // register FBX as the one and only format for FBX importing + const String import_setting_string = "filesystem/import/fbx/"; + const String fbx_str = "fbx"; + Vector<String> exts; + exts.push_back(fbx_str); + _register_project_setting_import(fbx_str, import_setting_string, exts, r_extensions, + true); +} + +void EditorSceneImporterFBX::_register_project_setting_import(const String generic, + const String import_setting_string, + const Vector<String> &exts, + List<String> *r_extensions, + const bool p_enabled) const { + const String use_generic = "use_" + generic; + _GLOBAL_DEF(import_setting_string + use_generic, p_enabled, true); + if (ProjectSettings::get_singleton()->get(import_setting_string + use_generic)) { + for (int32_t i = 0; i < exts.size(); i++) { + r_extensions->push_back(exts[i]); + } + } +} + +uint32_t EditorSceneImporterFBX::get_import_flags() const { + return IMPORT_SCENE; +} + +Node3D *EditorSceneImporterFBX::import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err) { + // done for performance when re-importing lots of files when testing importer in verbose only! 
+ if (OS::get_singleton()->is_stdout_verbose()) { + EditorLog *log = EditorNode::get_log(); + log->clear(); + } + Error err; + FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); + + ERR_FAIL_COND_V(!f, NULL); + + { + PackedByteArray data; + // broadphase tokenizing pass in which we identify the core + // syntax elements of FBX (brackets, commas, key:value mappings) + FBXDocParser::TokenList tokens; + + bool is_binary = false; + data.resize(f->get_len()); + f->get_buffer(data.ptrw(), data.size()); + PackedByteArray fbx_header; + fbx_header.resize(64); + for (int32_t byte_i = 0; byte_i < 64; byte_i++) { + fbx_header.ptrw()[byte_i] = data.ptr()[byte_i]; + } + + String fbx_header_string; + if (fbx_header.size() >= 0) { + fbx_header_string.parse_utf8((const char *)fbx_header.ptr(), fbx_header.size()); + } + + print_verbose("[doc] opening fbx file: " + p_path); + print_verbose("[doc] fbx header: " + fbx_header_string); + + // safer to check this way as there can be different formatted headers + if (fbx_header_string.find("Kaydara FBX Binary", 0) != -1) { + is_binary = true; + print_verbose("[doc] is binary"); + FBXDocParser::TokenizeBinary(tokens, (const char *)data.ptrw(), (size_t)data.size()); + } else { + print_verbose("[doc] is ascii"); + FBXDocParser::Tokenize(tokens, (const char *)data.ptrw(), (size_t)data.size()); + } + + // The import process explained: + // 1. Tokens are made, these are then taken into the 'parser' below + // 2. The parser constructs 'Elements' and all 'real' FBX Types. + // 3. This creates a problem: shared_ptr ownership, should Elements later 'take ownership' + // 4. No, it shouldn't so we should either a.) use weak ref for elements; but this is not correct. + + // use this information to construct a very rudimentary + // parse-tree representing the FBX scope structure + FBXDocParser::Parser parser(tokens, is_binary); + FBXDocParser::ImportSettings settings; + settings.strictMode = false; + + // this function leaks a lot + FBXDocParser::Document doc(parser, settings); + + // yeah so closing the file is a good idea (prevents readonly states) + f->close(); + + // safety for version handling + if (doc.IsSafeToImport()) { + bool is_blender_fbx = false; + //const FBXDocParser::PropertyPtr app_vendor = p_document->GlobalSettingsPtr()->Props() + // p_document->Creator() + const FBXDocParser::PropertyTable *import_props = doc.GetMetadataProperties(); + const FBXDocParser::PropertyPtr app_name = import_props->Get("Original|ApplicationName"); + const FBXDocParser::PropertyPtr app_vendor = import_props->Get("Original|ApplicationVendor"); + const FBXDocParser::PropertyPtr app_version = import_props->Get("Original|ApplicationVersion"); + // + if (app_name) { + const FBXDocParser::TypedProperty<std::string> *app_name_string = dynamic_cast<const FBXDocParser::TypedProperty<std::string> *>(app_name); + if (app_name_string) { + print_verbose("FBX App Name: " + String(app_name_string->Value().c_str())); + } + } + + if (app_vendor) { + const FBXDocParser::TypedProperty<std::string> *app_vendor_string = dynamic_cast<const FBXDocParser::TypedProperty<std::string> *>(app_vendor); + if (app_vendor_string) { + print_verbose("FBX App Vendor: " + String(app_vendor_string->Value().c_str())); + is_blender_fbx = app_vendor_string->Value().find("Blender") != std::string::npos; + } + } + + if (app_version) { + const FBXDocParser::TypedProperty<std::string> *app_version_string = dynamic_cast<const FBXDocParser::TypedProperty<std::string> *>(app_version); + if (app_version_string) { 
+ print_verbose("FBX App Version: " + String(app_version_string->Value().c_str())); + } + } + + if (is_blender_fbx) { + WARN_PRINT("Blender FBX files will not work properly with keyframes or skeletons until we make fixes stand by."); + } + + Node3D *spatial = _generate_scene(p_path, &doc, p_flags, p_bake_fps, 8); + // todo: move to document shutdown (will need to be validated after moving; this code has been validated already) + for (FBXDocParser::TokenPtr token : tokens) { + if (token) { + delete token; + token = nullptr; + } + } + + return spatial; + + } else { + print_error("Cannot import file: " + p_path + " version of file is unsupported, please re-export in your modelling package file version is: " + itos(doc.FBXVersion())); + } + } + + return memnew(Node3D); +} + +template <class T> +struct EditorSceneImporterAssetImportInterpolate { + T lerp(const T &a, const T &b, float c) const { + return a + (b - a) * c; + } + + T catmull_rom(const T &p0, const T &p1, const T &p2, const T &p3, float t) { + const float t2 = t * t; + const float t3 = t2 * t; + + return 0.5f * ((2.0f * p1) + (-p0 + p2) * t + (2.0f * p0 - 5.0f * p1 + 4.0f * p2 - p3) * t2 + (-p0 + 3.0f * p1 - 3.0f * p2 + p3) * t3); + } + + T bezier(T start, T control_1, T control_2, T end, float t) { + /* Formula from Wikipedia article on Bezier curves. */ + const real_t omt = (1.0 - t); + const real_t omt2 = omt * omt; + const real_t omt3 = omt2 * omt; + const real_t t2 = t * t; + const real_t t3 = t2 * t; + + return start * omt3 + control_1 * omt2 * t * 3.0 + control_2 * omt * t2 * 3.0 + end * t3; + } +}; + +//thank you for existing, partial specialization +template <> +struct EditorSceneImporterAssetImportInterpolate<Quat> { + Quat lerp(const Quat &a, const Quat &b, float c) const { + ERR_FAIL_COND_V(!a.is_normalized(), Quat()); + ERR_FAIL_COND_V(!b.is_normalized(), Quat()); + + return a.slerp(b, c).normalized(); + } + + Quat catmull_rom(const Quat &p0, const Quat &p1, const Quat &p2, const Quat &p3, float c) { + ERR_FAIL_COND_V(!p1.is_normalized(), Quat()); + ERR_FAIL_COND_V(!p2.is_normalized(), Quat()); + + return p1.slerp(p2, c).normalized(); + } + + Quat bezier(Quat start, Quat control_1, Quat control_2, Quat end, float t) { + ERR_FAIL_COND_V(!start.is_normalized(), Quat()); + ERR_FAIL_COND_V(!end.is_normalized(), Quat()); + + return start.slerp(end, t).normalized(); + } +}; + +template <class T> +T EditorSceneImporterFBX::_interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, float p_time, + AssetImportAnimation::Interpolation p_interp) { + //could use binary search, worth it? 
+ int idx = -1; + for (int i = 0; i < p_times.size(); i++) { + if (p_times[i] > p_time) + break; + idx++; + } + + EditorSceneImporterAssetImportInterpolate<T> interp; + + switch (p_interp) { + case AssetImportAnimation::INTERP_LINEAR: { + if (idx == -1) { + return p_values[0]; + } else if (idx >= p_times.size() - 1) { + return p_values[p_times.size() - 1]; + } + + float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + return interp.lerp(p_values[idx], p_values[idx + 1], c); + + } break; + case AssetImportAnimation::INTERP_STEP: { + if (idx == -1) { + return p_values[0]; + } else if (idx >= p_times.size() - 1) { + return p_values[p_times.size() - 1]; + } + + return p_values[idx]; + + } break; + case AssetImportAnimation::INTERP_CATMULLROMSPLINE: { + if (idx == -1) { + return p_values[1]; + } else if (idx >= p_times.size() - 1) { + return p_values[1 + p_times.size() - 1]; + } + + float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + return interp.catmull_rom(p_values[idx - 1], p_values[idx], p_values[idx + 1], p_values[idx + 3], c); + + } break; + case AssetImportAnimation::INTERP_CUBIC_SPLINE: { + if (idx == -1) { + return p_values[1]; + } else if (idx >= p_times.size() - 1) { + return p_values[(p_times.size() - 1) * 3 + 1]; + } + + float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + T from = p_values[idx * 3 + 1]; + T c1 = from + p_values[idx * 3 + 2]; + T to = p_values[idx * 3 + 4]; + T c2 = to + p_values[idx * 3 + 3]; + + return interp.bezier(from, c1, c2, to, c); + + } break; + } + + ERR_FAIL_V(p_values[0]); +} + +Node3D *EditorSceneImporterFBX::_generate_scene( + const String &p_path, + const FBXDocParser::Document *p_document, + const uint32_t p_flags, + int p_bake_fps, + const int32_t p_max_bone_weights) { + ImportState state; + state.path = p_path; + state.animation_player = NULL; + + // create new root node for scene + Node3D *scene_root = memnew(Node3D); + state.root = memnew(Node3D); + state.root_owner = scene_root; // the real scene root... sorry compatibility code is painful... + + state.root->set_name("RootNode"); + scene_root->add_child(state.root); + state.root->set_owner(scene_root); + + state.fbx_root_node.instance(); + state.fbx_root_node->godot_node = state.root; + + // Size relative to cm. + const real_t fbx_unit_scale = p_document->GlobalSettingsPtr()->UnitScaleFactor(); + + print_verbose("FBX unit scale import value: " + rtos(fbx_unit_scale)); + // Set FBX file scale is relative to CM must be converted to M + state.scale = fbx_unit_scale / 100.0; + print_verbose("FBX unit scale is: " + rtos(state.scale)); + + // Enabled by default. + state.enable_material_import = true; + // Enabled by default. + state.enable_animation_import = true; + Ref<FBXNode> root_node; + root_node.instance(); + + // make sure fake noFBXDocParser::PropertyPtr ptrde always has a transform too ;) + Ref<PivotTransform> pivot_transform; + pivot_transform.instance(); + root_node->pivot_transform = pivot_transform; + root_node->node_name = "root node"; + root_node->current_node_id = 0; + root_node->godot_node = state.root; + + // cache this node onto the fbx_target map. + state.fbx_target_map.insert(0, root_node); + + // cache basic node information from FBX document + // grabs all FBX bones + BuildDocumentBones(Ref<FBXBone>(), state, p_document, 0L); + BuildDocumentNodes(Ref<PivotTransform>(), state, p_document, 0L, nullptr); + + // Build document skinning information + + // Algorithm is this: + // Get Deformer: object with "Skin" class. 
+ // Deformer:: has link to Geometry:: (correct mesh for skin) + // Deformer:: has Source which is the SubDeformer:: (e.g. the Cluster) + // Notes at the end it configures the vertex weight mapping. + + for (uint64_t skin_id : p_document->GetSkinIDs()) { + // Validate the parser + FBXDocParser::LazyObject *lazy_skin = p_document->GetObject(skin_id); + ERR_CONTINUE_MSG(lazy_skin == nullptr, "invalid lazy object [serious parser bug]"); + + // Validate the parser + const FBXDocParser::Skin *skin = lazy_skin->Get<FBXDocParser::Skin>(); + ERR_CONTINUE_MSG(skin == nullptr, "invalid skin added to skin list [parser bug]"); + + const std::vector<const FBXDocParser::Connection *> source_to_destination = p_document->GetConnectionsBySourceSequenced(skin_id); + FBXDocParser::MeshGeometry *mesh = nullptr; + uint64_t mesh_id = 0; + + // Most likely only contains the mesh link for the skin + // The mesh geometry. + for (const FBXDocParser::Connection *con : source_to_destination) { + // do something + print_verbose("src: " + itos(con->src)); + FBXDocParser::Object *ob = con->DestinationObject(); + mesh = dynamic_cast<FBXDocParser::MeshGeometry *>(ob); + + if (mesh) { + mesh_id = mesh->ID(); + break; + } + } + + // Validate the mesh exists and was retrieved + ERR_CONTINUE_MSG(mesh_id == 0, "mesh id is invalid"); + const std::vector<const FBXDocParser::Cluster *> clusters = skin->Clusters(); + + // NOTE: this will ONLY work on skinned bones (it is by design.) + // A cluster is a skinned bone so SKINS won't contain unskinned bones so we need to pre-add all bones and parent them in a step beforehand. + for (const FBXDocParser::Cluster *cluster : clusters) { + ERR_CONTINUE_MSG(cluster == nullptr, "invalid bone cluster"); + const uint64_t deformer_id = cluster->ID(); + std::vector<const FBXDocParser::Connection *> connections = p_document->GetConnectionsByDestinationSequenced(deformer_id); + + // Weight data always has a node in the scene lets grab the limb's node in the scene :) (reverse set to true since it's the opposite way around) + const FBXDocParser::ModelLimbNode *limb_node = ProcessDOMConnection<FBXDocParser::ModelLimbNode>(p_document, deformer_id, true); + + ERR_CONTINUE_MSG(limb_node == nullptr, "unable to resolve model for skinned bone"); + + const uint64_t model_id = limb_node->ID(); + + // This will never happen, so if it does you know you fucked up. + ERR_CONTINUE_MSG(!state.fbx_bone_map.has(model_id), "missing LimbNode detected"); + + // new bone instance + Ref<FBXBone> bone_element = state.fbx_bone_map[model_id]; + + // + // Bone Weight Information Configuration + // + + // Cache Weight Information into bone for later usage if you want the raw data. 
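For readers unfamiliar with the FBX document model, the lookups above (skin to mesh, cluster to limb node) all follow one shape: walk the object-to-object connections of an ID and dynamic_cast each endpoint until the wanted type appears, which is what the patch's ProcessDOMConnection<> helper does. Below is a minimal standalone sketch of that pattern; the Object/Connection/MeshGeometry types are hypothetical stand-ins, not the FBXDocParser classes.

#include <vector>

// Hypothetical stand-ins for the parser's object and connection types, only to show the shape.
struct Object {
	virtual ~Object() {}
};
struct MeshGeometry : Object {};
struct Connection {
	Object *source = nullptr;
	Object *destination = nullptr;
};

// Scan connections and return the last endpoint castable to T (source or destination,
// depending on the lookup direction), mirroring how ProcessDOMConnection<> resolves targets.
template <typename T>
const T *find_connected(const std::vector<Connection> &connections, bool reverse_lookup) {
	const T *found = nullptr;
	for (const Connection &con : connections) {
		const Object *candidate = reverse_lookup ? con.source : con.destination;
		if (const T *typed = dynamic_cast<const T *>(candidate)) {
			found = typed; // keep the match; nullptr stays if nothing of type T is connected
		}
	}
	return found;
}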
+ const std::vector<unsigned int> &indexes = cluster->GetIndices(); + const std::vector<float> &weights = cluster->GetWeights(); + Ref<FBXMeshData> mesh_vertex_data; + + // this data will pre-exist if vertex weight information is found + if (state.renderer_mesh_data.has(mesh_id)) { + mesh_vertex_data = state.renderer_mesh_data[mesh_id]; + } else { + mesh_vertex_data.instance(); + state.renderer_mesh_data.insert(mesh_id, mesh_vertex_data); + } + + mesh_vertex_data->armature_id = bone_element->armature_id; + mesh_vertex_data->valid_armature_id = true; + + //print_verbose("storing mesh vertex data for mesh to use later"); + ERR_CONTINUE_MSG(indexes.size() != weights.size(), "[doc] error mismatch between weight info"); + + for (size_t idx = 0; idx < indexes.size(); idx++) { + const size_t vertex_index = indexes[idx]; + const real_t influence_weight = weights[idx]; + + VertexWeightMapping &vm = mesh_vertex_data->vertex_weights[vertex_index]; + vm.weights.push_back(influence_weight); + vm.bones.push_back(0); // bone id is pushed on here during sanitization phase + vm.bones_ref.push_back(bone_element); + } + + for (const int *vertex_index = mesh_vertex_data->vertex_weights.next(nullptr); + vertex_index != nullptr; + vertex_index = mesh_vertex_data->vertex_weights.next(vertex_index)) { + VertexWeightMapping *vm = mesh_vertex_data->vertex_weights.getptr(*vertex_index); + const int influence_count = vm->weights.size(); + if (influence_count > mesh_vertex_data->max_weight_count) { + mesh_vertex_data->max_weight_count = influence_count; + mesh_vertex_data->valid_weight_count = true; + } + } + + if (mesh_vertex_data->max_weight_count > 4) { + if (mesh_vertex_data->max_weight_count > 8) { + ERR_PRINT("[doc] Serious: maximum bone influences is 8 in this branch."); + } + // Clamp to 8 bone vertex influences. + mesh_vertex_data->max_weight_count = 8; + print_verbose("[doc] Using 8 vertex bone influences configuration."); + } else { + mesh_vertex_data->max_weight_count = 4; + print_verbose("[doc] Using 4 vertex bone influences configuration."); + } + } + } + + // do we globally allow for import of materials + // (prevents overwrite of materials; so you can handle them explicitly) + if (state.enable_material_import) { + const std::vector<uint64_t> &materials = p_document->GetMaterialIDs(); + + for (uint64_t material_id : materials) { + FBXDocParser::LazyObject *lazy_material = p_document->GetObject(material_id); + FBXDocParser::Material *mat = (FBXDocParser::Material *)lazy_material->Get<FBXDocParser::Material>(); + ERR_CONTINUE_MSG(!mat, "Could not convert fbx material by id: " + itos(material_id)); + + Ref<FBXMaterial> material; + material.instance(); + material->set_imported_material(mat); + + Ref<StandardMaterial3D> godot_material = material->import_material(state); + + state.cached_materials.insert(material_id, godot_material); + } + } + + // build skin and skeleton information + print_verbose("[doc] Skeleton3D Bone count: " + itos(state.fbx_bone_map.size())); + + // Importing bones using document based method from FBX directly + // We do not use the assimp bone format to determine this information anymore. 
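The 4-versus-8 influence decision above only records the widest weight count seen per mesh; the actual trimming and renormalising happens later when the mesh surfaces are generated. As a rough, self-contained illustration of what limiting a vertex to its N strongest influences looks like (an assumption-level sketch, not the patch's exact code):

#include <algorithm>
#include <utility>
#include <vector>

// One (bone index, weight) influence pair for a single vertex.
using Influence = std::pair<int, float>;

// Keep only the max_influences strongest weights and renormalise them so they sum to 1.
std::vector<Influence> limit_influences(std::vector<Influence> weights, size_t max_influences) {
	std::sort(weights.begin(), weights.end(),
			[](const Influence &a, const Influence &b) { return a.second > b.second; });
	if (weights.size() > max_influences) {
		weights.resize(max_influences);
	}
	float total = 0.0f;
	for (const Influence &w : weights) {
		total += w.second;
	}
	if (total > 0.0f) {
		for (Influence &w : weights) {
			w.second /= total;
		}
	}
	return weights;
}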
+ if (state.fbx_bone_map.size() > 0) {
+ // We are using a single skeleton only method here
+ // this is because we really have no concept of skeletons in FBX
+ // there are bones in a scene but they have no specific armature
+ // we can detect armatures but the issue lies in the complexity
+ // we opted to merge the entire scene onto one skeleton for now
+ // if we need to change this we have an archive of the old code.
+
+ // bind pose normally only has 1 per mesh but can have more than one
+ // this is the point of skins
+ // in FBX first bind pose is the master for the first skin
+
+ // In order to handle the FBX skeleton we must also invert any parent transforms on the bones
+ // just to rule out any parent node transforms in the bone data
+ // this is trivial to do and allows us to use the single skeleton method and merge them
+ // this means that the nodes from maya kLocators will be preserved as bones
+ // in the same rig without having to match this across skeletons and merge by detection
+ // we can just merge and undo any parent transforms
+ for (Map<uint64_t, Ref<FBXBone>>::Element *bone_element = state.fbx_bone_map.front(); bone_element; bone_element = bone_element->next()) {
+ Ref<FBXBone> bone = bone_element->value();
+ Ref<FBXSkeleton> fbx_skeleton_inst;
+
+ uint64_t armature_id = bone->armature_id;
+ if (state.skeleton_map.has(armature_id)) {
+ fbx_skeleton_inst = state.skeleton_map[armature_id];
+ } else {
+ fbx_skeleton_inst.instance();
+ state.skeleton_map.insert(armature_id, fbx_skeleton_inst);
+ }
+
+ print_verbose("populating skeleton with bone: " + bone->bone_name);
+
+ // // populate bone skeleton - since fbx has no DOM for the skeleton just a node.
+ // bone->bone_skeleton = fbx_skeleton_inst;
+
+ // now populate bone on the armature node list
+ fbx_skeleton_inst->skeleton_bones.push_back(bone);
+
+ CRASH_COND_MSG(!state.fbx_target_map.has(armature_id), "invalid armature [serious]");
+
+ Ref<FBXNode> node = state.fbx_target_map[armature_id];
+
+ CRASH_COND_MSG(node.is_null(), "invalid node [serious]");
+ CRASH_COND_MSG(node->pivot_transform.is_null(), "invalid pivot transform [serious]");
+ fbx_skeleton_inst->fbx_node = node;
+
+ ERR_CONTINUE_MSG(fbx_skeleton_inst->fbx_node.is_null(), "invalid skeleton node [serious]");
+
+ // we need to have a valid armature id and the model configured for the bone to be assigned fully.
+ // happens once per skeleton
+
+ if (state.fbx_target_map.has(armature_id) && !fbx_skeleton_inst->fbx_node->has_model()) {
+ print_verbose("allocated fbx skeleton primary / armature node for the level: " + fbx_skeleton_inst->fbx_node->node_name);
+ } else if (!state.fbx_target_map.has(armature_id) && !fbx_skeleton_inst->fbx_node->has_model()) {
+ print_error("bones are not mapped to an armature node for armature id: " + itos(armature_id) + " bone: " + bone->bone_name);
+ // this means bone will be removed and not used, which is safe actually and no skeleton will be created.
+ } + } + + // setup skeleton instances if required :) + for (Map<uint64_t, Ref<FBXSkeleton>>::Element *skeleton_node = state.skeleton_map.front(); skeleton_node; skeleton_node = skeleton_node->next()) { + Ref<FBXSkeleton> &skeleton = skeleton_node->value(); + skeleton->init_skeleton(state); + + ERR_CONTINUE_MSG(skeleton->fbx_node.is_null(), "invalid fbx target map, missing skeleton"); + } + + // This list is not populated + for (Map<uint64_t, Ref<FBXNode>>::Element *skin_mesh = state.MeshNodes.front(); skin_mesh; skin_mesh = skin_mesh->next()) { + } + } + + // build godot node tree + if (state.fbx_node_list.size() > 0) { + for (List<Ref<FBXNode>>::Element *node_element = state.fbx_node_list.front(); + node_element; + node_element = node_element->next()) { + Ref<FBXNode> fbx_node = node_element->get(); + EditorSceneImporterMeshNode3D *mesh_node = nullptr; + Ref<FBXMeshData> mesh_data_precached; + + // check for valid geometry + if (fbx_node->fbx_model == nullptr) { + print_error("[doc] fundamental flaw, submit bug immediately with full import log with verbose logging on"); + } else { + const std::vector<const FBXDocParser::Geometry *> &geometry = fbx_node->fbx_model->GetGeometry(); + for (const FBXDocParser::Geometry *mesh : geometry) { + print_verbose("[doc] [" + itos(mesh->ID()) + "] mesh: " + fbx_node->node_name); + + if (mesh == nullptr) + continue; + + const FBXDocParser::MeshGeometry *mesh_geometry = dynamic_cast<const FBXDocParser::MeshGeometry *>(mesh); + if (mesh_geometry) { + uint64_t mesh_id = mesh_geometry->ID(); + + // this data will pre-exist if vertex weight information is found + if (state.renderer_mesh_data.has(mesh_id)) { + mesh_data_precached = state.renderer_mesh_data[mesh_id]; + } else { + mesh_data_precached.instance(); + state.renderer_mesh_data.insert(mesh_id, mesh_data_precached); + } + + mesh_data_precached->mesh_node = fbx_node; + + // mesh node, mesh id + mesh_node = mesh_data_precached->create_fbx_mesh(state, mesh_geometry, fbx_node->fbx_model, (p_flags & IMPORT_USE_COMPRESSION) != 0); + if (!state.MeshNodes.has(mesh_id)) { + state.MeshNodes.insert(mesh_id, fbx_node); + } + } + + const FBXDocParser::ShapeGeometry *shape_geometry = dynamic_cast<const FBXDocParser::ShapeGeometry *>(mesh); + if (shape_geometry != nullptr) { + print_verbose("[doc] valid shape geometry converted"); + } + } + } + + Ref<FBXSkeleton> node_skeleton = fbx_node->skeleton_node; + + if (node_skeleton.is_valid()) { + Skeleton3D *skel = node_skeleton->skeleton; + fbx_node->godot_node = skel; + } else if (mesh_node == nullptr) { + fbx_node->godot_node = memnew(Node3D); + } else { + fbx_node->godot_node = mesh_node; + } + + fbx_node->godot_node->set_name(fbx_node->node_name); + + // assign parent if valid + if (fbx_node->fbx_parent.is_valid()) { + fbx_node->fbx_parent->godot_node->add_child(fbx_node->godot_node); + fbx_node->godot_node->set_owner(state.root_owner); + } + + // Node Transform debug, set local xform data. 
+ fbx_node->godot_node->set_transform(get_unscaled_transform(fbx_node->pivot_transform->LocalTransform, state.scale)); + + // populate our mesh node reference + if (mesh_node != nullptr && mesh_data_precached.is_valid()) { + mesh_data_precached->godot_mesh_instance = mesh_node; + } + } + } + + for (Map<uint64_t, Ref<FBXMeshData>>::Element *mesh_data = state.renderer_mesh_data.front(); mesh_data; mesh_data = mesh_data->next()) { + const uint64_t mesh_id = mesh_data->key(); + Ref<FBXMeshData> mesh = mesh_data->value(); + + const FBXDocParser::MeshGeometry *mesh_geometry = p_document->GetObject(mesh_id)->Get<FBXDocParser::MeshGeometry>(); + + ERR_CONTINUE_MSG(mesh->mesh_node.is_null(), "invalid mesh allocation"); + + const FBXDocParser::Skin *mesh_skin = mesh_geometry->DeformerSkin(); + + if (!mesh_skin) { + continue; // safe to continue + } + + // + // Skin bone configuration + // + + // + // Get Mesh Node Xform only + // + // ERR_CONTINUE_MSG(!state.fbx_target_map.has(mesh_id), "invalid xform for the skin pose: " + itos(mesh_id)); + // Ref<FBXNode> mesh_node_xform_data = state.fbx_target_map[mesh_id]; + + if (!mesh_skin) { + continue; // not a deformer. + } + + if (mesh_skin->Clusters().size() == 0) { + continue; // possibly buggy mesh + } + + // Lookup skin or create it if it's not found. + Ref<Skin> skin; + if (!state.MeshSkins.has(mesh_id)) { + print_verbose("Created new skin"); + skin.instance(); + state.MeshSkins.insert(mesh_id, skin); + } else { + print_verbose("Grabbed skin"); + skin = state.MeshSkins[mesh_id]; + } + + for (const FBXDocParser::Cluster *cluster : mesh_skin->Clusters()) { + // node or bone this cluster targets (in theory will only be a bone target) + uint64_t skin_target_id = cluster->TargetNode()->ID(); + + print_verbose("adding cluster [" + itos(cluster->ID()) + "] " + String(cluster->Name().c_str()) + " for target: [" + itos(skin_target_id) + "] " + String(cluster->TargetNode()->Name().c_str())); + ERR_CONTINUE_MSG(!state.fbx_bone_map.has(skin_target_id), "no bone found by that ID? 
locator"); + + const Ref<FBXBone> bone = state.fbx_bone_map[skin_target_id]; + const Ref<FBXSkeleton> skeleton = bone->fbx_skeleton; + const Ref<FBXNode> skeleton_node = skeleton->fbx_node; + + skin->add_named_bind( + bone->bone_name, + get_unscaled_transform( + skeleton_node->pivot_transform->GlobalTransform.affine_inverse() * cluster->TransformLink().affine_inverse(), state.scale)); + } + + print_verbose("cluster name / id: " + String(mesh_skin->Name().c_str()) + " [" + itos(mesh_skin->ID()) + "]"); + print_verbose("skeleton has " + itos(state.fbx_bone_map.size()) + " binds"); + print_verbose("fbx skin has " + itos(mesh_skin->Clusters().size()) + " binds"); + } + + // mesh data iteration for populating skeleton mapping + for (Map<uint64_t, Ref<FBXMeshData>>::Element *mesh_data = state.renderer_mesh_data.front(); mesh_data; mesh_data = mesh_data->next()) { + Ref<FBXMeshData> mesh = mesh_data->value(); + const uint64_t mesh_id = mesh_data->key(); + EditorSceneImporterMeshNode3D *mesh_instance = mesh->godot_mesh_instance; + const int mesh_weights = mesh->max_weight_count; + Ref<FBXSkeleton> skeleton; + const bool valid_armature = mesh->valid_armature_id; + const uint64_t armature = mesh->armature_id; + + if (mesh_weights > 0) { + // this is a bug, it means the weights were found but the skeleton wasn't + ERR_CONTINUE_MSG(!valid_armature, "[doc] fbx armature is missing"); + } else { + continue; // safe to continue not a bug just a normal mesh + } + + if (state.skeleton_map.has(armature)) { + skeleton = state.skeleton_map[armature]; + print_verbose("[doc] armature mesh to skeleton mapping has been allocated"); + } else { + print_error("[doc] unable to find armature mapping"); + } + + ERR_CONTINUE_MSG(!mesh_instance, "[doc] invalid mesh mapping for skeleton assignment"); + ERR_CONTINUE_MSG(skeleton.is_null(), "[doc] unable to resolve the correct skeleton but we have weights!"); + + mesh_instance->set_skeleton_path(mesh_instance->get_path_to(skeleton->skeleton)); + print_verbose("[doc] allocated skeleton to mesh " + mesh_instance->get_name()); + + // do we have a mesh skin for this mesh + ERR_CONTINUE_MSG(!state.MeshSkins.has(mesh_id), "no skin found for mesh"); + + Ref<Skin> mesh_skin = state.MeshSkins[mesh_id]; + + ERR_CONTINUE_MSG(mesh_skin.is_null(), "invalid skin stored in map"); + print_verbose("[doc] allocated skin to mesh " + mesh_instance->get_name()); + mesh_instance->set_skin(mesh_skin); + } + + // build skin and skeleton information + print_verbose("[doc] Skeleton3D Bone count: " + itos(state.fbx_bone_map.size())); + const FBXDocParser::FileGlobalSettings *FBXSettings = p_document->GlobalSettingsPtr(); + + // Configure constraints + // NOTE: constraints won't be added quite yet, we don't have a real need for them *yet*. 
(they can be supported later on) + // const std::vector<uint64_t> fbx_constraints = p_document->GetConstraintStackIDs(); + + // get the animation FPS + float fps_setting = ImportUtils::get_fbx_fps(FBXSettings); + + // enable animation import, only if local animation is enabled + if (state.enable_animation_import && (p_flags & IMPORT_ANIMATION)) { + // document animation stack list - get by ID so we can unload any non used animation stack + const std::vector<uint64_t> animation_stack = p_document->GetAnimationStackIDs(); + + for (uint64_t anim_id : animation_stack) { + FBXDocParser::LazyObject *lazyObject = p_document->GetObject(anim_id); + const FBXDocParser::AnimationStack *stack = lazyObject->Get<FBXDocParser::AnimationStack>(); + + if (stack != nullptr) { + String animation_name = ImportUtils::FBXNodeToName(stack->Name()); + print_verbose("Valid animation stack has been found: " + animation_name); + // ReferenceTime is the same for some animations? + // LocalStop time is the start and end time + float r_start = CONVERT_FBX_TIME(stack->ReferenceStart()); + float r_stop = CONVERT_FBX_TIME(stack->ReferenceStop()); + float start_time = CONVERT_FBX_TIME(stack->LocalStart()); + float end_time = CONVERT_FBX_TIME(stack->LocalStop()); + float duration = end_time - start_time; + + print_verbose("r_start " + rtos(r_start) + ", r_stop " + rtos(r_stop)); + print_verbose("start_time" + rtos(start_time) + " end_time " + rtos(end_time)); + print_verbose("anim duration : " + rtos(duration)); + + // we can safely create the animation player + if (state.animation_player == nullptr) { + print_verbose("Creating animation player"); + state.animation_player = memnew(AnimationPlayer); + state.root->add_child(state.animation_player); + state.animation_player->set_owner(state.root_owner); + } + + Ref<Animation> animation; + animation.instance(); + animation->set_name(animation_name); + animation->set_length(duration); + + print_verbose("Animation length: " + rtos(animation->get_length()) + " seconds"); + + // i think assimp was duplicating things, this lets me know to just reference or ignore this to prevent duplicate information in tracks + // this would mean that we would be doing three times as much work per track if my theory is correct. + // this was not the case but this is a good sanity check for the animation handler from the document. + // it also lets us know if the FBX specification massively changes the animation system, in theory such a change would make this show + // an fbx specification error, so best keep it in + // the overhead is tiny. + Map<uint64_t, const FBXDocParser::AnimationCurve *> CheckForDuplication; + + const std::vector<const FBXDocParser::AnimationLayer *> &layers = stack->Layers(); + print_verbose("FBX Animation layers: " + itos(layers.size())); + for (const FBXDocParser::AnimationLayer *layer : layers) { + std::vector<const FBXDocParser::AnimationCurveNode *> node_list = layer->Nodes(); + print_verbose("Layer: " + ImportUtils::FBXNodeToName(layer->Name()) + ", " + " AnimCurveNode count " + itos(node_list.size())); + + // first thing to do here is that i need to first get the animcurvenode to a Vector3 + // we now need to put this into the track information for godot. + // to do this we need to know which track is what? 
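One detail worth calling out from the stack handling above: FBX stores times as integer KTime ticks, and the CONVERT_FBX_TIME macro added by this patch divides by 46186158000 (ticks per second) to get seconds. A tiny sketch of the conversion and the resulting clip duration; the start/stop values below are hypothetical, just for illustration.

#include <cstdint>
#include <cstdio>

// Same constant the patch uses in CONVERT_FBX_TIME: FBX KTime ticks per second.
static const int64_t FBX_TICKS_PER_SECOND = 46186158000LL;

static double fbx_time_to_seconds(int64_t ktime) {
	return static_cast<double>(ktime) / FBX_TICKS_PER_SECOND;
}

int main() {
	// Hypothetical AnimStack LocalStart/LocalStop values (two seconds worth of ticks).
	const int64_t local_start = 0;
	const int64_t local_stop = 2 * FBX_TICKS_PER_SECOND;
	const double duration = fbx_time_to_seconds(local_stop) - fbx_time_to_seconds(local_start);
	std::printf("clip duration: %f seconds\n", duration);
	return 0;
}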
+ + // target id, [ track name, [time index, vector] ] + // new map needs to be [ track name, keyframe_data ] + Map<uint64_t, Map<StringName, FBXTrack>> AnimCurveNodes; + + // struct AnimTrack { + // // Animation track can be + // // visible, T, R, S + // Map<StringName, Map<uint64_t, Vector3> > animation_track; + // }; + + // Map<uint64_t, AnimTrack> AnimCurveNodes; + + // so really, what does this mean to make an animtion track. + // we need to know what object the curves are for. + // we need the target ID and the target name for the track reduction. + + FBXDocParser::Model::RotOrder quat_rotation_order = FBXDocParser::Model::RotOrder_EulerXYZ; + + // T:: R:: S:: Visible:: Custom:: + for (const FBXDocParser::AnimationCurveNode *curve_node : node_list) { + // when Curves() is called the curves are actually read, we could replace this with our own ProcessDomConnection code here if required. + // We may need to do this but ideally we use Curves + // note: when you call this there might be a delay in opening it + // uses mutable type to 'cache' the response until the AnimationCurveNode is cleaned up. + std::map<std::string, const FBXDocParser::AnimationCurve *> curves = curve_node->Curves(); + const FBXDocParser::Object *object = curve_node->Target(); + const FBXDocParser::Model *target = curve_node->TargetAsModel(); + if (target == nullptr) { + if (object != nullptr) { + print_error("[doc] warning failed to find a target Model for curve: " + String(object->Name().c_str())); + } else { + //print_error("[doc] failed to resolve object"); + continue; + } + + continue; + } else { + //print_verbose("[doc] applied rotation order: " + itos(target->RotationOrder())); + quat_rotation_order = target->RotationOrder(); + } + + uint64_t target_id = target->ID(); + String target_name = ImportUtils::FBXNodeToName(target->Name()); + + const FBXDocParser::PropertyTable *properties = curve_node->Props(); + bool got_x = false, got_y = false, got_z = false; + float offset_x = FBXDocParser::PropertyGet<float>(properties, "d|X", got_x); + float offset_y = FBXDocParser::PropertyGet<float>(properties, "d|Y", got_y); + float offset_z = FBXDocParser::PropertyGet<float>(properties, "d|Z", got_z); + + String curve_node_name = ImportUtils::FBXNodeToName(curve_node->Name()); + + // Reduce all curves for this node into a single container + // T, R, S is what we expect, although other tracks are possible + // like for example visibility tracks. 
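The reduction described above (and performed in the loop that follows) takes the three scalar curves named d|X, d|Y and d|Z and folds them into one keyframe map indexed by time. A simplified sketch of that merge, using std containers instead of the engine's Map and FBXTrack types:

#include <cstdint>
#include <map>
#include <string>

struct Vec3 {
	float x = 0.0f, y = 0.0f, z = 0.0f;
};

// Fold per-component scalar curves (keyed "d|X", "d|Y", "d|Z") into Vector3 keyframes by time.
std::map<int64_t, Vec3> merge_component_curves(
		const std::map<std::string, std::map<int64_t, float>> &curves) {
	std::map<int64_t, Vec3> keyframes;
	for (const auto &curve : curves) {
		for (const auto &key : curve.second) {
			Vec3 &frame = keyframes[key.first]; // created on first touch of this time index
			if (curve.first == "d|X") {
				frame.x = key.second;
			} else if (curve.first == "d|Y") {
				frame.y = key.second;
			} else if (curve.first == "d|Z") {
				frame.z = key.second;
			}
		}
	}
	return keyframes;
}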
+ + // We are not ordered here, we don't care about ordering, this happens automagically by godot when we insert with the + // key time :), so order is unimportant because the insertion will happen at a time index + // good to know: we do not need a list of these in another format :) + //Map<String, Vector<const Assimp::FBX::AnimationCurve *> > unordered_track; + + // T + // R + // S + // Map[String, List<VECTOR>] + + // So this is a reduction of the animation curve nodes + // We build this as a lookup, this is essentially our 'animation track' + //AnimCurveNodes.insert(curve_node_name, Map<uint64_t, Vector3>()); + + // create the animation curve information with the target id + // so the point of this makes a track with the name "T" for example + // the target ID is also set here, this means we don't need to do anything extra when we are in the 'create all animation tracks' step + FBXTrack &keyframe_map = AnimCurveNodes[target_id][StringName(curve_node_name)]; + + if (got_x && got_y && got_z) { + Vector3 default_value = Vector3(offset_x, offset_y, offset_z); + keyframe_map.default_value = default_value; + keyframe_map.has_default = true; + //print_verbose("track name: " + curve_node_name); + //print_verbose("xyz default: " + default_value); + } + // target id, [ track name, [time index, vector] ] + // Map<uint64_t, Map<StringName, Map<uint64_t, Vector3> > > AnimCurveNodes; + + // we probably need the target id here. + // so map[uint64_t map]... + // Map<uint64_t, Vector3D> translation_keys, rotation_keys, scale_keys; + + // extra const required by C++11 colon/Range operator + // note: do not use C++17 syntax here for dicts. + // this is banned in Godot. + for (std::pair<const std::string, const FBXDocParser::AnimationCurve *> &kvp : curves) { + const String curve_element = ImportUtils::FBXNodeToName(kvp.first); + const FBXDocParser::AnimationCurve *curve = kvp.second; + String curve_name = ImportUtils::FBXNodeToName(curve->Name()); + uint64_t curve_id = curve->ID(); + + if (CheckForDuplication.has(curve_id)) { + print_error("(FBX spec changed?) We found a duplicate curve being used for an alternative node - report to godot issue tracker"); + } else { + CheckForDuplication.insert(curve_id, curve); + } + + // FBX has no name for AnimCurveNode::, most of the time, not seen any with valid name here. + const std::map<int64_t, float> &track_time = curve->GetValueTimeTrack(); + + if (track_time.size() > 0) { + for (std::pair<int64_t, float> keyframe : track_time) { + if (curve_element == "d|X") { + keyframe_map.keyframes[keyframe.first].x = keyframe.second; + } else if (curve_element == "d|Y") { + keyframe_map.keyframes[keyframe.first].y = keyframe.second; + } else if (curve_element == "d|Z") { + keyframe_map.keyframes[keyframe.first].z = keyframe.second; + } else { + //print_error("FBX Unsupported element: " + curve_element); + } + + //print_verbose("[" + itos(target_id) + "] Keyframe added: " + itos(keyframe_map.size())); + + //print_verbose("Keyframe t:" + rtos(animation_track_time) + " v: " + rtos(keyframe.second)); + } + } + } + } + + // Map<uint64_t, Map<StringName, Map<uint64_t, Vector3> > > AnimCurveNodes; + // add this animation track here + + // target id, [ track name, [time index, vector] ] + //std::map<uint64_t, std::map<StringName, FBXTrack > > AnimCurveNodes; + for (Map<uint64_t, Map<StringName, FBXTrack>>::Element *track = AnimCurveNodes.front(); track; track = track->next()) { + // 5 tracks + // current track index + // track count is 5 + // track count is 5. 
+ // next track id is 5.
+ const uint64_t target_id = track->key();
+ int track_idx = animation->add_track(Animation::TYPE_TRANSFORM);
+
+ // animation->track_set_path(track_idx, node_path);
+ // animation->track_set_path(track_idx, node_path);
+ Ref<FBXBone> bone;
+
+ // note we must not run the below code if the entry doesn't exist, it will create dummy entries which is very bad.
+ // remember that state.fbx_bone_map[target_id] will create a new entry EVEN if you only read.
+ // this would break node animation targets, so if you change this be warned. :)
+ if (state.fbx_bone_map.has(target_id)) {
+ bone = state.fbx_bone_map[target_id];
+ }
+
+ Transform target_transform;
+
+ if (state.fbx_target_map.has(target_id)) {
+ Ref<FBXNode> node_ref = state.fbx_target_map[target_id];
+ target_transform = node_ref->pivot_transform->GlobalTransform;
+ //print_verbose("[doc] allocated animation node transform");
+ }
+
+ //int size_targets = state.fbx_target_map.size();
+ //print_verbose("Target ID map: " + itos(size_targets));
+ //print_verbose("[doc] debug bone map size: " + itos(state.fbx_bone_map.size()));
+
+ // if this is a skeleton mapped track we can just set the path for the track.
+ // todo: implement node paths here at some point
+ if (state.fbx_bone_map.size() > 0 && state.fbx_bone_map.has(target_id)) {
+ if (bone->fbx_skeleton.is_valid() && bone.is_valid()) {
+ Ref<FBXSkeleton> fbx_skeleton = bone->fbx_skeleton;
+ String bone_path = state.root->get_path_to(fbx_skeleton->skeleton);
+ bone_path += ":" + fbx_skeleton->skeleton->get_bone_name(bone->godot_bone_id);
+ print_verbose("[doc] track bone path: " + bone_path);
+ NodePath path = bone_path;
+ animation->track_set_path(track_idx, path);
+ }
+ } else if (state.fbx_target_map.has(target_id)) {
+ //print_verbose("[doc] we have a valid target for a node animation");
+ Ref<FBXNode> target_node = state.fbx_target_map[target_id];
+ if (target_node.is_valid() && target_node->godot_node != nullptr) {
+ String node_path = state.root->get_path_to(target_node->godot_node);
+ NodePath path = node_path;
+ animation->track_set_path(track_idx, path);
+ //print_verbose("[doc] node animation path: " + node_path);
+ }
+ } else {
+ // note: this could actually be unsafe; it means we should be careful about continuing here. if we see bizarre effects later we should disable this.
+ // I am not sure if this is unsafe or not, testing will tell us this.
+ print_error("[doc] invalid fbx target detected for this track");
+ continue;
+ }
+
+ // everything in FBX and Maya is a node therefore if this happens something is seriously broken.
+ if (!state.fbx_target_map.has(target_id)) { + print_error("unable to resolve this to an FBX object."); + continue; + } + + Ref<FBXNode> target_node = state.fbx_target_map[target_id]; + const FBXDocParser::Model *model = target_node->fbx_model; + const FBXDocParser::PropertyTable *props = model->Props(); + + Map<StringName, FBXTrack> &track_data = track->value(); + FBXTrack &translation_keys = track_data[StringName("T")]; + FBXTrack &rotation_keys = track_data[StringName("R")]; + FBXTrack &scale_keys = track_data[StringName("S")]; + + double increment = 1.0f / fps_setting; + double time = 0.0f; + + bool last = false; + + Vector<Vector3> pos_values; + Vector<float> pos_times; + Vector<Vector3> scale_values; + Vector<float> scale_times; + Vector<Quat> rot_values; + Vector<float> rot_times; + + double max_duration = 0; + double anim_length = animation->get_length(); + + for (std::pair<int64_t, Vector3> position_key : translation_keys.keyframes) { + pos_values.push_back(position_key.second * state.scale); + double animation_track_time = CONVERT_FBX_TIME(position_key.first); + + if (animation_track_time > max_duration) { + max_duration = animation_track_time; + } + + //print_verbose("pos keyframe: t:" + rtos(animation_track_time) + " value " + position_key.second); + pos_times.push_back(animation_track_time); + } + + for (std::pair<int64_t, Vector3> scale_key : scale_keys.keyframes) { + scale_values.push_back(scale_key.second); + double animation_track_time = CONVERT_FBX_TIME(scale_key.first); + + if (animation_track_time > max_duration) { + max_duration = animation_track_time; + } + //print_verbose("scale keyframe t:" + rtos(animation_track_time)); + scale_times.push_back(animation_track_time); + } + + // + // Pre and Post keyframe rotation handler + // -- Required because Maya and Autodesk <3 the pain when it comes to implementing animation code! 
enjoy <3 + + bool got_pre = false; + bool got_post = false; + + Quat post_rotation; + Quat pre_rotation; + + // Rotation matrix + const Vector3 &PreRotation = FBXDocParser::PropertyGet<Vector3>(props, "PreRotation", got_pre); + const Vector3 &PostRotation = FBXDocParser::PropertyGet<Vector3>(props, "PostRotation", got_post); + + FBXDocParser::Model::RotOrder rot_order = model->RotationOrder(); + if (got_pre) { + pre_rotation = ImportUtils::EulerToQuaternion(rot_order, ImportUtils::deg2rad(PreRotation)); + } + if (got_post) { + post_rotation = ImportUtils::EulerToQuaternion(rot_order, ImportUtils::deg2rad(PostRotation)); + } + + Quat lastQuat = Quat(); + + for (std::pair<int64_t, Vector3> rotation_key : rotation_keys.keyframes) { + double animation_track_time = CONVERT_FBX_TIME(rotation_key.first); + + //print_verbose("euler rotation key: " + rotation_key.second); + Quat rot_key_value = ImportUtils::EulerToQuaternion(quat_rotation_order, ImportUtils::deg2rad(rotation_key.second)); + + if (lastQuat != Quat() && rot_key_value.dot(lastQuat) < 0) { + rot_key_value.x = -rot_key_value.x; + rot_key_value.y = -rot_key_value.y; + rot_key_value.z = -rot_key_value.z; + rot_key_value.w = -rot_key_value.w; + } + // pre_post rotation possibly could fix orientation + Quat final_rotation = pre_rotation * rot_key_value * post_rotation; + + lastQuat = final_rotation; + + if (animation_track_time > max_duration) { + max_duration = animation_track_time; + } + + rot_values.push_back(final_rotation); + rot_times.push_back(animation_track_time); + } + + bool valid_rest = false; + Transform bone_rest; + int skeleton_bone = -1; + if (state.fbx_bone_map.has(target_id)) { + if (bone.is_valid() && bone->fbx_skeleton.is_valid()) { + skeleton_bone = bone->godot_bone_id; + if (skeleton_bone >= 0) { + bone_rest = bone->fbx_skeleton->skeleton->get_bone_rest(skeleton_bone); + valid_rest = true; + } + } + + if (!valid_rest) { + print_verbose("invalid rest!"); + } + } + + const Vector3 def_pos = translation_keys.has_default ? (translation_keys.default_value * state.scale) : bone_rest.origin; + const Quat def_rot = rotation_keys.has_default ? ImportUtils::EulerToQuaternion(quat_rotation_order, ImportUtils::deg2rad(rotation_keys.default_value)) : bone_rest.basis.get_rotation_quat(); + const Vector3 def_scale = scale_keys.has_default ? 
scale_keys.default_value : bone_rest.basis.get_scale(); + print_verbose("track defaults: p(" + def_pos + ") s(" + def_scale + ") r(" + def_rot + ")"); + + while (true) { + Vector3 pos = def_pos; + Quat rot = def_rot; + Vector3 scale = def_scale; + + if (pos_values.size()) { + pos = _interpolate_track<Vector3>(pos_times, pos_values, time, + AssetImportAnimation::INTERP_LINEAR); + } + + if (rot_values.size()) { + rot = _interpolate_track<Quat>(rot_times, rot_values, time, + AssetImportAnimation::INTERP_LINEAR); + } + + if (scale_values.size()) { + scale = _interpolate_track<Vector3>(scale_times, scale_values, time, + AssetImportAnimation::INTERP_LINEAR); + } + + // node animations must also include pivots + if (skeleton_bone >= 0) { + Transform xform = Transform(); + xform.basis.set_quat_scale(rot, scale); + xform.origin = pos; + const Transform t = bone_rest.affine_inverse() * xform; + + // populate this again + rot = t.basis.get_rotation_quat(); + rot.normalize(); + scale = t.basis.get_scale(); + pos = t.origin; + } + + animation->transform_track_insert_key(track_idx, time, pos, rot, scale); + + if (last) { + break; + } + + time += increment; + if (time > anim_length) { + last = true; + time = anim_length; + break; + } + } + } + } + state.animation_player->add_animation(animation_name, animation); + } + } + + // AnimStack elements contain start stop time and name of animation + // AnimLayer is the current active layer of the animation (multiple layers can be active we only support 1) + // AnimCurveNode has a OP link back to the model which is the real node. + // AnimCurveNode has a direct link to AnimationCurve (of which it may have more than one) + + // Store animation stack in list + // iterate over all AnimStacks like the cache node algorithm recursively + // this can then be used with ProcessDomConnection<> to link from + // AnimStack:: <-- (OO) --> AnimLayer:: <-- (OO) --> AnimCurveNode:: (which can OP resolve) to Model:: + } + + // + // Cleanup operations - explicit to prevent errors on shutdown - found that ref to ref does behave badly sometimes. 
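Godot bone tracks expect keys relative to the bone's rest pose, which is why the baking loop above multiplies each sampled pose by bone_rest.affine_inverse() before inserting the key. The helper below is only a compact restatement of that step for clarity, using the engine's Transform, Quat and Vector3 types already included by this file; it is not a new API in the patch.

// Convert an absolute local-space sample (pos/rot/scale) into a key relative to the
// bone rest pose, matching what the baking loop above does before transform_track_insert_key().
static void make_rest_relative(const Transform &p_bone_rest, Vector3 &r_pos, Quat &r_rot, Vector3 &r_scale) {
	Transform xform;
	xform.basis.set_quat_scale(r_rot, r_scale);
	xform.origin = r_pos;

	const Transform rest_relative = p_bone_rest.affine_inverse() * xform;

	r_rot = rest_relative.basis.get_rotation_quat();
	r_rot.normalize();
	r_scale = rest_relative.basis.get_scale();
	r_pos = rest_relative.origin;
}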
+ // + + state.renderer_mesh_data.clear(); + state.MeshSkins.clear(); + state.fbx_target_map.clear(); + state.fbx_node_list.clear(); + + for (Map<uint64_t, Ref<FBXBone>>::Element *element = state.fbx_bone_map.front(); element; element = element->next()) { + Ref<FBXBone> bone = element->value(); + bone->parent_bone.unref(); + bone->node.unref(); + bone->fbx_skeleton.unref(); + } + + for (Map<uint64_t, Ref<FBXSkeleton>>::Element *element = state.skeleton_map.front(); element; element = element->next()) { + Ref<FBXSkeleton> skel = element->value(); + skel->fbx_node.unref(); + skel->skeleton_bones.clear(); + } + + state.fbx_bone_map.clear(); + state.skeleton_map.clear(); + state.fbx_root_node.unref(); + + return scene_root; +} + +void EditorSceneImporterFBX::BuildDocumentBones(Ref<FBXBone> p_parent_bone, + ImportState &state, const FBXDocParser::Document *p_doc, + uint64_t p_id) { + const std::vector<const FBXDocParser::Connection *> &conns = p_doc->GetConnectionsByDestinationSequenced(p_id, "Model"); + // FBX can do an join like this + // Model -> SubDeformer (bone) -> Deformer (skin pose) + // This is important because we need to somehow link skin back to bone id in skeleton :) + // The rules are: + // A subdeformer will exist if 'limbnode' class tag present + // The subdeformer will not necessarily have a deformer as joints do not have one + for (const FBXDocParser::Connection *con : conns) { + // goto: bone creation + //print_verbose("con: " + String(con->PropertyName().c_str())); + + // ignore object-property links we want the object to object links nothing else + if (con->PropertyName().length()) { + continue; + } + + // convert connection source object into Object base class + const FBXDocParser::Object *const object = con->SourceObject(); + + if (nullptr == object) { + print_verbose("failed to convert source object for Model link"); + continue; + } + + // FBX Model::Cube, Model::Bone001, etc elements + // This detects if we can cast the object into this model structure. + const FBXDocParser::Model *const model = dynamic_cast<const FBXDocParser::Model *>(object); + + // declare our bone element reference (invalid, unless we create a bone in this step) + // this lets us pass valid armature information into children objects and this is why we moved this up here + // previously this was created .instanced() on the same line. + Ref<FBXBone> bone_element; + + if (model != nullptr) { + // model marked with limb node / casted. + const FBXDocParser::ModelLimbNode *const limb_node = dynamic_cast<const FBXDocParser::ModelLimbNode *>(model); + if (limb_node != nullptr) { + // Write bone into bone list for FBX + + ERR_FAIL_COND_MSG(state.fbx_bone_map.has(limb_node->ID()), "[serious] duplicate LimbNode detected"); + + bool parent_is_bone = state.fbx_bone_map.find(p_id); + bone_element.instance(); + + // used to build the bone hierarchy in the skeleton + bone_element->parent_bone_id = parent_is_bone ? 
p_id : 0; + bone_element->valid_parent = parent_is_bone; + bone_element->limb_node = limb_node; + + // parent is a node and this is the first bone + if (!parent_is_bone) { + uint64_t armature_id = p_id; + bone_element->valid_armature_id = true; + bone_element->armature_id = armature_id; + print_verbose("[doc] valid armature has been configured for first child: " + itos(armature_id)); + } else if (p_parent_bone.is_valid()) { + if (p_parent_bone->valid_armature_id) { + bone_element->valid_armature_id = true; + bone_element->armature_id = p_parent_bone->armature_id; + print_verbose("[doc] bone has valid armature id:" + itos(bone_element->armature_id)); + } else { + print_error("[doc] unassigned armature id: " + String(limb_node->Name().c_str())); + } + } else { + print_error("[doc] error is this a bone? " + String(limb_node->Name().c_str())); + } + + if (!parent_is_bone) { + print_verbose("[doc] Root bone: " + bone_element->bone_name); + } + + uint64_t limb_id = limb_node->ID(); + bone_element->bone_id = limb_id; + bone_element->bone_name = ImportUtils::FBXNodeToName(model->Name()); + bone_element->parent_bone = p_parent_bone; + + // insert limb by ID into list. + state.fbx_bone_map.insert(limb_node->ID(), bone_element); + } + + // recursion call - child nodes + BuildDocumentBones(bone_element, state, p_doc, model->ID()); + } + } +} + +void EditorSceneImporterFBX::BuildDocumentNodes( + Ref<PivotTransform> parent_transform, + ImportState &state, + const FBXDocParser::Document *p_doc, + uint64_t id, + Ref<FBXNode> parent_node) { + // tree + // here we get the node 0 on the root by default + const std::vector<const FBXDocParser::Connection *> &conns = p_doc->GetConnectionsByDestinationSequenced(id, "Model"); + + // branch + for (const FBXDocParser::Connection *con : conns) { + // ignore object-property links + if (con->PropertyName().length()) { + // really important we document why this is ignored. + print_verbose("ignoring property link - no docs on why this is ignored"); + continue; + } + + // convert connection source object into Object base class + // Source objects can exist with 'null connections' this means that we only for sure know the source exists. + const FBXDocParser::Object *const source_object = con->SourceObject(); + + if (nullptr == source_object) { + print_verbose("failed to convert source object for Model link"); + continue; + } + + // FBX Model::Cube, Model::Bone001, etc elements + // This detects if we can cast the object into this model structure. 
+ const FBXDocParser::Model *const model = dynamic_cast<const FBXDocParser::Model *>(source_object); + // model is the current node + if (nullptr != model) { + uint64_t current_node_id = model->ID(); + + Ref<FBXNode> new_node; + new_node.instance(); + new_node->current_node_id = current_node_id; + new_node->node_name = ImportUtils::FBXNodeToName(model->Name()); + + Ref<PivotTransform> fbx_transform; + fbx_transform.instance(); + fbx_transform->set_parent(parent_transform); + fbx_transform->set_model(model); + fbx_transform->debug_pivot_xform("name: " + new_node->node_name); + fbx_transform->Execute(); + + new_node->set_pivot_transform(fbx_transform); + + // check if this node is a bone + if (state.fbx_bone_map.has(current_node_id)) { + Ref<FBXBone> bone = state.fbx_bone_map[current_node_id]; + if (bone.is_valid()) { + bone->set_node(new_node); + print_verbose("allocated bone data: " + bone->bone_name); + } + } + + // set the model, we can't just assign this safely + new_node->set_model(model); + + if (parent_node.is_valid()) { + new_node->set_parent(parent_node); + } else { + new_node->set_parent(state.fbx_root_node); + } + + CRASH_COND_MSG(new_node->pivot_transform.is_null(), "invalid fbx target map pivot transform [serious]"); + + // populate lookup tables with references + // [fbx_node_id, fbx_node] + + state.fbx_node_list.push_back(new_node); + if (!state.fbx_target_map.has(new_node->current_node_id)) { + state.fbx_target_map[new_node->current_node_id] = new_node; + } + + // print node name + print_verbose("[doc] new node " + new_node->node_name); + + // sub branches + BuildDocumentNodes(new_node->pivot_transform, state, p_doc, current_node_id, new_node); + } + } +} diff --git a/modules/assimp/editor_scene_importer_assimp.h b/modules/fbx/editor_scene_importer_fbx.h index 80bba6ad66..ff1d243ede 100644 --- a/modules/assimp/editor_scene_importer_assimp.h +++ b/modules/fbx/editor_scene_importer_fbx.h @@ -1,5 +1,5 @@ /*************************************************************************/ -/* editor_scene_importer_assimp.h */ +/* editor_scene_importer_fbx.h */ /*************************************************************************/ /* This file is part of: */ /* GODOT ENGINE */ @@ -28,13 +28,20 @@ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /*************************************************************************/ -#ifndef EDITOR_SCENE_IMPORTER_ASSIMP_H -#define EDITOR_SCENE_IMPORTER_ASSIMP_H +#ifndef EDITOR_SCENE_IMPORTER_FBX_H +#define EDITOR_SCENE_IMPORTER_FBX_H #ifdef TOOLS_ENABLED + +#include "data/import_state.h" +#include "tools/import_utils.h" + #include "core/core_bind.h" #include "core/io/resource_importer.h" +#include "core/string/ustring.h" +#include "core/templates/local_vector.h" #include "core/templates/vector.h" +#include "core/variant/dictionary.h" #include "editor/import/resource_importer_scene.h" #include "editor/project_settings_editor.h" #include "scene/3d/mesh_instance_3d.h" @@ -44,35 +51,16 @@ #include "scene/resources/animation.h" #include "scene/resources/surface_tool.h" -#include <assimp/matrix4x4.h> -#include <assimp/scene.h> -#include <assimp/types.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/LogStream.hpp> -#include <assimp/Logger.hpp> -#include <map> - -#include "import_state.h" -#include "import_utils.h" +#include "fbx_parser/FBXDocument.h" +#include "fbx_parser/FBXImportSettings.h" +#include "fbx_parser/FBXMeshGeometry.h" +#include "fbx_parser/FBXUtil.h" -using namespace AssimpImporter; - -class AssimpStream : public Assimp::LogStream { -public: - // Constructor - AssimpStream() {} - - // Destructor - ~AssimpStream() {} - // Write something using your own functionality - void write(const char *message) { - print_verbose(String("Open Asset Import: ") + String(message).strip_edges()); - } -}; +#define CONVERT_FBX_TIME(time) static_cast<double>(time) / 46186158000LL -class EditorSceneImporterAssimp : public EditorSceneImporter { +class EditorSceneImporterFBX : public EditorSceneImporter { private: - GDCLASS(EditorSceneImporterAssimp, EditorSceneImporter); + GDCLASS(EditorSceneImporterFBX, EditorSceneImporter); struct AssetImportAnimation { enum Interpolation { @@ -83,67 +71,63 @@ private: }; }; - struct BoneInfo { - uint32_t bone; - float weight; - }; + // ------------------------------------------------------------------------------------------------ + template <typename T> + const T *ProcessDOMConnection( + const FBXDocParser::Document *doc, + uint64_t current_element, + bool reverse_lookup = false) { + const std::vector<const FBXDocParser::Connection *> &conns = reverse_lookup ? doc->GetConnectionsByDestinationSequenced(current_element) : doc->GetConnectionsBySourceSequenced(current_element); + //print_verbose("[doc] looking for " + String(element_to_find)); + // using the temp pattern here so we can debug before it returns + // in some cases we return too early, with 'deformer object base class' in wrong place + // in assimp this means we can accidentally return too early... + const T *return_obj = nullptr; + + for (const FBXDocParser::Connection *con : conns) { + const FBXDocParser::Object *source_object = con->SourceObject(); + const FBXDocParser::Object *dest_object = con->DestinationObject(); + if (source_object && dest_object != nullptr) { + //print_verbose("[doc] connection name: " + String(source_object->Name().c_str()) + ", dest: " + String(dest_object->Name().c_str())); + const T *temp = dynamic_cast<const T *>(reverse_lookup ? source_object : dest_object); + if (temp) { + return_obj = temp; + } + } + } + + if (return_obj != nullptr) { + //print_verbose("[doc] returned valid element"); + //print_verbose("Found object for bone"); + return return_obj; + } + + // safe to return nothing, need to use nullptr here as nullptr is used internally for FBX document. 
+ return nullptr; + } + + void BuildDocumentBones(Ref<FBXBone> p_parent_bone, + ImportState &state, const FBXDocParser::Document *p_doc, + uint64_t p_id); - Ref<Mesh> _generate_mesh_from_surface_indices(ImportState &state, const Vector<int> &p_surface_indices, - const aiNode *assimp_node, Ref<Skin> &skin, - Skeleton3D *&skeleton_assigned); - - // simple object creation functions - Node3D *create_light(ImportState &state, - const String &node_name, - Transform &look_at_transform); - Node3D *create_camera( - ImportState &state, - const String &node_name, - Transform &look_at_transform); - // non recursive - linear so must not use recursive arguments - MeshInstance3D *create_mesh(ImportState &state, const aiNode *assimp_node, const String &node_name, Node *active_node, Transform node_transform); - // recursive node generator - void _generate_node(ImportState &state, const aiNode *assimp_node); - void _insert_animation_track(ImportState &scene, const aiAnimation *assimp_anim, int track_id, - int anim_fps, Ref<Animation> animation, float ticks_per_second, - Skeleton3D *skeleton, const NodePath &node_path, - const String &node_name, aiBone *track_bone); - - void _import_animation(ImportState &state, int p_animation_index, int p_bake_fps); - Node *get_node_by_name(ImportState &state, String name); - aiBone *get_bone_from_stack(ImportState &state, aiString name); - Node3D *_generate_scene(const String &p_path, aiScene *scene, const uint32_t p_flags, int p_bake_fps, const int32_t p_max_bone_weights); + void BuildDocumentNodes(Ref<PivotTransform> parent_transform, ImportState &state, const FBXDocParser::Document *doc, uint64_t id, Ref<FBXNode> fbx_parent); + + Node3D *_generate_scene(const String &p_path, const FBXDocParser::Document *p_document, + const uint32_t p_flags, + int p_bake_fps, const int32_t p_max_bone_weights); template <class T> T _interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, float p_time, AssetImportAnimation::Interpolation p_interp); void _register_project_setting_import(const String generic, const String import_setting_string, const Vector<String> &exts, List<String> *r_extensions, const bool p_enabled) const; - struct ImportFormat { - Vector<String> extensions; - bool is_default; - }; - -protected: - static void _bind_methods(); - public: - EditorSceneImporterAssimp() { - Assimp::DefaultLogger::create("", Assimp::Logger::VERBOSE); - unsigned int severity = Assimp::Logger::Info | Assimp::Logger::Err | Assimp::Logger::Warn; - Assimp::DefaultLogger::get()->attachStream(new AssimpStream(), severity); - } - ~EditorSceneImporterAssimp() { - Assimp::DefaultLogger::kill(); - } + EditorSceneImporterFBX() {} + ~EditorSceneImporterFBX() {} virtual void get_extensions(List<String> *r_extensions) const override; virtual uint32_t get_import_flags() const override; - virtual Node *import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, List<String> *r_missing_deps, Error *r_err = nullptr) override; - Ref<Image> load_image(ImportState &state, const aiScene *p_scene, String p_path); - - static void RegenerateBoneStack(ImportState &state); - - void RegenerateBoneStack(ImportState &state, aiMesh *mesh); + virtual Node3D *import_scene(const String &p_path, uint32_t p_flags, int p_bake_fps, List<String> *r_missing_deps, Error *r_err = NULL) override; }; -#endif -#endif + +#endif // TOOLS_ENABLED +#endif // EDITOR_SCENE_IMPORTER_FBX_H diff --git a/modules/fbx/fbx_parser/ByteSwapper.h b/modules/fbx/fbx_parser/ByteSwapper.h new file mode 100644 index 
0000000000..0a46a7df26 --- /dev/null +++ b/modules/fbx/fbx_parser/ByteSwapper.h @@ -0,0 +1,282 @@ +/*************************************************************************/ +/* ByteSwapper.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2020, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file Helper class tp perform various byte oder swappings + (e.g. little to big endian) */ +#ifndef BYTE_SWAPPER_H +#define BYTE_SWAPPER_H + +#include <stdint.h> +#include <algorithm> +#include <locale> + +namespace FBXDocParser { +// -------------------------------------------------------------------------------------- +/** Defines some useful byte order swap routines. + * + * This is required to read big-endian model formats on little-endian machines, + * and vice versa. Direct use of this class is DEPRECATED. Use #StreamReader instead. */ +// -------------------------------------------------------------------------------------- +class ByteSwap { + ByteSwap() {} + +public: + // ---------------------------------------------------------------------- + /** Swap two bytes of data + * @param[inout] _szOut A void* to save the reintcasts for the caller. */ + static inline void Swap2(void *_szOut) { + uint8_t *const szOut = reinterpret_cast<uint8_t *>(_szOut); + std::swap(szOut[0], szOut[1]); + } + + // ---------------------------------------------------------------------- + /** Swap four bytes of data + * @param[inout] _szOut A void* to save the reintcasts for the caller. */ + static inline void Swap4(void *_szOut) { + uint8_t *const szOut = reinterpret_cast<uint8_t *>(_szOut); + std::swap(szOut[0], szOut[3]); + std::swap(szOut[1], szOut[2]); + } + + // ---------------------------------------------------------------------- + /** Swap eight bytes of data + * @param[inout] _szOut A void* to save the reintcasts for the caller. */ + static inline void Swap8(void *_szOut) { + uint8_t *const szOut = reinterpret_cast<uint8_t *>(_szOut); + std::swap(szOut[0], szOut[7]); + std::swap(szOut[1], szOut[6]); + std::swap(szOut[2], szOut[5]); + std::swap(szOut[3], szOut[4]); + } + + // ---------------------------------------------------------------------- + /** ByteSwap a float. Not a joke. + * @param[inout] fOut ehm. .. */ + static inline void Swap(float *fOut) { + Swap4(fOut); + } + + // ---------------------------------------------------------------------- + /** ByteSwap a double. Not a joke. + * @param[inout] fOut ehm. .. */ + static inline void Swap(double *fOut) { + Swap8(fOut); + } + + // ---------------------------------------------------------------------- + /** ByteSwap an int16t. Not a joke. + * @param[inout] fOut ehm. .. */ + static inline void Swap(int16_t *fOut) { + Swap2(fOut); + } + + static inline void Swap(uint16_t *fOut) { + Swap2(fOut); + } + + // ---------------------------------------------------------------------- + /** ByteSwap an int32t. Not a joke. + * @param[inout] fOut ehm. .. */ + static inline void Swap(int32_t *fOut) { + Swap4(fOut); + } + + static inline void Swap(uint32_t *fOut) { + Swap4(fOut); + } + + // ---------------------------------------------------------------------- + /** ByteSwap an int64t. Not a joke. + * @param[inout] fOut ehm. .. */ + static inline void Swap(int64_t *fOut) { + Swap8(fOut); + } + + static inline void Swap(uint64_t *fOut) { + Swap8(fOut); + } + + // ---------------------------------------------------------------------- + //! Templatized ByteSwap + //! 
\returns param tOut as swapped + template <typename Type> + static inline Type Swapped(Type tOut) { + return _swapper<Type, sizeof(Type)>()(tOut); + } + +private: + template <typename T, size_t size> + struct _swapper; +}; + +template <typename T> +struct ByteSwap::_swapper<T, 2> { + T operator()(T tOut) { + Swap2(&tOut); + return tOut; + } +}; + +template <typename T> +struct ByteSwap::_swapper<T, 4> { + T operator()(T tOut) { + Swap4(&tOut); + return tOut; + } +}; + +template <typename T> +struct ByteSwap::_swapper<T, 8> { + T operator()(T tOut) { + Swap8(&tOut); + return tOut; + } +}; + +// -------------------------------------------------------------------------------------- +// ByteSwap macros for BigEndian/LittleEndian support +// -------------------------------------------------------------------------------------- +#if (defined AI_BUILD_BIG_ENDIAN) +#define AI_LE(t) (t) +#define AI_BE(t) ByteSwap::Swapped(t) +#define AI_LSWAP2(p) +#define AI_LSWAP4(p) +#define AI_LSWAP8(p) +#define AI_LSWAP2P(p) +#define AI_LSWAP4P(p) +#define AI_LSWAP8P(p) +#define LE_NCONST const +#define AI_SWAP2(p) ByteSwap::Swap2(&(p)) +#define AI_SWAP4(p) ByteSwap::Swap4(&(p)) +#define AI_SWAP8(p) ByteSwap::Swap8(&(p)) +#define AI_SWAP2P(p) ByteSwap::Swap2((p)) +#define AI_SWAP4P(p) ByteSwap::Swap4((p)) +#define AI_SWAP8P(p) ByteSwap::Swap8((p)) +#define BE_NCONST +#else +#define AI_BE(t) (t) +#define AI_LE(t) ByteSwap::Swapped(t) +#define AI_SWAP2(p) +#define AI_SWAP4(p) +#define AI_SWAP8(p) +#define AI_SWAP2P(p) +#define AI_SWAP4P(p) +#define AI_SWAP8P(p) +#define BE_NCONST const +#define AI_LSWAP2(p) ByteSwap::Swap2(&(p)) +#define AI_LSWAP4(p) ByteSwap::Swap4(&(p)) +#define AI_LSWAP8(p) ByteSwap::Swap8(&(p)) +#define AI_LSWAP2P(p) ByteSwap::Swap2((p)) +#define AI_LSWAP4P(p) ByteSwap::Swap4((p)) +#define AI_LSWAP8P(p) ByteSwap::Swap8((p)) +#define LE_NCONST +#endif + +namespace Intern { + +// -------------------------------------------------------------------------------------------- +template <typename T, bool doit> +struct ByteSwapper { + void operator()(T *inout) { + ByteSwap::Swap(inout); + } +}; + +template <typename T> +struct ByteSwapper<T, false> { + void operator()(T *) { + } +}; + +// -------------------------------------------------------------------------------------------- +template <bool SwapEndianess, typename T, bool RuntimeSwitch> +struct Getter { + void operator()(T *inout, bool le) { + le = !le; + if (le) { + ByteSwapper<T, (sizeof(T) > 1 ? true : false)>()(inout); + } else + ByteSwapper<T, false>()(inout); + } +}; + +template <bool SwapEndianess, typename T> +struct Getter<SwapEndianess, T, false> { + void operator()(T *inout, bool /*le*/) { + // static branch + ByteSwapper<T, (SwapEndianess && sizeof(T) > 1)>()(inout); + } +}; +} // namespace Intern +} // namespace FBXDocParser + +#endif // BYTE_SWAPPER_H diff --git a/thirdparty/assimp/CREDITS b/modules/fbx/fbx_parser/CREDITS index 26e21d2f41..62b449614e 100644 --- a/thirdparty/assimp/CREDITS +++ b/modules/fbx/fbx_parser/CREDITS @@ -60,7 +60,7 @@ The GUY who performed some of the CSM mocaps. Contributed fixes for the documentation and the doxygen markup - Zhao Lei -Contributed several bugfixes fixing memory leaks and improving float parsing +Contributed several bugfixes fixing memory leaks and improving float parsing - sueastside Updated PyAssimp to the latest Assimp data structures and provided a script to keep the Python binding up-to-date. @@ -129,7 +129,7 @@ Contributed a patch to fix the VertexTriangleAdjacency postprocessing step. 
Contributed the Debian build fixes ( architecture macro ). - gellule -Several LWO and LWS fixes (pivoting). +Several LWO and LWS fixes (pivoting). - Marcel Metz GCC/Linux fixes for the SimpleOpenGL sample. diff --git a/modules/fbx/fbx_parser/FBXAnimation.cpp b/modules/fbx/fbx_parser/FBXAnimation.cpp new file mode 100644 index 0000000000..bfd3e97314 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXAnimation.cpp @@ -0,0 +1,290 @@ +/*************************************************************************/ +/* FBXAnimation.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXAnimation.cpp + * @brief Assimp::FBX::AnimationCurve, Assimp::FBX::AnimationCurveNode, + * Assimp::FBX::AnimationLayer, Assimp::FBX::AnimationStack + */ + +#include "FBXCommon.h" +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXParser.h" + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +AnimationCurve::AnimationCurve(uint64_t id, const ElementPtr element, const std::string &name, const Document & /*doc*/) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + const ElementPtr KeyTime = GetRequiredElement(sc, "KeyTime"); + const ElementPtr KeyValueFloat = GetRequiredElement(sc, "KeyValueFloat"); + + // note preserved keys and values for legacy FBXConverter.cpp + // we can remove this once the animation system is written + // and clean up this code so we do not have copies everywhere. + ParseVectorDataArray(keys, KeyTime); + ParseVectorDataArray(values, KeyValueFloat); + + if (keys.size() != values.size()) { + DOMError("the number of key times does not match the number of keyframe values", KeyTime); + } + + // put the two lists into the map, underlying container is really just a dictionary + // these will always match, if not an error will throw and the file will not import + // this is useful because we then can report something and fix this later if it becomes an issue + // at this point we do not need a different count of these elements so this makes the + // most sense to do. 
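+	// Illustrative note: KeyTime stamps are FBX kTime ticks. Assuming the SECOND
+	// constant from FBXCommon.h (46186158000 ticks per second), a stamp `t` maps to
+	// seconds roughly as:
+	//   const double seconds = static_cast<double>(t) / static_cast<double>(SECOND);
+	// The loop below only pairs each raw tick stamp with its keyframe value.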
+ for (size_t x = 0; x < keys.size(); x++) { + keyvalues[keys[x]] = values[x]; + } + + const ElementPtr KeyAttrDataFloat = sc->GetElement("KeyAttrDataFloat"); + if (KeyAttrDataFloat) { + ParseVectorDataArray(attributes, KeyAttrDataFloat); + } + + const ElementPtr KeyAttrFlags = sc->GetElement("KeyAttrFlags"); + if (KeyAttrFlags) { + ParseVectorDataArray(flags, KeyAttrFlags); + } +} + +// ------------------------------------------------------------------------------------------------ +AnimationCurve::~AnimationCurve() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +AnimationCurveNode::AnimationCurveNode(uint64_t id, const ElementPtr element, const std::string &name, + const Document &doc, const char *const *target_prop_whitelist /*= NULL*/, + size_t whitelist_size /*= 0*/) : + Object(id, element, name), target(), doc(doc) { + const ScopePtr sc = GetRequiredScope(element); + + // find target node + const char *whitelist[] = { "Model", "NodeAttribute", "Deformer" }; + const std::vector<const Connection *> &conns = doc.GetConnectionsBySourceSequenced(ID(), whitelist, 3); + + for (const Connection *con : conns) { + // link should go for a property + if (!con->PropertyName().length()) { + continue; + } + + Object *object = con->DestinationObject(); + + if (!object) { + DOMWarning("failed to read destination object for AnimationCurveNode->Model link, ignoring", element); + continue; + } + + target = object; + prop = con->PropertyName(); + break; + } + + props = GetPropertyTable(doc, "AnimationCurveNode.FbxAnimCurveNode", element, sc, false); +} + +// ------------------------------------------------------------------------------------------------ +AnimationCurveNode::~AnimationCurveNode() { + curves.clear(); +} + +// ------------------------------------------------------------------------------------------------ +const AnimationMap &AnimationCurveNode::Curves() const { + /* Lazy loaded animation curves, will only load if required */ + if (curves.empty()) { + // resolve attached animation curves + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "AnimationCurve"); + + for (const Connection *con : conns) { + // So the advantage of having this STL boilerplate is that it's dead simple once you get it. + // The other advantage is casting is guaranteed to be safe and nullptr will be returned in the last step if it fails. 
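+			// For illustration: the connection's property name identifies the channel the
+			// curve drives (typically "d|X", "d|Y" or "d|Z"), so for a translation curve
+			// node the map built here looks roughly like { "d|X": curve, "d|Y": curve, "d|Z": curve }.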
+ Object *ob = con->SourceObject(); + AnimationCurve *anim_curve = dynamic_cast<AnimationCurve *>(ob); + ERR_CONTINUE_MSG(!anim_curve, "Failed to convert animation curve from object"); + + curves.insert(std::make_pair(con->PropertyName(), anim_curve)); + } + } + + return curves; +} + +// ------------------------------------------------------------------------------------------------ +AnimationLayer::AnimationLayer(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Object(id, element, name), doc(doc) { + const ScopePtr sc = GetRequiredScope(element); + + // note: the props table here bears little importance and is usually absent + props = GetPropertyTable(doc, "AnimationLayer.FbxAnimLayer", element, sc, true); +} + +// ------------------------------------------------------------------------------------------------ +AnimationLayer::~AnimationLayer() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +const AnimationCurveNodeList AnimationLayer::Nodes(const char *const *target_prop_whitelist, + size_t whitelist_size /*= 0*/) const { + AnimationCurveNodeList nodes; + + // resolve attached animation nodes + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "AnimationCurveNode"); + nodes.reserve(conns.size()); + + for (const Connection *con : conns) { + // link should not go to a property + if (con->PropertyName().length()) { + continue; + } + + Object *ob = con->SourceObject(); + + if (!ob) { + DOMWarning("failed to read source object for AnimationCurveNode->AnimationLayer link, ignoring", element); + continue; + } + + const AnimationCurveNode *anim = dynamic_cast<AnimationCurveNode *>(ob); + if (!anim) { + DOMWarning("source object for ->AnimationLayer link is not an AnimationCurveNode", element); + continue; + } + + if (target_prop_whitelist) { + const char *s = anim->TargetProperty().c_str(); + bool ok = false; + for (size_t i = 0; i < whitelist_size; ++i) { + if (!strcmp(s, target_prop_whitelist[i])) { + ok = true; + break; + } + } + if (!ok) { + continue; + } + } + nodes.push_back(anim); + } + + return nodes; +} + +// ------------------------------------------------------------------------------------------------ +AnimationStack::AnimationStack(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + + // note: we don't currently use any of these properties so we shouldn't bother if it is missing + props = GetPropertyTable(doc, "AnimationStack.FbxAnimStack", element, sc, true); + + // resolve attached animation layers + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "AnimationLayer"); + layers.reserve(conns.size()); + + for (const Connection *con : conns) { + // link should not go to a property + if (con->PropertyName().length()) { + continue; + } + + Object *ob = con->SourceObject(); + if (!ob) { + DOMWarning("failed to read source object for AnimationLayer->AnimationStack link, ignoring", element); + continue; + } + + const AnimationLayer *anim = dynamic_cast<const AnimationLayer *>(ob); + + if (!anim) { + DOMWarning("source object for ->AnimationStack link is not an AnimationLayer", element); + continue; + } + + layers.push_back(anim); + } +} + +// ------------------------------------------------------------------------------------------------ +AnimationStack::~AnimationStack() { + if 
(props != nullptr) { + delete props; + props = nullptr; + } +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXBinaryTokenizer.cpp b/modules/fbx/fbx_parser/FBXBinaryTokenizer.cpp new file mode 100644 index 0000000000..0fc66b13cd --- /dev/null +++ b/modules/fbx/fbx_parser/FBXBinaryTokenizer.cpp @@ -0,0 +1,467 @@ +/*************************************************************************/ +/* FBXBinaryTokenizer.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ +/** @file FBXBinaryTokenizer.cpp + * @brief Implementation of a fake lexer for binary fbx files - + * we emit tokens so the parser needs almost no special handling + * for binary files. + */ + +#include "ByteSwapper.h" +#include "FBXTokenizer.h" +#include "core/string/print_string.h" + +#include <stdint.h> + +namespace FBXDocParser { +//enum Flag +//{ +// e_unknown_0 = 1 << 0, +// e_unknown_1 = 1 << 1, +// e_unknown_2 = 1 << 2, +// e_unknown_3 = 1 << 3, +// e_unknown_4 = 1 << 4, +// e_unknown_5 = 1 << 5, +// e_unknown_6 = 1 << 6, +// e_unknown_7 = 1 << 7, +// e_unknown_8 = 1 << 8, +// e_unknown_9 = 1 << 9, +// e_unknown_10 = 1 << 10, +// e_unknown_11 = 1 << 11, +// e_unknown_12 = 1 << 12, +// e_unknown_13 = 1 << 13, +// e_unknown_14 = 1 << 14, +// e_unknown_15 = 1 << 15, +// e_unknown_16 = 1 << 16, +// e_unknown_17 = 1 << 17, +// e_unknown_18 = 1 << 18, +// e_unknown_19 = 1 << 19, +// e_unknown_20 = 1 << 20, +// e_unknown_21 = 1 << 21, +// e_unknown_22 = 1 << 22, +// e_unknown_23 = 1 << 23, +// e_flag_field_size_64_bit = 1 << 24, // Not sure what is +// e_unknown_25 = 1 << 25, +// e_unknown_26 = 1 << 26, +// e_unknown_27 = 1 << 27, +// e_unknown_28 = 1 << 28, +// e_unknown_29 = 1 << 29, +// e_unknown_30 = 1 << 30, +// e_unknown_31 = 1 << 31 +//}; +// +//bool check_flag(uint32_t flags, Flag to_check) +//{ +// return (flags & to_check) != 0; +//} +// ------------------------------------------------------------------------------------------------ +Token::Token(const char *sbegin, const char *send, TokenType type, size_t offset) : + sbegin(sbegin), + send(send), + type(type), + line(offset), + column(BINARY_MARKER) { +#ifdef DEBUG_ENABLED + contents = std::string(sbegin, static_cast<size_t>(send - sbegin)); +#endif + // calc length + // measure from sBegin to sEnd and validate? 
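+	// Note: for binary tokens `line` stores the byte offset and `column` is set to
+	// BINARY_MARKER (see the initializer list above), so error reporting prints an
+	// offset rather than a line/column pair.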
+} + +namespace { + +// ------------------------------------------------------------------------------------------------ +// signal tokenization error +void TokenizeError(const std::string &message, size_t offset) { + print_error("[FBX-Tokenize] " + String(message.c_str()) + ", offset " + itos(offset)); +} + +// ------------------------------------------------------------------------------------------------ +size_t Offset(const char *begin, const char *cursor) { + //ai_assert(begin <= cursor); + + return cursor - begin; +} + +// ------------------------------------------------------------------------------------------------ +void TokenizeError(const std::string &message, const char *begin, const char *cursor) { + TokenizeError(message, Offset(begin, cursor)); +} + +// ------------------------------------------------------------------------------------------------ +uint32_t ReadWord(const char *input, const char *&cursor, const char *end) { + const size_t k_to_read = sizeof(uint32_t); + if (Offset(cursor, end) < k_to_read) { + TokenizeError("cannot ReadWord, out of bounds", input, cursor); + } + + uint32_t word; + ::memcpy(&word, cursor, 4); + AI_SWAP4(word); + + cursor += k_to_read; + + return word; +} + +// ------------------------------------------------------------------------------------------------ +uint64_t ReadDoubleWord(const char *input, const char *&cursor, const char *end) { + const size_t k_to_read = sizeof(uint64_t); + if (Offset(cursor, end) < k_to_read) { + TokenizeError("cannot ReadDoubleWord, out of bounds", input, cursor); + } + + uint64_t dword /*= *reinterpret_cast<const uint64_t*>(cursor)*/; + ::memcpy(&dword, cursor, sizeof(uint64_t)); + AI_SWAP8(dword); + + cursor += k_to_read; + + return dword; +} + +// ------------------------------------------------------------------------------------------------ +uint8_t ReadByte(const char *input, const char *&cursor, const char *end) { + if (Offset(cursor, end) < sizeof(uint8_t)) { + TokenizeError("cannot ReadByte, out of bounds", input, cursor); + } + + uint8_t word; /* = *reinterpret_cast< const uint8_t* >( cursor )*/ + ::memcpy(&word, cursor, sizeof(uint8_t)); + ++cursor; + + return word; +} + +// ------------------------------------------------------------------------------------------------ +unsigned int ReadString(const char *&sbegin_out, const char *&send_out, const char *input, + const char *&cursor, const char *end, bool long_length = false, bool allow_null = false) { + const uint32_t len_len = long_length ? 4 : 1; + if (Offset(cursor, end) < len_len) { + TokenizeError("cannot ReadString, out of bounds reading length", input, cursor); + } + + const uint32_t length = long_length ? 
ReadWord(input, cursor, end) : ReadByte(input, cursor, end); + + if (Offset(cursor, end) < length) { + TokenizeError("cannot ReadString, length is out of bounds", input, cursor); + } + + sbegin_out = cursor; + cursor += length; + + send_out = cursor; + + if (!allow_null) { + for (unsigned int i = 0; i < length; ++i) { + if (sbegin_out[i] == '\0') { + TokenizeError("failed ReadString, unexpected NUL character in string", input, cursor); + } + } + } + + return length; +} + +// ------------------------------------------------------------------------------------------------ +void ReadData(const char *&sbegin_out, const char *&send_out, const char *input, const char *&cursor, const char *end) { + if (Offset(cursor, end) < 1) { + TokenizeError("cannot ReadData, out of bounds reading length", input, cursor); + } + + const char type = *cursor; + sbegin_out = cursor++; + + switch (type) { + // 16 bit int + case 'Y': + cursor += 2; + break; + + // 1 bit bool flag (yes/no) + case 'C': + cursor += 1; + break; + + // 32 bit int + case 'I': + // <- fall through + + // float + case 'F': + cursor += 4; + break; + + // double + case 'D': + cursor += 8; + break; + + // 64 bit int + case 'L': + cursor += 8; + break; + + // note: do not write cursor += ReadWord(...cursor) as this would be UB + + // raw binary data + case 'R': { + const uint32_t length = ReadWord(input, cursor, end); + cursor += length; + break; + } + + case 'b': + // TODO: what is the 'b' type code? Right now we just skip over it / + // take the full range we could get + cursor = end; + break; + + // array of * + case 'f': + case 'd': + case 'l': + case 'i': + case 'c': { + const uint32_t length = ReadWord(input, cursor, end); + const uint32_t encoding = ReadWord(input, cursor, end); + + const uint32_t comp_len = ReadWord(input, cursor, end); + + // compute length based on type and check against the stored value + if (encoding == 0) { + uint32_t stride = 0; + switch (type) { + case 'f': + case 'i': + stride = 4; + break; + + case 'd': + case 'l': + stride = 8; + break; + + case 'c': + stride = 1; + break; + + default: + break; + }; + //ai_assert(stride > 0); + if (length * stride != comp_len) { + TokenizeError("cannot ReadData, calculated data stride differs from what the file claims", input, cursor); + } + } + // zip/deflate algorithm (encoding==1)? take given length. anything else? die + else if (encoding != 1) { + TokenizeError("cannot ReadData, unknown encoding", input, cursor); + } + cursor += comp_len; + break; + } + + // string + case 'S': { + const char *sb, *se; + // 0 characters can legally happen in such strings + ReadString(sb, se, input, cursor, end, true, true); + break; + } + default: + TokenizeError("cannot ReadData, unexpected type code: " + std::string(&type, 1), input, cursor); + } + + if (cursor > end) { + TokenizeError("cannot ReadData, the remaining size is too small for the data type: " + std::string(&type, 1), input, cursor); + } + + // the type code is contained in the returned range + send_out = cursor; +} + +// ------------------------------------------------------------------------------------------------ +bool ReadScope(TokenList &output_tokens, const char *input, const char *&cursor, const char *end, bool const is64bits) { + // the first word contains the offset at which this block ends + const uint64_t end_offset = is64bits ? 
ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); + + // we may get 0 if reading reached the end of the file - + // fbx files have a mysterious extra footer which I don't know + // how to extract any information from, but at least it always + // starts with a 0. + if (!end_offset) { + return false; + } + + if (end_offset > Offset(input, end)) { + TokenizeError("block offset is out of range", input, cursor); + } else if (end_offset < Offset(input, cursor)) { + TokenizeError("block offset is negative out of range", input, cursor); + } + + // the second data word contains the number of properties in the scope + const uint64_t prop_count = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); + + // the third data word contains the length of the property list + const uint64_t prop_length = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); + + // now comes the name of the scope/key + const char *sbeg, *send; + ReadString(sbeg, send, input, cursor, end); + + output_tokens.push_back(new_Token(sbeg, send, TokenType_KEY, Offset(input, cursor))); + + // now come the individual properties + const char *begin_cursor = cursor; + for (unsigned int i = 0; i < prop_count; ++i) { + ReadData(sbeg, send, input, cursor, begin_cursor + prop_length); + + output_tokens.push_back(new_Token(sbeg, send, TokenType_DATA, Offset(input, cursor))); + + if (i != prop_count - 1) { + output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_COMMA, Offset(input, cursor))); + } + } + + if (Offset(begin_cursor, cursor) != prop_length) { + TokenizeError("property length not reached, something is wrong", input, cursor); + } + + // at the end of each nested block, there is a NUL record to indicate + // that the sub-scope exists (i.e. to distinguish between P: and P : {}) + // this NUL record is 13 bytes long on 32 bit version and 25 bytes long on 64 bit. + const size_t sentinel_block_length = is64bits ? (sizeof(uint64_t) * 3 + 1) : (sizeof(uint32_t) * 3 + 1); + + if (Offset(input, cursor) < end_offset) { + if (end_offset - Offset(input, cursor) < sentinel_block_length) { + TokenizeError("insufficient padding bytes at block end", input, cursor); + } + + output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_OPEN_BRACKET, Offset(input, cursor))); + + // XXX this is vulnerable to stack overflowing .. 
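+		// Sketch of the record layout walked here, as read above:
+		//   [end_offset][prop_count][prop_length]      (4 bytes each, 8 when is64bits)
+		//   [name length: 1 byte][name]
+		//   [prop_length bytes of property data]
+		//   [nested scope records ...][sentinel_block_length zero bytes]
+		// so the recursion depth below matches the nesting depth of the FBX scopes.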
+ while (Offset(input, cursor) < end_offset - sentinel_block_length) { + ReadScope(output_tokens, input, cursor, input + end_offset - sentinel_block_length, is64bits); + } + output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_CLOSE_BRACKET, Offset(input, cursor))); + + for (unsigned int i = 0; i < sentinel_block_length; ++i) { + if (cursor[i] != '\0') { + TokenizeError("failed to read nested block sentinel, expected all bytes to be 0", input, cursor); + } + } + cursor += sentinel_block_length; + } + + if (Offset(input, cursor) != end_offset) { + TokenizeError("scope length not reached, something is wrong", input, cursor); + } + + return true; +} +} // anonymous namespace + +// ------------------------------------------------------------------------------------------------ +// TODO: Test FBX Binary files newer than the 7500 version to check if the 64 bits address behaviour is consistent +void TokenizeBinary(TokenList &output_tokens, const char *input, size_t length) { + if (length < 0x1b) { + //TokenizeError("file is too short",0); + } + + //uint32_t offset = 0x15; + /* const char* cursor = input + 0x15; + const uint32_t flags = ReadWord(input, cursor, input + length); + const uint8_t padding_0 = ReadByte(input, cursor, input + length); // unused + const uint8_t padding_1 = ReadByte(input, cursor, input + length); // unused*/ + + if (strncmp(input, "Kaydara FBX Binary", 18)) { + TokenizeError("magic bytes not found", 0); + } + + const char *cursor = input + 18; + /*Result ignored*/ ReadByte(input, cursor, input + length); + /*Result ignored*/ ReadByte(input, cursor, input + length); + /*Result ignored*/ ReadByte(input, cursor, input + length); + /*Result ignored*/ ReadByte(input, cursor, input + length); + /*Result ignored*/ ReadByte(input, cursor, input + length); + const uint32_t version = ReadWord(input, cursor, input + length); + print_verbose("FBX Version: " + itos(version)); + //ASSIMP_LOG_DEBUG_F("FBX version: ", version); + const bool is64bits = version >= 7500; + const char *end = input + length; + while (cursor < end) { + if (!ReadScope(output_tokens, input, cursor, input + length, is64bits)) { + break; + } + } +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXCommon.h b/modules/fbx/fbx_parser/FBXCommon.h new file mode 100644 index 0000000000..98a3d78f0d --- /dev/null +++ b/modules/fbx/fbx_parser/FBXCommon.h @@ -0,0 +1,110 @@ +/*************************************************************************/ +/* FBXCommon.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above +copyright notice, this list of conditions and the +following disclaimer. + +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the +following disclaimer in the documentation and/or other +materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its +contributors may be used to endorse or promote products +derived from this software without specific prior +written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXCommon.h +* Some useful constants and enums for dealing with FBX files. +*/ +#ifndef FBX_COMMON_H +#define FBX_COMMON_H + +#include <string> + +namespace FBXDocParser { +const std::string NULL_RECORD = { // 13 null bytes + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0' +}; // who knows why +const std::string SEPARATOR = { '\x00', '\x01' }; // for use inside strings +const std::string MAGIC_NODE_TAG = "_$AssimpFbx$"; // from import +const int64_t SECOND = 46186158000; // FBX's kTime unit + +// rotation order. 
We'll probably use EulerXYZ for everything +enum RotOrder { + RotOrder_EulerXYZ = 0, + RotOrder_EulerXZY, + RotOrder_EulerYZX, + RotOrder_EulerYXZ, + RotOrder_EulerZXY, + RotOrder_EulerZYX, + + RotOrder_SphericXYZ, + + RotOrder_MAX // end-of-enum sentinel +}; + +enum TransformInheritance { + Transform_RrSs = 0, + Transform_RSrs = 1, + Transform_Rrs = 2, + TransformInheritance_MAX // end-of-enum sentinel +}; +} // namespace FBXDocParser + +#endif // FBX_COMMON_H diff --git a/modules/fbx/fbx_parser/FBXDeformer.cpp b/modules/fbx/fbx_parser/FBXDeformer.cpp new file mode 100644 index 0000000000..325641bc32 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXDeformer.cpp @@ -0,0 +1,279 @@ +/*************************************************************************/ +/* FBXDeformer.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXNoteAttribute.cpp + * @brief Assimp::FBX::NodeAttribute (and subclasses) implementation + */ + +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXMeshGeometry.h" +#include "FBXParser.h" +#include "core/math/math_funcs.h" +#include "core/math/transform.h" + +#include <iostream> + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +Deformer::Deformer(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + + const std::string &classname = ParseTokenAsString(GetRequiredToken(element, 2)); + props = GetPropertyTable(doc, "Deformer.Fbx" + classname, element, sc, true); +} + +// ------------------------------------------------------------------------------------------------ +Deformer::~Deformer() { +} + +Constraint::Constraint(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + const std::string &classname = ParseTokenAsString(GetRequiredToken(element, 2)); + // used something.fbx as this is a cache name. 
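+	// For illustration: the key passed below is "Something.Fbx" + <class tag> (e.g.
+	// "Something.FbxConstraint" when the class tag is "Constraint"); per the note
+	// above it appears to act only as a cache name for GetPropertyTable.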
+ props = GetPropertyTable(doc, "Something.Fbx" + classname, element, sc, true); +} + +Constraint::~Constraint() { +} + +// ------------------------------------------------------------------------------------------------ +Cluster::Cluster(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Deformer(id, element, doc, name), valid_transformAssociateModel(false) { + const ScopePtr sc = GetRequiredScope(element); + // for( auto element : sc.Elements()) + // { + // std::cout << "cluster element: " << element.first << std::endl; + // } + // + // element: Indexes + // element: Transform + // element: TransformAssociateModel + // element: TransformLink + // element: UserData + // element: Version + // element: Weights + + const ElementPtr Indexes = sc->GetElement("Indexes"); + const ElementPtr Weights = sc->GetElement("Weights"); + + const ElementPtr TransformAssociateModel = sc->GetElement("TransformAssociateModel"); + if (TransformAssociateModel != nullptr) { + //Transform t = ReadMatrix(*TransformAssociateModel); + link_mode = SkinLinkMode_Additive; + valid_transformAssociateModel = true; + } else { + link_mode = SkinLinkMode_Normalized; + valid_transformAssociateModel = false; + } + + const ElementPtr Transform = GetRequiredElement(sc, "Transform", element); + const ElementPtr TransformLink = GetRequiredElement(sc, "TransformLink", element); + + // todo: check if we need this + //const Element& TransformAssociateModel = GetRequiredElement(sc, "TransformAssociateModel", &element); + + transform = ReadMatrix(Transform); + transformLink = ReadMatrix(TransformLink); + + // it is actually possible that there be Deformer's with no weights + if (!!Indexes != !!Weights) { + DOMError("either Indexes or Weights are missing from Cluster", element); + } + + if (Indexes) { + ParseVectorDataArray(indices, Indexes); + ParseVectorDataArray(weights, Weights); + } + + if (indices.size() != weights.size()) { + DOMError("sizes of index and weight array don't match up", element); + } + + // read assigned node + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "Model"); + for (const Connection *con : conns) { + const Model *mod = ProcessSimpleConnection<Model>(*con, false, "Model -> Cluster", element); + if (mod) { + node = mod; + break; + } + } + + if (!node) { + DOMError("failed to read target Node for Cluster", element); + node = nullptr; + } +} + +// ------------------------------------------------------------------------------------------------ +Cluster::~Cluster() { +} + +// ------------------------------------------------------------------------------------------------ +Skin::Skin(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Deformer(id, element, doc, name), accuracy(0.0f) { + const ScopePtr sc = GetRequiredScope(element); + + // keep this it is used for debugging and any FBX format changes + // for (auto element : sc.Elements()) { + // std::cout << "skin element: " << element.first << std::endl; + // } + + const ElementPtr Link_DeformAcuracy = sc->GetElement("Link_DeformAcuracy"); + if (Link_DeformAcuracy) { + accuracy = ParseTokenAsFloat(GetRequiredToken(Link_DeformAcuracy, 0)); + } + + const ElementPtr SkinType = sc->GetElement("SkinningType"); + + if (SkinType) { + std::string skin_type = ParseTokenAsString(GetRequiredToken(SkinType, 0)); + + if (skin_type == "Linear") { + skinType = Skin_Linear; + } else if (skin_type == "Rigid") { + skinType = Skin_Rigid; + } else if (skin_type 
== "DualQuaternion") { + skinType = Skin_DualQuaternion; + } else if (skin_type == "Blend") { + skinType = Skin_Blend; + } else { + print_error("[doc:skin] could not find valid skin type: " + String(skin_type.c_str())); + } + } + + // resolve assigned clusters + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "Deformer"); + + // + + clusters.reserve(conns.size()); + for (const Connection *con : conns) { + const Cluster *cluster = ProcessSimpleConnection<Cluster>(*con, false, "Cluster -> Skin", element); + if (cluster) { + clusters.push_back(cluster); + continue; + } + } +} + +// ------------------------------------------------------------------------------------------------ +Skin::~Skin() { +} +// ------------------------------------------------------------------------------------------------ +BlendShape::BlendShape(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Deformer(id, element, doc, name) { + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "Deformer"); + blendShapeChannels.reserve(conns.size()); + for (const Connection *con : conns) { + const BlendShapeChannel *bspc = ProcessSimpleConnection<BlendShapeChannel>(*con, false, "BlendShapeChannel -> BlendShape", element); + if (bspc) { + blendShapeChannels.push_back(bspc); + continue; + } + } +} +// ------------------------------------------------------------------------------------------------ +BlendShape::~BlendShape() { +} +// ------------------------------------------------------------------------------------------------ +BlendShapeChannel::BlendShapeChannel(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Deformer(id, element, doc, name) { + const ScopePtr sc = GetRequiredScope(element); + const ElementPtr DeformPercent = sc->GetElement("DeformPercent"); + if (DeformPercent) { + percent = ParseTokenAsFloat(GetRequiredToken(DeformPercent, 0)); + } + const ElementPtr FullWeights = sc->GetElement("FullWeights"); + if (FullWeights) { + ParseVectorDataArray(fullWeights, FullWeights); + } + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "Geometry"); + shapeGeometries.reserve(conns.size()); + for (const Connection *con : conns) { + const ShapeGeometry *const sg = ProcessSimpleConnection<ShapeGeometry>(*con, false, "Shape -> BlendShapeChannel", element); + if (sg) { + shapeGeometries.push_back(sg); + continue; + } + } +} +// ------------------------------------------------------------------------------------------------ +BlendShapeChannel::~BlendShapeChannel() { +} +// ------------------------------------------------------------------------------------------------ +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXDocument.cpp b/modules/fbx/fbx_parser/FBXDocument.cpp new file mode 100644 index 0000000000..74798a655a --- /dev/null +++ b/modules/fbx/fbx_parser/FBXDocument.cpp @@ -0,0 +1,713 @@ +/*************************************************************************/ +/* FBXDocument.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the* + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXDocument.cpp + * @brief Implementation of the FBX DOM classes + */ + +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXImportSettings.h" +#include "FBXMeshGeometry.h" +#include "FBXParser.h" +#include "FBXProperties.h" +#include "FBXUtil.h" + +#include <algorithm> +#include <functional> +#include <iostream> +#include <map> +#include <memory> + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +LazyObject::LazyObject(uint64_t id, const ElementPtr element, const Document &doc) : + doc(doc), element(element), id(id), flags() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +LazyObject::~LazyObject() { + object.reset(); +} + +ObjectPtr LazyObject::LoadObject() { + if (IsBeingConstructed() || FailedToConstruct()) { + return nullptr; + } + + if (object) { + return object.get(); + } + + TokenPtr key = element->KeyToken(); + ERR_FAIL_COND_V(!key, nullptr); + const TokenList &tokens = element->Tokens(); + + if (tokens.size() < 3) { + //DOMError("expected at least 3 tokens: id, name and class tag",&element); + return nullptr; + } + + const char *err = nullptr; + std::string name = ParseTokenAsString(tokens[1], err); + if (err) { + DOMError(err, element); + } + + // small fix for binary reading: binary fbx files don't use + // prefixes such as Model:: in front of their names. The + // loading code expects this at many places, though! + // so convert the binary representation (a 0x0001) to the + // double colon notation. + if (tokens[1]->IsBinary()) { + for (size_t i = 0; i < name.length(); ++i) { + if (name[i] == 0x0 && name[i + 1] == 0x1) { + name = name.substr(i + 2) + "::" + name.substr(0, i); + } + } + } + + const std::string classtag = ParseTokenAsString(tokens[2], err); + if (err) { + DOMError(err, element); + } + + // prevent recursive calls + flags |= BEING_CONSTRUCTED; + + // this needs to be relatively fast since it happens a lot, + // so avoid constructing strings all the time. 
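+	// For illustration: `obtype` points at the raw key token ("Model", "Geometry",
+	// "Deformer", ...) inside the tokenizer buffer; comparing with strncmp over
+	// [begin, end) avoids building a std::string per object, which is what the
+	// comment above means by keeping this path fast.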
+ const char *obtype = key->begin(); + const size_t length = static_cast<size_t>(key->end() - key->begin()); + + if (!strncmp(obtype, "Pose", length)) { + object.reset(new FbxPose(id, element, doc, name)); + } else if (!strncmp(obtype, "Geometry", length)) { + if (!strcmp(classtag.c_str(), "Mesh")) { + object.reset(new MeshGeometry(id, element, name, doc)); + } + if (!strcmp(classtag.c_str(), "Shape")) { + object.reset(new ShapeGeometry(id, element, name, doc)); + } + if (!strcmp(classtag.c_str(), "Line")) { + object.reset(new LineGeometry(id, element, name, doc)); + } + } else if (!strncmp(obtype, "NodeAttribute", length)) { + if (!strcmp(classtag.c_str(), "Camera")) { + object.reset(new Camera(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "CameraSwitcher")) { + object.reset(new CameraSwitcher(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "Light")) { + object.reset(new Light(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "Null")) { + object.reset(new Null(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "LimbNode")) { + // This is an older format for bones + // this is what blender uses I believe + object.reset(new LimbNode(id, element, doc, name)); + } + } else if (!strncmp(obtype, "Constraint", length)) { + object.reset(new Constraint(id, element, doc, name)); + } else if (!strncmp(obtype, "Deformer", length)) { + if (!strcmp(classtag.c_str(), "Cluster")) { + object.reset(new Cluster(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "Skin")) { + object.reset(new Skin(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "BlendShape")) { + object.reset(new BlendShape(id, element, doc, name)); + } else if (!strcmp(classtag.c_str(), "BlendShapeChannel")) { + object.reset(new BlendShapeChannel(id, element, doc, name)); + } + } else if (!strncmp(obtype, "Model", length)) { + // Model is normal node + + // LimbNode model is a 'bone' node. 
+ if (!strcmp(classtag.c_str(), "LimbNode")) { + object.reset(new ModelLimbNode(id, element, doc, name)); + + } else if (strcmp(classtag.c_str(), "IKEffector") && strcmp(classtag.c_str(), "FKEffector")) { + // FK and IK effectors are not supporte + object.reset(new Model(id, element, doc, name)); + } + } else if (!strncmp(obtype, "Material", length)) { + object.reset(new Material(id, element, doc, name)); + } else if (!strncmp(obtype, "Texture", length)) { + object.reset(new Texture(id, element, doc, name)); + } else if (!strncmp(obtype, "LayeredTexture", length)) { + object.reset(new LayeredTexture(id, element, doc, name)); + } else if (!strncmp(obtype, "Video", length)) { + object.reset(new Video(id, element, doc, name)); + } else if (!strncmp(obtype, "AnimationStack", length)) { + object.reset(new AnimationStack(id, element, name, doc)); + } else if (!strncmp(obtype, "AnimationLayer", length)) { + object.reset(new AnimationLayer(id, element, name, doc)); + } else if (!strncmp(obtype, "AnimationCurve", length)) { + object.reset(new AnimationCurve(id, element, name, doc)); + } else if (!strncmp(obtype, "AnimationCurveNode", length)) { + object.reset(new AnimationCurveNode(id, element, name, doc)); + } else { + ERR_FAIL_V_MSG(nullptr, "FBX contains unsupported object: " + String(obtype)); + } + + flags &= ~BEING_CONSTRUCTED; + + return object.get(); +} + +// ------------------------------------------------------------------------------------------------ +Object::Object(uint64_t id, const ElementPtr element, const std::string &name) : + element(element), name(name), id(id) { +} + +// ------------------------------------------------------------------------------------------------ +Object::~Object() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +FileGlobalSettings::FileGlobalSettings(const Document &doc, const PropertyTable *props) : + props(props), doc(doc) { + // empty +} + +// ------------------------------------------------------------------------------------------------ +FileGlobalSettings::~FileGlobalSettings() { + if (props != nullptr) { + delete props; + props = nullptr; + } +} + +// ------------------------------------------------------------------------------------------------ +Document::Document(const Parser &parser, const ImportSettings &settings) : + settings(settings), parser(parser), SafeToImport(false) { + // Cannot use array default initialization syntax because vc8 fails on it + for (unsigned int &timeStamp : creationTimeStamp) { + timeStamp = 0; + } + + // we must check if we can read the header version safely, if its outdated then drop it. + if (ReadHeader()) { + SafeToImport = true; + ReadPropertyTemplates(); + + ReadGlobalSettings(); + + // This order is important, connections need parsed objects to check + // whether connections are ok or not. Objects may not be evaluated yet, + // though, since this may require valid connections. 
+ ReadObjects(); + ReadConnections(); + } +} + +// ------------------------------------------------------------------------------------------------ +Document::~Document() { + for (PropertyTemplateMap::value_type v : templates) { + delete v.second; + } + + for (ObjectMap::value_type &v : objects) { + delete v.second; + } + + for (ConnectionMap::value_type &v : src_connections) { + delete v.second; + } + + if (metadata_properties != nullptr) { + delete metadata_properties; + } + // clear globals import pointer + globals.reset(); +} + +// ------------------------------------------------------------------------------------------------ +static const unsigned int LowerSupportedVersion = 7300; +static const unsigned int UpperSupportedVersion = 7700; + +bool Document::ReadHeader() { + // Read ID objects from "Objects" section + ScopePtr sc = parser.GetRootScope(); + ElementPtr ehead = sc->GetElement("FBXHeaderExtension"); + if (!ehead || !ehead->Compound()) { + DOMError("no FBXHeaderExtension dictionary found"); + } + + const ScopePtr shead = ehead->Compound(); + fbxVersion = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(shead, "FBXVersion", ehead), 0)); + + // While we may have some success with newer files, we don't support + // the older 6.n fbx format + if (fbxVersion < LowerSupportedVersion) { + DOMWarning("unsupported, old format version, FBX 2015-2020, you must re-export in a more modern version of your original modelling application"); + return false; + } + if (fbxVersion > UpperSupportedVersion) { + DOMWarning("unsupported, newer format version, supported are only FBX 2015, up to FBX 2020" + " trying to read it nevertheless"); + } + + const ElementPtr ecreator = shead->GetElement("Creator"); + if (ecreator) { + creator = ParseTokenAsString(GetRequiredToken(ecreator, 0)); + } + + // + // Scene Info + // + + const ElementPtr scene_info = shead->GetElement("SceneInfo"); + + if (scene_info) { + PropertyTable *fileExportProps = const_cast<PropertyTable *>(GetPropertyTable(*this, "", scene_info, scene_info->Compound(), true)); + + if (fileExportProps) { + metadata_properties = fileExportProps; + } + } + + const ElementPtr etimestamp = shead->GetElement("CreationTimeStamp"); + if (etimestamp && etimestamp->Compound()) { + const ScopePtr stimestamp = etimestamp->Compound(); + creationTimeStamp[0] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Year"), 0)); + creationTimeStamp[1] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Month"), 0)); + creationTimeStamp[2] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Day"), 0)); + creationTimeStamp[3] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Hour"), 0)); + creationTimeStamp[4] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Minute"), 0)); + creationTimeStamp[5] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Second"), 0)); + creationTimeStamp[6] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp, "Millisecond"), 0)); + } + + return true; +} + +// ------------------------------------------------------------------------------------------------ +void Document::ReadGlobalSettings() { + ERR_FAIL_COND_MSG(globals != nullptr, "Global settings is already setup this is a serious error and should be reported"); + + const ScopePtr sc = parser.GetRootScope(); + const ElementPtr ehead = sc->GetElement("GlobalSettings"); + if (nullptr == ehead || !ehead->Compound()) { + DOMWarning("no GlobalSettings dictionary found"); + globals 
= std::make_shared<FileGlobalSettings>(*this, new PropertyTable()); + return; + } + + const PropertyTable *props = GetPropertyTable(*this, "", ehead, ehead->Compound(), true); + + //double v = PropertyGet<float>( *props, std::string("UnitScaleFactor"), 1.0 ); + + if (!props) { + DOMError("GlobalSettings dictionary contains no property table"); + } + + globals = std::make_shared<FileGlobalSettings>(*this, props); +} + +// ------------------------------------------------------------------------------------------------ +void Document::ReadObjects() { + // read ID objects from "Objects" section + const ScopePtr sc = parser.GetRootScope(); + const ElementPtr eobjects = sc->GetElement("Objects"); + if (!eobjects || !eobjects->Compound()) { + DOMError("no Objects dictionary found"); + } + + // add a dummy entry to represent the Model::RootNode object (id 0), + // which is only indirectly defined in the input file + objects[0] = new LazyObject(0L, eobjects, *this); + + const ScopePtr sobjects = eobjects->Compound(); + for (const ElementMap::value_type &iter : sobjects->Elements()) { + // extract ID + const TokenList &tok = iter.second->Tokens(); + + if (tok.empty()) { + DOMError("expected ID after object key", iter.second); + } + + const char *err; + const uint64_t id = ParseTokenAsID(tok[0], err); + if (err) { + DOMError(err, iter.second); + } + + // id=0 is normally implicit + if (id == 0L) { + DOMError("encountered object with implicitly defined id 0", iter.second); + } + + if (objects.find(id) != objects.end()) { + DOMWarning("encountered duplicate object id, ignoring first occurrence", iter.second); + } + + objects[id] = new LazyObject(id, iter.second, *this); + + // grab all animation stacks upfront since there is no listing of them + if (!strcmp(iter.first.c_str(), "AnimationStack")) { + animationStacks.push_back(id); + } else if (!strcmp(iter.first.c_str(), "Constraint")) { + constraints.push_back(id); + } else if (!strcmp(iter.first.c_str(), "Pose")) { + bind_poses.push_back(id); + } else if (!strcmp(iter.first.c_str(), "Material")) { + materials.push_back(id); + } else if (!strcmp(iter.first.c_str(), "Deformer")) { + TokenPtr key = iter.second->KeyToken(); + ERR_CONTINUE_MSG(!key, "[parser bug] invalid token key for deformer"); + const TokenList &tokens = iter.second->Tokens(); + const std::string class_tag = ParseTokenAsString(tokens[2], err); + + if (err) { + DOMError(err, iter.second); + } + + if (class_tag == "Skin") { + //print_verbose("registered skin:" + itos(id)); + skins.push_back(id); + } + } + } +} + +// ------------------------------------------------------------------------------------------------ +void Document::ReadPropertyTemplates() { + const ScopePtr sc = parser.GetRootScope(); + // read property templates from "Definitions" section + const ElementPtr edefs = sc->GetElement("Definitions"); + if (!edefs || !edefs->Compound()) { + DOMWarning("no Definitions dictionary found"); + return; + } + + const ScopePtr sdefs = edefs->Compound(); + const ElementCollection otypes = sdefs->GetCollection("ObjectType"); + for (ElementMap::const_iterator it = otypes.first; it != otypes.second; ++it) { + const ElementPtr el = (*it).second; + const ScopePtr sc_2 = el->Compound(); + if (!sc_2) { + DOMWarning("expected nested scope in ObjectType, ignoring", el); + continue; + } + + const TokenList &tok = el->Tokens(); + if (tok.empty()) { + DOMWarning("expected name for ObjectType element, ignoring", el); + continue; + } + + const std::string &oname = ParseTokenAsString(tok[0]); + + const 
ElementCollection templs = sc_2->GetCollection("PropertyTemplate");
+		for (ElementMap::const_iterator iter = templs.first; iter != templs.second; ++iter) {
+			const ElementPtr el_2 = (*iter).second;
+			const ScopePtr sc_3 = el_2->Compound();
+			if (!sc_3) {
+				DOMWarning("expected nested scope in PropertyTemplate, ignoring", el);
+				continue;
+			}
+
+			const TokenList &tok_2 = el_2->Tokens();
+			if (tok_2.empty()) {
+				DOMWarning("expected name for PropertyTemplate element, ignoring", el);
+				continue;
+			}
+
+			const std::string &pname = ParseTokenAsString(tok_2[0]);
+
+			const ElementPtr Properties70 = sc_3->GetElement("Properties70");
+			if (Properties70) {
+				// PropertyTable(const ElementPtr element, const PropertyTable* templateProps);
+				const PropertyTable *props = new PropertyTable(Properties70, nullptr);
+
+				templates[oname + "." + pname] = props;
+			}
+		}
+	}
+}
+
+// ------------------------------------------------------------------------------------------------
+void Document::ReadConnections() {
+	const ScopePtr sc = parser.GetRootScope();
+
+	// read object connections from the "Connections" section
+	const ElementPtr econns = sc->GetElement("Connections");
+	if (!econns || !econns->Compound()) {
+		DOMError("no Connections dictionary found");
+	}
+
+	uint64_t insertionOrder = 0l;
+	const ScopePtr sconns = econns->Compound();
+	const ElementCollection conns = sconns->GetCollection("C");
+	for (ElementMap::const_iterator it = conns.first; it != conns.second; ++it) {
+		const ElementPtr el = (*it).second;
+		const std::string &type = ParseTokenAsString(GetRequiredToken(el, 0));
+
+		// PP = property-property connection, ignored for now
+		// (tokens: "PP", ID1, "Property1", ID2, "Property2")
+		if (type == "PP") {
+			continue;
+		}
+
+		const uint64_t src = ParseTokenAsID(GetRequiredToken(el, 1));
+		const uint64_t dest = ParseTokenAsID(GetRequiredToken(el, 2));
+
+		// OO = object-object connection
+		// OP = object-property connection, in which case the destination property follows the object ID
+		const std::string &prop = (type == "OP" ? ParseTokenAsString(GetRequiredToken(el, 3)) : "");
+
+		if (objects.find(src) == objects.end()) {
+			DOMWarning("source object for connection does not exist", el);
+			continue;
+		}
+
+		// dest may be 0 (root node) but we added a dummy object before
+		if (objects.find(dest) == objects.end()) {
+			DOMWarning("destination object for connection does not exist", el);
+			continue;
+		}
+
+		// add new connection
+		const Connection *const c = new Connection(insertionOrder++, src, dest, prop, *this);
+		src_connections.insert(ConnectionMap::value_type(src, c));
+		dest_connections.insert(ConnectionMap::value_type(dest, c));
+	}
+}
+
+// ------------------------------------------------------------------------------------------------
+const std::vector<const AnimationStack *> &Document::AnimationStacks() const {
+	if (!animationStacksResolved.empty() || animationStacks.empty()) {
+		return animationStacksResolved;
+	}
+
+	animationStacksResolved.reserve(animationStacks.size());
+	for (uint64_t id : animationStacks) {
+		LazyObject *lazy = GetObject(id);
+
+		// Two things happen here:
+		// - the generic Object pointer is cast to an AnimationStack pointer
+		// - objects for which that cast fails are skipped (with an error) instead of being returned
+
+		const AnimationStack *stack = lazy->Get<AnimationStack>();
+		ERR_CONTINUE_MSG(!stack, "invalid ptr to AnimationStack - conversion failure");
+
+		// We push back the raw pointer to keep things simple; ownership stays on the parser side, so it won't be cleaned up here.
+ animationStacksResolved.push_back(stack); + } + + return animationStacksResolved; +} + +// ------------------------------------------------------------------------------------------------ +LazyObject *Document::GetObject(uint64_t id) const { + ObjectMap::const_iterator it = objects.find(id); + return it == objects.end() ? nullptr : (*it).second; +} + +#define MAX_CLASSNAMES 6 + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsSequenced(uint64_t id, const ConnectionMap &conns) const { + std::vector<const Connection *> temp; + + const std::pair<ConnectionMap::const_iterator, ConnectionMap::const_iterator> range = + conns.equal_range(id); + + temp.reserve(std::distance(range.first, range.second)); + for (ConnectionMap::const_iterator it = range.first; it != range.second; ++it) { + temp.push_back((*it).second); + } + + std::sort(temp.begin(), temp.end(), std::mem_fn(&Connection::Compare)); + + return temp; // NRVO should handle this +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsSequenced(uint64_t id, bool is_src, + const ConnectionMap &conns, + const char *const *classnames, + size_t count) const + +{ + size_t lengths[MAX_CLASSNAMES]; + + const size_t c = count; + for (size_t i = 0; i < c; ++i) { + lengths[i] = strlen(classnames[i]); + } + + std::vector<const Connection *> temp; + const std::pair<ConnectionMap::const_iterator, ConnectionMap::const_iterator> range = + conns.equal_range(id); + + temp.reserve(std::distance(range.first, range.second)); + for (ConnectionMap::const_iterator it = range.first; it != range.second; ++it) { + TokenPtr key = (is_src ? 
(*it).second->LazyDestinationObject() : (*it).second->LazySourceObject())->GetElement()->KeyToken(); + + const char *obtype = key->begin(); + + for (size_t i = 0; i < c; ++i) { + //ai_assert(classnames[i]); + if (static_cast<size_t>(std::distance(key->begin(), key->end())) == lengths[i] && !strncmp(classnames[i], obtype, lengths[i])) { + obtype = nullptr; + break; + } + } + + if (obtype) { + continue; + } + + temp.push_back((*it).second); + } + + std::sort(temp.begin(), temp.end(), std::mem_fn(&Connection::Compare)); + return temp; // NRVO should handle this +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsBySourceSequenced(uint64_t source) const { + return GetConnectionsSequenced(source, ConnectionsBySource()); +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsBySourceSequenced(uint64_t src, const char *classname) const { + const char *arr[] = { classname }; + return GetConnectionsBySourceSequenced(src, arr, 1); +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsBySourceSequenced(uint64_t source, + const char *const *classnames, size_t count) const { + return GetConnectionsSequenced(source, true, ConnectionsBySource(), classnames, count); +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsByDestinationSequenced(uint64_t dest, + const char *classname) const { + const char *arr[] = { classname }; + return GetConnectionsByDestinationSequenced(dest, arr, 1); +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsByDestinationSequenced(uint64_t dest) const { + return GetConnectionsSequenced(dest, ConnectionsByDestination()); +} + +// ------------------------------------------------------------------------------------------------ +std::vector<const Connection *> Document::GetConnectionsByDestinationSequenced(uint64_t dest, + const char *const *classnames, size_t count) const { + return GetConnectionsSequenced(dest, false, ConnectionsByDestination(), classnames, count); +} + +// ------------------------------------------------------------------------------------------------ +Connection::Connection(uint64_t insertionOrder, uint64_t src, uint64_t dest, const std::string &prop, + const Document &doc) : + insertionOrder(insertionOrder), prop(prop), src(src), dest(dest), doc(doc) { +} + +// ------------------------------------------------------------------------------------------------ +Connection::~Connection() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +LazyObject *Connection::LazySourceObject() const { + LazyObject *const lazy = doc.GetObject(src); + return lazy; +} + +// ------------------------------------------------------------------------------------------------ +LazyObject *Connection::LazyDestinationObject() const { + LazyObject *const lazy = doc.GetObject(dest); + return lazy; +} + +// ------------------------------------------------------------------------------------------------ +Object *Connection::SourceObject() const { + LazyObject *lazy = doc.GetObject(src); + 
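+	// Editorial note (added for this document, not part of the upstream sources):
+	// ReadConnections() only creates a Connection when both endpoint IDs are
+	// present in the object map, so this lookup is expected to succeed.
+	// LoadObject() below may still return nullptr if the object has no DOM
+	// representation.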
//ai_assert(lazy); + return lazy->LoadObject(); +} + +// ------------------------------------------------------------------------------------------------ +Object *Connection::DestinationObject() const { + LazyObject *lazy = doc.GetObject(dest); + //ai_assert(lazy); + return lazy->LoadObject(); +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXDocument.h b/modules/fbx/fbx_parser/FBXDocument.h new file mode 100644 index 0000000000..7c08f3de65 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXDocument.h @@ -0,0 +1,1319 @@ +/*************************************************************************/ +/* FBXDocument.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/** @file FBXDocument.h + * @brief FBX DOM + */ +#ifndef FBX_DOCUMENT_H +#define FBX_DOCUMENT_H + +#include "FBXCommon.h" +#include "FBXParser.h" +#include "FBXProperties.h" +#include "core/math/transform.h" +#include "core/math/vector2.h" +#include "core/math/vector3.h" +#include "core/string/print_string.h" +#include <stdint.h> +#include <numeric> + +#define _AI_CONCAT(a, b) a##b +#define AI_CONCAT(a, b) _AI_CONCAT(a, b) + +namespace FBXDocParser { + +class Parser; +class Object; +struct ImportSettings; +class Connection; + +class PropertyTable; +class Document; +class Material; +class ShapeGeometry; +class LineGeometry; +class Geometry; + +class Video; + +class AnimationCurve; +class AnimationCurveNode; +class AnimationLayer; +class AnimationStack; + +class BlendShapeChannel; +class BlendShape; +class Skin; +class Cluster; + +typedef Object *ObjectPtr; +#define new_Object new Object + +/** Represents a delay-parsed FBX objects. Many objects in the scene + * are not needed by assimp, so it makes no sense to parse them + * upfront. 
*/ +class LazyObject { +public: + LazyObject(uint64_t id, const ElementPtr element, const Document &doc); + ~LazyObject(); + + ObjectPtr LoadObject(); + + /* Casting weak pointers to their templated type safely and preserving ref counting and safety + * with lock() keyword to prevent leaking memory + */ + template <typename T> + const T *Get() { + ObjectPtr ob = LoadObject(); + return dynamic_cast<const T *>(ob); + } + + uint64_t ID() const { + return id; + } + + bool IsBeingConstructed() const { + return (flags & BEING_CONSTRUCTED) != 0; + } + + bool FailedToConstruct() const { + return (flags & FAILED_TO_CONSTRUCT) != 0; + } + + ElementPtr GetElement() const { + return element; + } + + const Document &GetDocument() const { + return doc; + } + +private: + const Document &doc; + ElementPtr element = nullptr; + std::shared_ptr<Object> object = nullptr; + const uint64_t id = 0; + + enum Flags { + BEING_CONSTRUCTED = 0x1, + FAILED_TO_CONSTRUCT = 0x2 + }; + + unsigned int flags = 0; +}; + +/** Base class for in-memory (DOM) representations of FBX objects */ +class Object { +public: + Object(uint64_t id, const ElementPtr element, const std::string &name); + + virtual ~Object(); + + ElementPtr SourceElement() const { + return element; + } + + const std::string &Name() const { + return name; + } + + uint64_t ID() const { + return id; + } + +protected: + const ElementPtr element; + const std::string name; + const uint64_t id = 0; +}; + +/** DOM class for generic FBX NoteAttribute blocks. NoteAttribute's just hold a property table, + * fixed members are added by deriving classes. */ +class NodeAttribute : public Object { +public: + NodeAttribute(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~NodeAttribute(); + + const PropertyTable *Props() const { + return props; + } + +private: + const PropertyTable *props; +}; + +/** DOM base class for FBX camera settings attached to a node */ +class CameraSwitcher : public NodeAttribute { +public: + CameraSwitcher(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~CameraSwitcher(); + + int CameraID() const { + return cameraId; + } + + const std::string &CameraName() const { + return cameraName; + } + + const std::string &CameraIndexName() const { + return cameraIndexName; + } + +private: + int cameraId; + std::string cameraName; + std::string cameraIndexName; +}; + +#define fbx_stringize(a) #a + +#define fbx_simple_property(name, type, default_value) \ + type name() const { \ + return PropertyGet<type>(Props(), fbx_stringize(name), (default_value)); \ + } + +// XXX improve logging +#define fbx_simple_enum_property(name, type, default_value) \ + type name() const { \ + const int ival = PropertyGet<int>(Props(), fbx_stringize(name), static_cast<int>(default_value)); \ + if (ival < 0 || ival >= AI_CONCAT(type, _MAX)) { \ + return static_cast<type>(default_value); \ + } \ + return static_cast<type>(ival); \ + } + +class FbxPoseNode; +class FbxPose : public Object { +public: + FbxPose(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + const std::vector<FbxPoseNode *> &GetBindPoses() const { + return pose_nodes; + } + + virtual ~FbxPose(); + +private: + std::vector<FbxPoseNode *> pose_nodes; +}; + +class FbxPoseNode { +public: + FbxPoseNode(const ElementPtr element, const Document &doc, const std::string &name) { + const ScopePtr sc = GetRequiredScope(element); + + // get pose node transform + const ElementPtr Transform = 
GetRequiredElement(sc, "Matrix", element); + transform = ReadMatrix(Transform); + + // get node id this pose node is for + const ElementPtr NodeId = sc->GetElement("Node3D"); + if (NodeId) { + target_id = ParseTokenAsInt64(GetRequiredToken(NodeId, 0)); + } + + print_verbose("added posenode " + itos(target_id) + " transform: " + transform); + } + virtual ~FbxPoseNode() { + } + + uint64_t GetNodeID() const { + return target_id; + } + + Transform GetBindPose() const { + return transform; + } + +private: + uint64_t target_id; + Transform transform; +}; + +/** DOM base class for FBX cameras attached to a node */ +class Camera : public NodeAttribute { +public: + Camera(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Camera(); + + fbx_simple_property(Position, Vector3, Vector3(0, 0, 0)); + fbx_simple_property(UpVector, Vector3, Vector3(0, 1, 0)); + fbx_simple_property(InterestPosition, Vector3, Vector3(0, 0, 0)); + + fbx_simple_property(AspectWidth, float, 1.0f); + fbx_simple_property(AspectHeight, float, 1.0f); + fbx_simple_property(FilmWidth, float, 1.0f); + fbx_simple_property(FilmHeight, float, 1.0f); + + fbx_simple_property(NearPlane, float, 0.1f); + fbx_simple_property(FarPlane, float, 100.0f); + + fbx_simple_property(FilmAspectRatio, float, 1.0f); + fbx_simple_property(ApertureMode, int, 0); + + fbx_simple_property(FieldOfView, float, 1.0f); + fbx_simple_property(FocalLength, float, 1.0f); +}; + +/** DOM base class for FBX null markers attached to a node */ +class Null : public NodeAttribute { +public: + Null(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~Null(); +}; + +/** DOM base class for FBX limb node markers attached to a node */ +class LimbNode : public NodeAttribute { +public: + LimbNode(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~LimbNode(); +}; + +/** DOM base class for FBX lights attached to a node */ +class Light : public NodeAttribute { +public: + Light(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~Light(); + + enum Type { + Type_Point, + Type_Directional, + Type_Spot, + Type_Area, + Type_Volume, + + Type_MAX // end-of-enum sentinel + }; + + enum Decay { + Decay_None, + Decay_Linear, + Decay_Quadratic, + Decay_Cubic, + + Decay_MAX // end-of-enum sentinel + }; + + fbx_simple_property(Color, Vector3, Vector3(1, 1, 1)); + fbx_simple_enum_property(LightType, Type, 0); + fbx_simple_property(CastLightOnObject, bool, false); + fbx_simple_property(DrawVolumetricLight, bool, true); + fbx_simple_property(DrawGroundProjection, bool, true); + fbx_simple_property(DrawFrontFacingVolumetricLight, bool, false); + fbx_simple_property(Intensity, float, 100.0f); + fbx_simple_property(InnerAngle, float, 0.0f); + fbx_simple_property(OuterAngle, float, 45.0f); + fbx_simple_property(Fog, int, 50); + fbx_simple_enum_property(DecayType, Decay, 2); + fbx_simple_property(DecayStart, float, 1.0f); + fbx_simple_property(FileName, std::string, ""); + + fbx_simple_property(EnableNearAttenuation, bool, false); + fbx_simple_property(NearAttenuationStart, float, 0.0f); + fbx_simple_property(NearAttenuationEnd, float, 0.0f); + fbx_simple_property(EnableFarAttenuation, bool, false); + fbx_simple_property(FarAttenuationStart, float, 0.0f); + fbx_simple_property(FarAttenuationEnd, float, 0.0f); + + fbx_simple_property(CastShadows, bool, true); + fbx_simple_property(ShadowColor, Vector3, Vector3(0, 0, 0)); + + 
fbx_simple_property(AreaLightShape, int, 0); + + fbx_simple_property(LeftBarnDoor, float, 20.0f); + fbx_simple_property(RightBarnDoor, float, 20.0f); + fbx_simple_property(TopBarnDoor, float, 20.0f); + fbx_simple_property(BottomBarnDoor, float, 20.0f); + fbx_simple_property(EnableBarnDoor, bool, true); +}; + +class Model; + +typedef Model *ModelPtr; +#define new_Model new Model + +/** DOM base class for FBX models (even though its semantics are more "node" than "model" */ +class Model : public Object { +public: + enum RotOrder { + RotOrder_EulerXYZ = 0, + RotOrder_EulerXZY, + RotOrder_EulerYZX, + RotOrder_EulerYXZ, + RotOrder_EulerZXY, + RotOrder_EulerZYX, + + RotOrder_SphericXYZ, + + RotOrder_MAX // end-of-enum sentinel + }; + + Model(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Model(); + + fbx_simple_property(QuaternionInterpolate, int, 0); + + fbx_simple_property(RotationOffset, Vector3, Vector3()); + fbx_simple_property(RotationPivot, Vector3, Vector3()); + fbx_simple_property(ScalingOffset, Vector3, Vector3()); + fbx_simple_property(ScalingPivot, Vector3, Vector3()); + fbx_simple_property(TranslationActive, bool, false); + fbx_simple_property(TranslationMin, Vector3, Vector3()); + fbx_simple_property(TranslationMax, Vector3, Vector3()); + + fbx_simple_property(TranslationMinX, bool, false); + fbx_simple_property(TranslationMaxX, bool, false); + fbx_simple_property(TranslationMinY, bool, false); + fbx_simple_property(TranslationMaxY, bool, false); + fbx_simple_property(TranslationMinZ, bool, false); + fbx_simple_property(TranslationMaxZ, bool, false); + + fbx_simple_enum_property(RotationOrder, RotOrder, 0); + fbx_simple_property(RotationSpaceForLimitOnly, bool, false); + fbx_simple_property(RotationStiffnessX, float, 0.0f); + fbx_simple_property(RotationStiffnessY, float, 0.0f); + fbx_simple_property(RotationStiffnessZ, float, 0.0f); + fbx_simple_property(AxisLen, float, 0.0f); + + fbx_simple_property(PreRotation, Vector3, Vector3()); + fbx_simple_property(PostRotation, Vector3, Vector3()); + fbx_simple_property(RotationActive, bool, false); + + fbx_simple_property(RotationMin, Vector3, Vector3()); + fbx_simple_property(RotationMax, Vector3, Vector3()); + + fbx_simple_property(RotationMinX, bool, false); + fbx_simple_property(RotationMaxX, bool, false); + fbx_simple_property(RotationMinY, bool, false); + fbx_simple_property(RotationMaxY, bool, false); + fbx_simple_property(RotationMinZ, bool, false); + fbx_simple_property(RotationMaxZ, bool, false); + fbx_simple_enum_property(InheritType, TransformInheritance, 0); + + fbx_simple_property(ScalingActive, bool, false); + fbx_simple_property(ScalingMin, Vector3, Vector3()); + fbx_simple_property(ScalingMax, Vector3, Vector3(1, 1, 1)); + fbx_simple_property(ScalingMinX, bool, false); + fbx_simple_property(ScalingMaxX, bool, false); + fbx_simple_property(ScalingMinY, bool, false); + fbx_simple_property(ScalingMaxY, bool, false); + fbx_simple_property(ScalingMinZ, bool, false); + fbx_simple_property(ScalingMaxZ, bool, false); + + fbx_simple_property(GeometricTranslation, Vector3, Vector3()); + fbx_simple_property(GeometricRotation, Vector3, Vector3()); + fbx_simple_property(GeometricScaling, Vector3, Vector3(1, 1, 1)); + + fbx_simple_property(MinDampRangeX, float, 0.0f); + fbx_simple_property(MinDampRangeY, float, 0.0f); + fbx_simple_property(MinDampRangeZ, float, 0.0f); + fbx_simple_property(MaxDampRangeX, float, 0.0f); + fbx_simple_property(MaxDampRangeY, float, 0.0f); + 
fbx_simple_property(MaxDampRangeZ, float, 0.0f); + + fbx_simple_property(MinDampStrengthX, float, 0.0f); + fbx_simple_property(MinDampStrengthY, float, 0.0f); + fbx_simple_property(MinDampStrengthZ, float, 0.0f); + fbx_simple_property(MaxDampStrengthX, float, 0.0f); + fbx_simple_property(MaxDampStrengthY, float, 0.0f); + fbx_simple_property(MaxDampStrengthZ, float, 0.0f); + + fbx_simple_property(PreferredAngleX, float, 0.0f); + fbx_simple_property(PreferredAngleY, float, 0.0f); + fbx_simple_property(PreferredAngleZ, float, 0.0f); + + fbx_simple_property(Show, bool, true); + fbx_simple_property(LODBox, bool, false); + fbx_simple_property(Freeze, bool, false); + + const std::string &Shading() const { + return shading; + } + + const std::string &Culling() const { + return culling; + } + + const PropertyTable *Props() const { + return props; + } + + /** Get material links */ + const std::vector<const Material *> &GetMaterials() const { + return materials; + } + + /** Get geometry links */ + const std::vector<const Geometry *> &GetGeometry() const { + return geometry; + } + + /** Get node attachments */ + const std::vector<const NodeAttribute *> &GetAttributes() const { + return attributes; + } + + /** convenience method to check if the node has a Null node marker */ + bool IsNull() const; + +private: + void ResolveLinks(const ElementPtr element, const Document &doc); + +private: + std::vector<const Material *> materials; + std::vector<const Geometry *> geometry; + std::vector<const NodeAttribute *> attributes; + + std::string shading; + std::string culling; + const PropertyTable *props = nullptr; +}; + +class ModelLimbNode : public Model { +public: + ModelLimbNode(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~ModelLimbNode(); +}; + +/** DOM class for generic FBX textures */ +class Texture : public Object { +public: + Texture(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Texture(); + + const std::string &Type() const { + return type; + } + + const std::string &FileName() const { + return fileName; + } + + const std::string &RelativeFilename() const { + return relativeFileName; + } + + const std::string &AlphaSource() const { + return alphaSource; + } + + const Vector2 &UVTranslation() const { + return uvTrans; + } + + const Vector2 &UVScaling() const { + return uvScaling; + } + + const PropertyTable *Props() const { + return props; + } + + // return a 4-tuple + const unsigned int *Crop() const { + return crop; + } + + const Video *Media() const { + return media; + } + +private: + Vector2 uvTrans; + Vector2 uvScaling; + + std::string type; + std::string relativeFileName; + std::string fileName; + std::string alphaSource; + const PropertyTable *props = nullptr; + + unsigned int crop[4] = { 0 }; + + const Video *media = nullptr; +}; + +/** DOM class for layered FBX textures */ +class LayeredTexture : public Object { +public: + LayeredTexture(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~LayeredTexture(); + + // Can only be called after construction of the layered texture object due to construction flag. 
+ void fillTexture(const Document &doc); + + enum BlendMode { + BlendMode_Translucent, + BlendMode_Additive, + BlendMode_Modulate, + BlendMode_Modulate2, + BlendMode_Over, + BlendMode_Normal, + BlendMode_Dissolve, + BlendMode_Darken, + BlendMode_ColorBurn, + BlendMode_LinearBurn, + BlendMode_DarkerColor, + BlendMode_Lighten, + BlendMode_Screen, + BlendMode_ColorDodge, + BlendMode_LinearDodge, + BlendMode_LighterColor, + BlendMode_SoftLight, + BlendMode_HardLight, + BlendMode_VividLight, + BlendMode_LinearLight, + BlendMode_PinLight, + BlendMode_HardMix, + BlendMode_Difference, + BlendMode_Exclusion, + BlendMode_Subtract, + BlendMode_Divide, + BlendMode_Hue, + BlendMode_Saturation, + BlendMode_Color, + BlendMode_Luminosity, + BlendMode_Overlay, + BlendMode_BlendModeCount + }; + + const Texture *getTexture(int index = 0) const { + return textures[index]; + } + int textureCount() const { + return static_cast<int>(textures.size()); + } + BlendMode GetBlendMode() const { + return blendMode; + } + float Alpha() { + return alpha; + } + +private: + std::vector<const Texture *> textures; + BlendMode blendMode; + float alpha; +}; + +typedef std::map<std::string, const Texture *> TextureMap; +typedef std::map<std::string, const LayeredTexture *> LayeredTextureMap; + +/** DOM class for generic FBX videos */ +class Video : public Object { +public: + Video(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Video(); + + const std::string &Type() const { + return type; + } + + bool IsEmbedded() const { + return contentLength > 0; + } + + const std::string &FileName() const { + return fileName; + } + + const std::string &RelativeFilename() const { + return relativeFileName; + } + + const PropertyTable *Props() const { + return props; + } + + const uint8_t *Content() const { + return content; + } + + uint64_t ContentLength() const { + return contentLength; + } + + uint8_t *RelinquishContent() { + uint8_t *ptr = content; + content = 0; + return ptr; + } + + bool operator==(const Video &other) const { + return ( + type == other.type && relativeFileName == other.relativeFileName && fileName == other.fileName); + } + + bool operator<(const Video &other) const { + return std::tie(type, relativeFileName, fileName) < std::tie(other.type, other.relativeFileName, other.fileName); + } + +private: + std::string type; + std::string relativeFileName; + std::string fileName; + const PropertyTable *props = nullptr; + + uint64_t contentLength = 0; + uint8_t *content = nullptr; +}; + +/** DOM class for generic FBX materials */ +class Material : public Object { +public: + Material(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Material(); + + const std::string &GetShadingModel() const { + return shading; + } + + bool IsMultilayer() const { + return multilayer; + } + + const PropertyTable *Props() const { + return props; + } + + const TextureMap &Textures() const { + return textures; + } + + const LayeredTextureMap &LayeredTextures() const { + return layeredTextures; + } + +private: + std::string shading; + bool multilayer; + const PropertyTable *props; + + TextureMap textures; + LayeredTextureMap layeredTextures; +}; + +// signed int keys (this can happen!) +typedef std::vector<int64_t> KeyTimeList; +typedef std::vector<float> KeyValueList; + +/** Represents a FBX animation curve (i.e. 
a 1-dimensional set of keyframes and values therefor) */ +class AnimationCurve : public Object { +public: + AnimationCurve(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc); + virtual ~AnimationCurve(); + + /** get list of keyframe positions (time). + * Invariant: |GetKeys()| > 0 */ + const KeyTimeList &GetKeys() const { + return keys; + } + + /** get list of keyframe values. + * Invariant: |GetKeys()| == |GetValues()| && |GetKeys()| > 0*/ + const KeyValueList &GetValues() const { + return values; + } + + const std::map<int64_t, float> &GetValueTimeTrack() const { + return keyvalues; + } + + const std::vector<float> &GetAttributes() const { + return attributes; + } + + const std::vector<unsigned int> &GetFlags() const { + return flags; + } + +private: + KeyTimeList keys; + KeyValueList values; + std::vector<float> attributes; + std::map<int64_t, float> keyvalues; + std::vector<unsigned int> flags; +}; + +/* Typedef for pointers for the animation handler */ +typedef std::shared_ptr<AnimationCurve> AnimationCurvePtr; +typedef std::weak_ptr<AnimationCurve> AnimationCurveWeakPtr; +typedef std::map<std::string, const AnimationCurve *> AnimationMap; + +/* Animation Curve node ptr */ +typedef std::shared_ptr<AnimationCurveNode> AnimationCurveNodePtr; +typedef std::weak_ptr<AnimationCurveNode> AnimationCurveNodeWeakPtr; + +/** Represents a FBX animation curve (i.e. a mapping from single animation curves to nodes) */ +class AnimationCurveNode : public Object { +public: + /* the optional white list specifies a list of property names for which the caller + wants animations for. If the curve node does not match one of these, std::range_error + will be thrown. */ + AnimationCurveNode(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc, + const char *const *target_prop_whitelist = nullptr, size_t whitelist_size = 0); + + virtual ~AnimationCurveNode(); + + const PropertyTable *Props() const { + return props; + } + + const AnimationMap &Curves() const; + + /** Object the curve is assigned to, this can be NULL if the + * target object has no DOM representation or could not + * be read for other reasons.*/ + Object *Target() const { + return target; + } + + Model *TargetAsModel() const { + return dynamic_cast<Model *>(target); + } + + NodeAttribute *TargetAsNodeAttribute() const { + return dynamic_cast<NodeAttribute *>(target); + } + + /** Property of Target() that is being animated*/ + const std::string &TargetProperty() const { + return prop; + } + +private: + Object *target = nullptr; + const PropertyTable *props; + mutable AnimationMap curves; + std::string prop; + const Document &doc; +}; + +typedef std::vector<const AnimationCurveNode *> AnimationCurveNodeList; + +typedef std::shared_ptr<AnimationLayer> AnimationLayerPtr; +typedef std::weak_ptr<AnimationLayer> AnimationLayerWeakPtr; +typedef std::vector<const AnimationLayer *> AnimationLayerList; + +/** Represents a FBX animation layer (i.e. a list of node animations) */ +class AnimationLayer : public Object { +public: + AnimationLayer(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc); + virtual ~AnimationLayer(); + + const PropertyTable *Props() const { + //ai_assert(props.get()); + return props; + } + + /* the optional white list specifies a list of property names for which the caller + wants animations for. Curves not matching this list will not be added to the + animation layer. 
*/ + const AnimationCurveNodeList Nodes(const char *const *target_prop_whitelist = nullptr, size_t whitelist_size = 0) const; + +private: + const PropertyTable *props; + const Document &doc; +}; + +/** Represents a FBX animation stack (i.e. a list of animation layers) */ +class AnimationStack : public Object { +public: + AnimationStack(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc); + virtual ~AnimationStack(); + + fbx_simple_property(LocalStart, int64_t, 0L); + fbx_simple_property(LocalStop, int64_t, 0L); + fbx_simple_property(ReferenceStart, int64_t, 0L); + fbx_simple_property(ReferenceStop, int64_t, 0L); + + const PropertyTable *Props() const { + return props; + } + + const AnimationLayerList &Layers() const { + return layers; + } + +private: + const PropertyTable *props = nullptr; + AnimationLayerList layers; +}; + +/** DOM class for deformers */ +class Deformer : public Object { +public: + Deformer(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~Deformer(); + + const PropertyTable *Props() const { + //ai_assert(props.get()); + return props; + } + +private: + const PropertyTable *props; +}; + +/** Constraints are from Maya they can help us with BoneAttachments :) **/ +class Constraint : public Object { +public: + Constraint(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + virtual ~Constraint(); + +private: + const PropertyTable *props; +}; + +typedef std::vector<float> WeightArray; +typedef std::vector<unsigned int> WeightIndexArray; + +/** DOM class for BlendShapeChannel deformers */ +class BlendShapeChannel : public Deformer { +public: + BlendShapeChannel(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~BlendShapeChannel(); + + float DeformPercent() const { + return percent; + } + + const WeightArray &GetFullWeights() const { + return fullWeights; + } + + const std::vector<const ShapeGeometry *> &GetShapeGeometries() const { + return shapeGeometries; + } + +private: + float percent; + WeightArray fullWeights; + std::vector<const ShapeGeometry *> shapeGeometries; +}; + +/** DOM class for BlendShape deformers */ +class BlendShape : public Deformer { +public: + BlendShape(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~BlendShape(); + + const std::vector<const BlendShapeChannel *> &BlendShapeChannels() const { + return blendShapeChannels; + } + +private: + std::vector<const BlendShapeChannel *> blendShapeChannels; +}; + +/** DOM class for skin deformer clusters (aka sub-deformers) */ +class Cluster : public Deformer { +public: + Cluster(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Cluster(); + + /** get the list of deformer weights associated with this cluster. + * Use #GetIndices() to get the associated vertices. Both arrays + * have the same size (and may also be empty). */ + const std::vector<float> &GetWeights() const { + return weights; + } + + /** get indices into the vertex data of the geometry associated + * with this cluster. Use #GetWeights() to get the associated weights. + * Both arrays have the same size (and may also be empty). 
*/ + const std::vector<unsigned int> &GetIndices() const { + return indices; + } + + /** */ + const Transform &GetTransform() const { + return transform; + } + + const Transform &TransformLink() const { + return transformLink; + } + + const Model *TargetNode() const { + return node; + } + + const Transform &TransformAssociateModel() const { + return transformAssociateModel; + } + + bool TransformAssociateModelValid() const { + return valid_transformAssociateModel; + } + + // property is not in the fbx file + // if the cluster has an associate model + // we then have an additive type + enum SkinLinkMode { + SkinLinkMode_Normalized = 0, + SkinLinkMode_Additive = 1 + }; + + SkinLinkMode GetLinkMode() { + return link_mode; + } + +private: + std::vector<float> weights; + std::vector<unsigned int> indices; + + Transform transform; + Transform transformLink; + Transform transformAssociateModel; + SkinLinkMode link_mode; + bool valid_transformAssociateModel; + const Model *node = nullptr; +}; + +/** DOM class for skin deformers */ +class Skin : public Deformer { +public: + Skin(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name); + + virtual ~Skin(); + + float DeformAccuracy() const { + return accuracy; + } + + const std::vector<const Cluster *> &Clusters() const { + return clusters; + } + + enum SkinType { + Skin_Rigid = 0, + Skin_Linear, + Skin_DualQuaternion, + Skin_Blend + }; + + const SkinType &GetSkinType() const { + return skinType; + } + +private: + float accuracy; + SkinType skinType; + std::vector<const Cluster *> clusters; +}; + +/** Represents a link between two FBX objects. */ +class Connection { +public: + Connection(uint64_t insertionOrder, uint64_t src, uint64_t dest, const std::string &prop, const Document &doc); + ~Connection(); + + // note: a connection ensures that the source and dest objects exist, but + // not that they have DOM representations, so the return value of one of + // these functions can still be NULL. + Object *SourceObject() const; + Object *DestinationObject() const; + + // these, however, are always guaranteed to be valid + LazyObject *LazySourceObject() const; + LazyObject *LazyDestinationObject() const; + + /** return the name of the property the connection is attached to. + * this is an empty string for object to object (OO) connections. */ + const std::string &PropertyName() const { + return prop; + } + + uint64_t InsertionOrder() const { + return insertionOrder; + } + + int CompareTo(const Connection *c) const { + //ai_assert(nullptr != c); + + // note: can't subtract because this would overflow uint64_t + if (InsertionOrder() > c->InsertionOrder()) { + return 1; + } else if (InsertionOrder() < c->InsertionOrder()) { + return -1; + } + return 0; + } + + bool Compare(const Connection *c) const { + //ai_assert(nullptr != c); + + return InsertionOrder() < c->InsertionOrder(); + } + +public: + uint64_t insertionOrder; + const std::string prop; + + uint64_t src, dest; + const Document &doc; +}; + +// XXX again, unique_ptr would be useful. shared_ptr is too +// bloated since the objects have a well-defined single owner +// during their entire lifetime (Document). FBX files have +// up to many thousands of objects (most of which we never use), +// so the memory overhead for them should be kept at a minimum. 
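+//
+// Illustrative note (editorial addition, not part of the upstream comment):
+// the LazyObject and Connection pointers stored in the maps declared below
+// are owned by the Document and are deleted in Document::~Document(). Code
+// that queries them only borrows the pointers. A minimal sketch of such a
+// lookup, assuming a populated ConnectionMap named `conns` and an object id `id`:
+//
+//	std::pair<ConnectionMap::const_iterator, ConnectionMap::const_iterator> range = conns.equal_range(id);
+//	for (ConnectionMap::const_iterator it = range.first; it != range.second; ++it) {
+//		const Connection *c = (*it).second; // borrowed; never deleted by the caller
+//	}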
+typedef std::map<uint64_t, LazyObject *> ObjectMap; +typedef std::map<std::string, const PropertyTable *> PropertyTemplateMap; +typedef std::multimap<uint64_t, const Connection *> ConnectionMap; + +/** DOM class for global document settings, a single instance per document can + * be accessed via Document.Globals(). */ +class FileGlobalSettings { +public: + FileGlobalSettings(const Document &doc, const PropertyTable *props); + + ~FileGlobalSettings(); + + const PropertyTable *Props() const { + return props; + } + + const Document &GetDocument() const { + return doc; + } + + fbx_simple_property(UpAxis, int, 1); + fbx_simple_property(UpAxisSign, int, 1); + fbx_simple_property(FrontAxis, int, 2); + fbx_simple_property(FrontAxisSign, int, 1); + fbx_simple_property(CoordAxis, int, 0); + fbx_simple_property(CoordAxisSign, int, 1); + fbx_simple_property(OriginalUpAxis, int, 0); + fbx_simple_property(OriginalUpAxisSign, int, 1); + fbx_simple_property(UnitScaleFactor, float, 1); + fbx_simple_property(OriginalUnitScaleFactor, float, 1); + fbx_simple_property(AmbientColor, Vector3, Vector3(0, 0, 0)); + fbx_simple_property(DefaultCamera, std::string, ""); + + enum FrameRate { + FrameRate_DEFAULT = 0, + FrameRate_120 = 1, + FrameRate_100 = 2, + FrameRate_60 = 3, + FrameRate_50 = 4, + FrameRate_48 = 5, + FrameRate_30 = 6, + FrameRate_30_DROP = 7, + FrameRate_NTSC_DROP_FRAME = 8, + FrameRate_NTSC_FULL_FRAME = 9, + FrameRate_PAL = 10, + FrameRate_CINEMA = 11, + FrameRate_1000 = 12, + FrameRate_CINEMA_ND = 13, + FrameRate_CUSTOM = 14, + + FrameRate_MAX // end-of-enum sentinel + }; + + fbx_simple_enum_property(TimeMode, FrameRate, FrameRate_DEFAULT); + fbx_simple_property(TimeSpanStart, uint64_t, 0L); + fbx_simple_property(TimeSpanStop, uint64_t, 0L); + fbx_simple_property(CustomFrameRate, float, -1.0f); + +private: + const PropertyTable *props = nullptr; + const Document &doc; +}; + +/** DOM root for a FBX file */ +class Document { +public: + Document(const Parser &parser, const ImportSettings &settings); + + ~Document(); + + LazyObject *GetObject(uint64_t id) const; + + bool IsSafeToImport() const { + return SafeToImport; + } + + bool IsBinary() const { + return parser.IsBinary(); + } + + unsigned int FBXVersion() const { + return fbxVersion; + } + + const std::string &Creator() const { + return creator; + } + + // elements (in this order): Year, Month, Day, Hour, Second, Millisecond + const unsigned int *CreationTimeStamp() const { + return creationTimeStamp; + } + + const FileGlobalSettings *GlobalSettingsPtr() const { + return globals.get(); + } + + const PropertyTable *GetMetadataProperties() const { + return metadata_properties; + } + + const PropertyTemplateMap &Templates() const { + return templates; + } + + const ObjectMap &Objects() const { + return objects; + } + + const ImportSettings &Settings() const { + return settings; + } + + const ConnectionMap &ConnectionsBySource() const { + return src_connections; + } + + const ConnectionMap &ConnectionsByDestination() const { + return dest_connections; + } + + // note: the implicit rule in all DOM classes is to always resolve + // from destination to source (since the FBX object hierarchy is, + // with very few exceptions, a DAG, this avoids cycles). In all + // cases that may involve back-facing edges in the object graph, + // use LazyObject::IsBeingConstructed() to check. 
+ + std::vector<const Connection *> GetConnectionsBySourceSequenced(uint64_t source) const; + std::vector<const Connection *> GetConnectionsByDestinationSequenced(uint64_t dest) const; + + std::vector<const Connection *> GetConnectionsBySourceSequenced(uint64_t source, const char *classname) const; + std::vector<const Connection *> GetConnectionsByDestinationSequenced(uint64_t dest, const char *classname) const; + + std::vector<const Connection *> GetConnectionsBySourceSequenced(uint64_t source, + const char *const *classnames, size_t count) const; + std::vector<const Connection *> GetConnectionsByDestinationSequenced(uint64_t dest, + const char *const *classnames, + size_t count) const; + + const std::vector<const AnimationStack *> &AnimationStacks() const; + const std::vector<uint64_t> &GetAnimationStackIDs() const { + return animationStacks; + } + + const std::vector<uint64_t> &GetConstraintStackIDs() const { + return constraints; + } + + const std::vector<uint64_t> &GetBindPoseIDs() const { + return bind_poses; + }; + + const std::vector<uint64_t> &GetMaterialIDs() const { + return materials; + }; + + const std::vector<uint64_t> &GetSkinIDs() const { + return skins; + } + +private: + std::vector<const Connection *> GetConnectionsSequenced(uint64_t id, const ConnectionMap &) const; + std::vector<const Connection *> GetConnectionsSequenced(uint64_t id, bool is_src, + const ConnectionMap &, + const char *const *classnames, + size_t count) const; + bool ReadHeader(); + void ReadObjects(); + void ReadPropertyTemplates(); + void ReadConnections(); + void ReadGlobalSettings(); + +private: + const ImportSettings &settings; + + ObjectMap objects; + const Parser &parser; + bool SafeToImport = false; + + PropertyTemplateMap templates; + ConnectionMap src_connections; + ConnectionMap dest_connections; + + unsigned int fbxVersion = 0; + std::string creator; + unsigned int creationTimeStamp[7] = { 0 }; + + std::vector<uint64_t> animationStacks; + std::vector<uint64_t> bind_poses; + // constraints aren't in the tree / at least they are not easy to access. + std::vector<uint64_t> constraints; + std::vector<uint64_t> materials; + std::vector<uint64_t> skins; + mutable std::vector<const AnimationStack *> animationStacksResolved; + PropertyTable *metadata_properties = nullptr; + std::shared_ptr<FileGlobalSettings> globals = nullptr; +}; +} // namespace FBXDocParser + +namespace std { +template <> +struct hash<const FBXDocParser::Video> { + std::size_t operator()(const FBXDocParser::Video &video) const { + using std::hash; + using std::size_t; + using std::string; + + size_t res = 17; + res = res * 31 + hash<string>()(video.Name()); + res = res * 31 + hash<string>()(video.RelativeFilename()); + res = res * 31 + hash<string>()(video.Type()); + + return res; + } +}; +} // namespace std + +#endif // FBX_DOCUMENT_H diff --git a/modules/fbx/fbx_parser/FBXDocumentUtil.cpp b/modules/fbx/fbx_parser/FBXDocumentUtil.cpp new file mode 100644 index 0000000000..70b2b3bbe9 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXDocumentUtil.cpp @@ -0,0 +1,172 @@ +/*************************************************************************/ +/* FBXDocumentUtil.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXDocumentUtil.cpp + * @brief Implementation of the FBX DOM utility functions declared in FBXDocumentUtil.h + */ + +#include "FBXDocumentUtil.h" +#include "FBXDocument.h" +#include "FBXParser.h" +#include "FBXProperties.h" +#include "FBXUtil.h" +#include "core/string/print_string.h" + +namespace FBXDocParser { +namespace Util { + +void DOMError(const std::string &message) { + print_error("[FBX-DOM]" + String(message.c_str())); +} + +void DOMError(const std::string &message, const Token *token) { + print_error("[FBX-DOM]" + String(message.c_str()) + ";" + String(token->StringContents().c_str())); +} + +void DOMError(const std::string &message, const std::shared_ptr<Token> token) { + print_error("[FBX-DOM]" + String(message.c_str()) + ";" + String(token->StringContents().c_str())); +} + +void DOMError(const std::string &message, const Element *element /*= NULL*/) { + if (element) { + DOMError(message, element->KeyToken()); + } + print_error("[FBX-DOM] " + String(message.c_str())); +} + +void DOMError(const std::string &message, const std::shared_ptr<Element> element /*= NULL*/) { + if (element) { + DOMError(message, element->KeyToken()); + } + print_error("[FBX-DOM] " + String(message.c_str())); +} + +void DOMWarning(const std::string &message) { + print_verbose("[FBX-DOM] warning:" + String(message.c_str())); +} + +void DOMWarning(const std::string &message, const Token *token) { + print_verbose("[FBX-DOM] warning:" + String(message.c_str()) + ";" + String(token->StringContents().c_str())); +} + +void DOMWarning(const std::string &message, const Element *element /*= NULL*/) { + if (element) { + DOMWarning(message, element->KeyToken()); + return; + } + print_verbose("[FBX-DOM] warning:" + String(message.c_str())); +} + +void DOMWarning(const std::string &message, const std::shared_ptr<Token> token) { + print_verbose("[FBX-DOM] warning:" + String(message.c_str()) + ";" + String(token->StringContents().c_str())); +} + +void DOMWarning(const std::string &message, const std::shared_ptr<Element> element /*= NULL*/) { + if (element) { + DOMWarning(message, element->KeyToken()); + return; + } + print_verbose("[FBX-DOM] warning:" + String(message.c_str())); +} + +// ------------------------------------------------------------------------------------------------ +// fetch a property table and the corresponding property template +const PropertyTable *GetPropertyTable(const Document &doc, + const std::string &templateName, + const ElementPtr element, + const ScopePtr sc, + bool no_warn /*= false*/) { + // todo: make this an abstraction + const ElementPtr Properties70 = sc->GetElement("Properties70"); + const PropertyTable *templateProps = static_cast<const PropertyTable *>(nullptr); + + if (templateName.length()) { + PropertyTemplateMap::const_iterator it = doc.Templates().find(templateName); + if (it != doc.Templates().end()) { + templateProps = (*it).second; + } + } + + if (!Properties70 || !Properties70->Compound()) { + if (!no_warn) { + DOMWarning("property table (Properties70) not found", element); + } + if (templateProps) { + return templateProps; + } else { + return new const PropertyTable(); + } + } + + return new PropertyTable(Properties70, templateProps); +} +} // namespace Util +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXDocumentUtil.h b/modules/fbx/fbx_parser/FBXDocumentUtil.h new file mode 100644 index 0000000000..fb6eb04aea --- /dev/null +++ 
b/modules/fbx/fbx_parser/FBXDocumentUtil.h @@ -0,0 +1,141 @@ +/*************************************************************************/ +/* FBXDocumentUtil.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2012, assimp team +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXDocumentUtil.h + * @brief FBX internal utilities used by the DOM reading code + */ +#ifndef FBX_DOCUMENT_UTIL_H +#define FBX_DOCUMENT_UTIL_H + +#include "FBXDocument.h" +#include <memory> +#include <string> + +struct Token; +struct Element; + +namespace FBXDocParser { +namespace Util { + +// Parser errors +void DOMError(const std::string &message); +void DOMError(const std::string &message, const Token *token); +void DOMError(const std::string &message, const Element *element); +void DOMError(const std::string &message, const std::shared_ptr<Element> element); +void DOMError(const std::string &message, const std::shared_ptr<Token> token); + +// Parser warnings +void DOMWarning(const std::string &message); +void DOMWarning(const std::string &message, const Token *token); +void DOMWarning(const std::string &message, const Element *element); +void DOMWarning(const std::string &message, const std::shared_ptr<Token> token); +void DOMWarning(const std::string &message, const std::shared_ptr<Element> element); + +// fetch a property table and the corresponding property template +const PropertyTable *GetPropertyTable(const Document &doc, + const std::string &templateName, + const ElementPtr element, + const ScopePtr sc, + bool no_warn = false); + +// ------------------------------------------------------------------------------------------------ +template <typename T> +const T *ProcessSimpleConnection(const Connection &con, + bool is_object_property_conn, + const char *name, + const ElementPtr element, + const char **propNameOut = nullptr) { + if (is_object_property_conn && !con.PropertyName().length()) { + DOMWarning("expected incoming " + std::string(name) + + " link to be an object-object connection, ignoring", + element); + return nullptr; + } else if (!is_object_property_conn && con.PropertyName().length()) { + DOMWarning("expected incoming " + std::string(name) + + " link to be an object-property connection, ignoring", + element); + return nullptr; + } + + if (is_object_property_conn && propNameOut) { + // note: this is ok, the return value of PropertyValue() is guaranteed to + // remain valid and unchanged as long as the document exists. + *propNameOut = con.PropertyName().c_str(); + } + + // Cast Object to AnimationPlayer for example using safe functions, which return nullptr etc + Object *ob = con.SourceObject(); + ERR_FAIL_COND_V_MSG(!ob, nullptr, "Failed to load object from SourceObject ptr"); + return dynamic_cast<const T *>(ob); +} +} // namespace Util +} // namespace FBXDocParser + +#endif // FBX_DOCUMENT_UTIL_H diff --git a/modules/fbx/fbx_parser/FBXImportSettings.h b/modules/fbx/fbx_parser/FBXImportSettings.h new file mode 100644 index 0000000000..4abca6b990 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXImportSettings.h @@ -0,0 +1,173 @@ +/*************************************************************************/ +/* FBXImportSettings.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXImportSettings.h + * @brief FBX importer runtime configuration + */ +#ifndef FBX_IMPORT_SETTINGS_H +#define FBX_IMPORT_SETTINGS_H + +namespace FBXDocParser { + +/** FBX import settings, parts of which are publicly accessible via their corresponding AI_CONFIG constants */ +struct ImportSettings { + ImportSettings() : + strictMode(true), readAllLayers(true), readAllMaterials(true), readMaterials(true), readTextures(true), readCameras(true), readLights(true), readAnimations(true), readWeights(true), preservePivots(true), optimizeEmptyAnimationCurves(true), useLegacyEmbeddedTextureNaming(false), removeEmptyBones(true), convertToMeters(false) { + // empty + } + + /** enable strict mode: + * - only accept fbx 2012, 2013 files + * - on the slightest error, give up. + * + * Basically, strict mode means that the fbx file will actually + * be validated. Strict mode is off by default. */ + bool strictMode; + + /** specifies whether all geometry layers are read and scanned for + * usable data channels. The FBX spec indicates that many readers + * will only read the first channel and that this is in some way + * the recommended way- in reality, however, it happens a lot that + * vertex data is spread among multiple layers. The default + * value for this option is true.*/ + bool readAllLayers; + + /** specifies whether all materials are read, or only those that + * are referenced by at least one mesh. Reading all materials + * may make FBX reading a lot slower since all objects + * need to be processed . + * This bit is ignored unless readMaterials=true*/ + bool readAllMaterials; + + /** import materials (true) or skip them and assign a default + * material. The default value is true.*/ + bool readMaterials; + + /** import embedded textures? Default value is true.*/ + bool readTextures; + + /** import cameras? Default value is true.*/ + bool readCameras; + + /** import light sources? Default value is true.*/ + bool readLights; + + /** import animations (i.e. animation curves, the node + * skeleton is always imported). Default value is true. */ + bool readAnimations; + + /** read bones (vertex weights and deform info). + * Default value is true. */ + bool readWeights; + + /** preserve transformation pivots and offsets. Since these can + * not directly be represented in assimp, additional dummy + * nodes will be generated. Note that settings this to false + * can make animation import a lot slower. The default value + * is true. + * + * The naming scheme for the generated nodes is: + * <OriginalName>_$AssimpFbx$_<TransformName> + * + * where <TransformName> is one of + * RotationPivot + * RotationOffset + * PreRotation + * PostRotation + * ScalingPivot + * ScalingOffset + * Translation + * Scaling + * Rotation + **/ + bool preservePivots; + + /** do not import animation curves that specify a constant + * values matching the corresponding node transformation. + * The default value is true. 
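+	 * (Illustrative example, not from the original docs: a curve that only
+	 * restates the node's existing local transform adds no information and
+	 * is skipped when this flag is enabled.)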
*/ + bool optimizeEmptyAnimationCurves; + + /** use legacy naming for embedded textures eg: (*0, *1, *2) + */ + bool useLegacyEmbeddedTextureNaming; + + /** Empty bones shall be removed + */ + bool removeEmptyBones; + + /** Set to true to perform a conversion from cm to meter after the import + */ + bool convertToMeters; +}; +} // namespace FBXDocParser + +#endif // FBX_IMPORT_SETTINGS_H diff --git a/modules/fbx/fbx_parser/FBXMaterial.cpp b/modules/fbx/fbx_parser/FBXMaterial.cpp new file mode 100644 index 0000000000..5d25809d7b --- /dev/null +++ b/modules/fbx/fbx_parser/FBXMaterial.cpp @@ -0,0 +1,407 @@ +/*************************************************************************/ +/* FBXMaterial.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2020, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXMaterial.cpp + * @brief Assimp::FBX::Material and Assimp::FBX::Texture implementation + */ + +#include "ByteSwapper.h" +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXImportSettings.h" +#include "FBXParser.h" +#include "FBXProperties.h" + +#include "FBXUtil.h" +#include <algorithm> // std::transform + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +Material::Material(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + + const ElementPtr ShadingModel = sc->GetElement("ShadingModel"); + const ElementPtr MultiLayer = sc->GetElement("MultiLayer"); + + if (MultiLayer) { + multilayer = !!ParseTokenAsInt(GetRequiredToken(MultiLayer, 0)); + } + + if (ShadingModel) { + shading = ParseTokenAsString(GetRequiredToken(ShadingModel, 0)); + } else { + DOMWarning("shading mode not specified, assuming phong", element); + shading = "phong"; + } + + std::string templateName; + + if (shading == "phong") { + templateName = "Material.Phong"; + } else if (shading == "lambert") { + templateName = "Material.Lambert"; + } else if (shading == "unknown") { + templateName = "Material.StingRay"; + } else { + DOMWarning("shading mode not recognized: " + shading, element); + } + + props = GetPropertyTable(doc, templateName, element, sc); + + // resolve texture links + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID()); + for (const Connection *con : conns) { + // texture link to properties, not objects + if (!con->PropertyName().length()) { + continue; + } + + Object *ob = con->SourceObject(); + if (!ob) { + DOMWarning("failed to read source object for texture link, ignoring", element); + continue; + } + + const Texture *tex = dynamic_cast<const Texture *>(ob); + if (!tex) { + LayeredTexture *layeredTexture = dynamic_cast<LayeredTexture *>(ob); + + if (!layeredTexture) { + DOMWarning("source object for texture link is not a texture or layered texture, ignoring", element); + continue; + } + + const std::string &prop = con->PropertyName(); + if (layeredTextures.find(prop) != layeredTextures.end()) { + DOMWarning("duplicate layered texture link: " + prop, element); + } + + layeredTextures[prop] = layeredTexture; + layeredTexture->fillTexture(doc); + } else { + const std::string &prop = con->PropertyName(); + if (textures.find(prop) != textures.end()) { + DOMWarning("duplicate texture link: " + prop, element); + } + + textures[prop] = tex; + } + } +} + +// ------------------------------------------------------------------------------------------------ +Material::~Material() { + if (props != nullptr) { + delete props; + props = nullptr; + } +} + +// 
------------------------------------------------------------------------------------------------ +Texture::Texture(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name), uvScaling(1.0f, 1.0f), media(nullptr) { + const ScopePtr sc = GetRequiredScope(element); + + const ElementPtr Type = sc->GetElement("Type"); + const ElementPtr FileName = sc->GetElement("FileName"); + const ElementPtr RelativeFilename = sc->GetElement("RelativeFilename"); + const ElementPtr ModelUVTranslation = sc->GetElement("ModelUVTranslation"); + const ElementPtr ModelUVScaling = sc->GetElement("ModelUVScaling"); + const ElementPtr Texture_Alpha_Source = sc->GetElement("Texture_Alpha_Source"); + const ElementPtr Cropping = sc->GetElement("Cropping"); + + if (Type) { + type = ParseTokenAsString(GetRequiredToken(Type, 0)); + } + + if (FileName) { + fileName = ParseTokenAsString(GetRequiredToken(FileName, 0)); + } + + if (RelativeFilename) { + relativeFileName = ParseTokenAsString(GetRequiredToken(RelativeFilename, 0)); + } + + if (ModelUVTranslation) { + uvTrans = Vector2(ParseTokenAsFloat(GetRequiredToken(ModelUVTranslation, 0)), + ParseTokenAsFloat(GetRequiredToken(ModelUVTranslation, 1))); + } + + if (ModelUVScaling) { + uvScaling = Vector2(ParseTokenAsFloat(GetRequiredToken(ModelUVScaling, 0)), + ParseTokenAsFloat(GetRequiredToken(ModelUVScaling, 1))); + } + + if (Cropping) { + crop[0] = ParseTokenAsInt(GetRequiredToken(Cropping, 0)); + crop[1] = ParseTokenAsInt(GetRequiredToken(Cropping, 1)); + crop[2] = ParseTokenAsInt(GetRequiredToken(Cropping, 2)); + crop[3] = ParseTokenAsInt(GetRequiredToken(Cropping, 3)); + } else { + // vc8 doesn't support the crop() syntax in initialization lists + // (and vc9 WARNS about the new (i.e. compliant) behaviour). + crop[0] = crop[1] = crop[2] = crop[3] = 0; + } + + if (Texture_Alpha_Source) { + alphaSource = ParseTokenAsString(GetRequiredToken(Texture_Alpha_Source, 0)); + } + + props = GetPropertyTable(doc, "Texture.FbxFileTexture", element, sc); + + // 3DS Max and FBX SDK use "Scaling" and "Translation" instead of "ModelUVScaling" and "ModelUVTranslation". Use these properties if available. 
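+	// Illustrative note (not from the original sources): in the Properties70
+	// table these usually arrive as records shaped roughly like
+	//     P: "Scaling", "Vector", "", "A", 2, 2, 1
+	// (exact type/flag strings vary by exporter). If the lookups below fail,
+	// the ModelUVScaling / ModelUVTranslation values read above are kept.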
+ bool ok; + const Vector3 &scaling = PropertyGet<Vector3>(props, "Scaling", ok); + if (ok) { + uvScaling.x = scaling.x; + uvScaling.y = scaling.y; + } + + const Vector3 &trans = PropertyGet<Vector3>(props, "Translation", ok); + if (ok) { + uvTrans.x = trans.x; + uvTrans.y = trans.y; + } + + // resolve video links + if (doc.Settings().readTextures) { + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID()); + for (const Connection *con : conns) { + const Object *const ob = con->SourceObject(); + if (!ob) { + DOMWarning("failed to read source object for texture link, ignoring", element); + continue; + } + + const Video *const video = dynamic_cast<const Video *>(ob); + if (video) { + media = video; + } + } + } +} + +Texture::~Texture() { + if (props != nullptr) { + delete props; + props = nullptr; + } +} + +LayeredTexture::LayeredTexture(uint64_t id, const ElementPtr element, const Document & /*doc*/, const std::string &name) : + Object(id, element, name), blendMode(BlendMode_Modulate), alpha(1) { + const ScopePtr sc = GetRequiredScope(element); + + ElementPtr BlendModes = sc->GetElement("BlendModes"); + ElementPtr Alphas = sc->GetElement("Alphas"); + + if (BlendModes != 0) { + blendMode = (BlendMode)ParseTokenAsInt(GetRequiredToken(BlendModes, 0)); + } + if (Alphas != 0) { + alpha = ParseTokenAsFloat(GetRequiredToken(Alphas, 0)); + } +} + +LayeredTexture::~LayeredTexture() { +} + +void LayeredTexture::fillTexture(const Document &doc) { + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID()); + for (size_t i = 0; i < conns.size(); ++i) { + const Connection *con = conns.at(i); + + const Object *const ob = con->SourceObject(); + if (!ob) { + DOMWarning("failed to read source object for texture link, ignoring", element); + continue; + } + + const Texture *const tex = dynamic_cast<const Texture *>(ob); + + textures.push_back(tex); + } +} + +// ------------------------------------------------------------------------------------------------ +Video::Video(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name), contentLength(0), content(0) { + const ScopePtr sc = GetRequiredScope(element); + + const ElementPtr Type = sc->GetElement("Type"); + // File Version 7500 Crashes if this is not checked fully. 
+ // As of writing this comment 7700 exists, in August 2020 + ElementPtr FileName = nullptr; + if (HasElement(sc, "Filename")) { + FileName = (ElementPtr)sc->GetElement("Filename"); + } else if (HasElement(sc, "FileName")) { + FileName = (ElementPtr)sc->GetElement("FileName"); + } else { + print_error("file has invalid video material returning..."); + return; + } + const ElementPtr RelativeFilename = sc->GetElement("RelativeFilename"); + const ElementPtr Content = sc->GetElement("Content"); + + if (Type) { + type = ParseTokenAsString(GetRequiredToken(Type, 0)); + } + + if (FileName) { + fileName = ParseTokenAsString(GetRequiredToken(FileName, 0)); + } + + if (RelativeFilename) { + relativeFileName = ParseTokenAsString(GetRequiredToken(RelativeFilename, 0)); + } + + if (Content && !Content->Tokens().empty()) { + //this field is omitted when the embedded texture is already loaded, let's ignore if it's not found + try { + const Token *token = GetRequiredToken(Content, 0); + const char *data = token->begin(); + if (!token->IsBinary()) { + if (*data != '"') { + DOMError("embedded content is not surrounded by quotation marks", element); + } else { + size_t targetLength = 0; + auto numTokens = Content->Tokens().size(); + // First time compute size (it could be large like 64Gb and it is good to allocate it once) + for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) { + const Token *dataToken = GetRequiredToken(Content, tokenIdx); + size_t tokenLength = dataToken->end() - dataToken->begin() - 2; // ignore double quotes + const char *base64data = dataToken->begin() + 1; + const size_t outLength = Util::ComputeDecodedSizeBase64(base64data, tokenLength); + if (outLength == 0) { + DOMError("Corrupted embedded content found", element); + } + targetLength += outLength; + } + if (targetLength == 0) { + DOMError("Corrupted embedded content found", element); + } else { + content = new uint8_t[targetLength]; + contentLength = static_cast<uint64_t>(targetLength); + size_t dst_offset = 0; + for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) { + const Token *dataToken = GetRequiredToken(Content, tokenIdx); + ERR_FAIL_COND(!dataToken); + size_t tokenLength = dataToken->end() - dataToken->begin() - 2; // ignore double quotes + const char *base64data = dataToken->begin() + 1; + dst_offset += Util::DecodeBase64(base64data, tokenLength, content + dst_offset, targetLength - dst_offset); + } + if (targetLength != dst_offset) { + delete[] content; + contentLength = 0; + DOMError("Corrupted embedded content found", element); + } + } + } + } else if (static_cast<size_t>(token->end() - data) < 5) { + DOMError("binary data array is too short, need five (5) bytes for type signature and element count", element); + } else if (*data != 'R') { + DOMWarning("video content is not raw binary data, ignoring", element); + } else { + // read number of elements + uint32_t len = 0; + ::memcpy(&len, data + 1, sizeof(len)); + AI_SWAP4(len); + + contentLength = len; + + content = new uint8_t[len]; + ::memcpy(content, data + 5, len); + } + } catch (...) 
{ + // //we don't need the content data for contents that has already been loaded + // ASSIMP_LOG_VERBOSE_DEBUG_F("Caught exception in FBXMaterial (likely because content was already loaded): ", + // runtimeError.what()); + } + } + + props = GetPropertyTable(doc, "Video.FbxVideo", element, sc); +} + +Video::~Video() { + if (content) { + delete[] content; + } + + if (props != nullptr) { + delete props; + props = nullptr; + } +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXMeshGeometry.cpp b/modules/fbx/fbx_parser/FBXMeshGeometry.cpp new file mode 100644 index 0000000000..4fa5fc0ac9 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXMeshGeometry.cpp @@ -0,0 +1,485 @@ +/*************************************************************************/ +/* FBXMeshGeometry.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXMeshGeometry.cpp + * @brief Assimp::FBX::MeshGeometry implementation + */ + +#include <functional> + +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXImportSettings.h" +#include "FBXMeshGeometry.h" +#include "core/math/vector3.h" + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +Geometry::Geometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Object(id, element, name), skin() { + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), "Deformer"); + for (const Connection *con : conns) { + const Skin *sk = ProcessSimpleConnection<Skin>(*con, false, "Skin -> Geometry", element); + if (sk) { + skin = sk; + } + const BlendShape *bsp = ProcessSimpleConnection<BlendShape>(*con, false, "BlendShape -> Geometry", + element); + if (bsp) { + blendShapes.push_back(bsp); + } + } +} + +// ------------------------------------------------------------------------------------------------ +Geometry::~Geometry() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +const std::vector<const BlendShape *> &Geometry::get_blend_shapes() const { + return blendShapes; +} + +// ------------------------------------------------------------------------------------------------ +const Skin *Geometry::DeformerSkin() const { + return skin; +} + +// ------------------------------------------------------------------------------------------------ +MeshGeometry::MeshGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Geometry(id, element, name, doc) { + print_verbose("mesh name: " + String(name.c_str())); + + ScopePtr sc = element->Compound(); + ERR_FAIL_COND_MSG(sc == nullptr, "failed to read geometry, prevented crash"); + ERR_FAIL_COND_MSG(!HasElement(sc, "Vertices"), "Detected mesh with no vertexes, didn't populate the mesh"); + + // must have Mesh elements: + const ElementPtr Vertices = GetRequiredElement(sc, "Vertices", element); + const ElementPtr PolygonVertexIndex = GetRequiredElement(sc, "PolygonVertexIndex", element); + + if (HasElement(sc, "Edges")) { + const ElementPtr element_edges = GetRequiredElement(sc, "Edges", element); + ParseVectorDataArray(m_edges, element_edges); + } + + // read mesh data into arrays + ParseVectorDataArray(m_vertices, Vertices); + ParseVectorDataArray(m_face_indices, PolygonVertexIndex); + + ERR_FAIL_COND_MSG(m_vertices.empty(), "mesh with no vertexes in FBX file, did you mean to delete it?"); + ERR_FAIL_COND_MSG(m_face_indices.empty(), "mesh has no faces, was this intended?"); + + // Retrieve layer elements, for all of the mesh + const ElementCollection &Layer = sc->GetCollection("Layer"); + + // Store all layers + std::vector<std::tuple<int, std::string>> valid_layers; 
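+	// Illustrative sketch (assumed ASCII-FBX layout, exporters vary) of what the
+	// loop below walks to fill `valid_layers` with (TypedIndex, Type) pairs:
+	//     Layer: 0 {
+	//         LayerElement:  {
+	//             Type: "LayerElementNormal"
+	//             TypedIndex: 0
+	//         }
+	//     }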
+ + // now read the sub mesh information from the geometry (normals, uvs, etc) + for (ElementMap::const_iterator it = Layer.first; it != Layer.second; ++it) { + const ScopePtr layer = GetRequiredScope(it->second); + const ElementCollection &LayerElement = layer->GetCollection("LayerElement"); + for (ElementMap::const_iterator eit = LayerElement.first; eit != LayerElement.second; ++eit) { + std::string layer_name = eit->first; + ElementPtr element_layer = eit->second; + const ScopePtr layer_element = GetRequiredScope(element_layer); + + // Actual usable 'type' LayerElementUV, LayerElementNormal, etc + const ElementPtr Type = GetRequiredElement(layer_element, "Type"); + const ElementPtr TypedIndex = GetRequiredElement(layer_element, "TypedIndex"); + const std::string &type = ParseTokenAsString(GetRequiredToken(Type, 0)); + const int typedIndex = ParseTokenAsInt(GetRequiredToken(TypedIndex, 0)); + + // we only need the layer name and the typed index. + valid_layers.push_back(std::tuple<int, std::string>(typedIndex, type)); + } + } + + // get object / mesh directly from the FBX by the element ID. + const ScopePtr top = GetRequiredScope(element); + + // iterate over all layers for the mesh (uvs, normals, smoothing groups, colors, etc) + for (size_t x = 0; x < valid_layers.size(); x++) { + const int layer_id = std::get<0>(valid_layers[x]); + const std::string &layer_type_name = std::get<1>(valid_layers[x]); + + // Get collection of elements from the XLayerMap (example: LayerElementUV) + // this must contain our proper elements. + + // This is stupid, because it means we select them ALL not just the one we want. + // but it's fine we can match by id. + + const ElementCollection &candidates = top->GetCollection(layer_type_name); + + ElementMap::const_iterator iter; + for (iter = candidates.first; iter != candidates.second; ++iter) { + const ScopePtr layer_scope = GetRequiredScope(iter->second); + TokenPtr layer_token = GetRequiredToken(iter->second, 0); + const int index = ParseTokenAsInt(layer_token); + + ERR_FAIL_COND_MSG(layer_scope == nullptr, "prevented crash, layer scope is invalid"); + + if (index == layer_id) { + const std::string &MappingInformationType = ParseTokenAsString(GetRequiredToken( + GetRequiredElement(layer_scope, "MappingInformationType"), 0)); + + const std::string &ReferenceInformationType = ParseTokenAsString(GetRequiredToken( + GetRequiredElement(layer_scope, "ReferenceInformationType"), 0)); + + if (layer_type_name == "LayerElementUV") { + if (index == 0) { + m_uv_0 = resolve_vertex_data_array<Vector2>(layer_scope, MappingInformationType, ReferenceInformationType, "UV"); + } else if (index == 1) { + m_uv_1 = resolve_vertex_data_array<Vector2>(layer_scope, MappingInformationType, ReferenceInformationType, "UV"); + } + } else if (layer_type_name == "LayerElementMaterial") { + m_material_allocation_ids = resolve_vertex_data_array<int>(layer_scope, MappingInformationType, ReferenceInformationType, "Materials"); + } else if (layer_type_name == "LayerElementNormal") { + m_normals = resolve_vertex_data_array<Vector3>(layer_scope, MappingInformationType, ReferenceInformationType, "Normals"); + } else if (layer_type_name == "LayerElementColor") { + m_colors = resolve_vertex_data_array<Color>(layer_scope, MappingInformationType, ReferenceInformationType, "Colors", "ColorIndex"); + // NOTE: this is a useful sanity check to ensure you're getting any color data which is not default. 
+
+					// const Color first_color_check = m_colors.data[0];
+					// bool colors_are_all_the_same = true;
+					// size_t i = 1;
+					// for(i = 1; i < m_colors.data.size(); i++)
+					// {
+					//	const Color current_color = m_colors.data[i];
+					//	if(current_color.is_equal_approx(first_color_check))
+					//	{
+					//		continue;
+					//	}
+					//	else
+					//	{
+					//		colors_are_all_the_same = false;
+					//		break;
+					//	}
+					// }
+					//
+					// if(colors_are_all_the_same)
+					// {
+					//	print_error("Color serialisation is not working for vertex colors, some should be different in the test asset.");
+					// }
+					// else
+					// {
+					//	print_verbose("Color array has unique colors at index: " + itos(i));
+					// }
+				}
+			}
+		}
+	}
+
+	print_verbose("Mesh statistics \nuv_0: " + m_uv_0.debug_info() + "\nuv_1: " + m_uv_1.debug_info() + "\nvertices: " + itos(m_vertices.size()));
+
+	// Compose the edges of the mesh.
+	// You can see how the edges are stored in the FBX here: https://gist.github.com/AndreaCatania/da81840f5aa3b2feedf189e26c5a87e6
+	for (size_t i = 0; i < m_edges.size(); i += 1) {
+		ERR_FAIL_INDEX_MSG((size_t)m_edges[i], m_face_indices.size(), "The edge is pointing to a weird location in the face indices. The FBX is corrupted.");
+		int polygon_vertex_0 = m_face_indices[m_edges[i]];
+		int polygon_vertex_1;
+		if (polygon_vertex_0 < 0) {
+			// polygon_vertex_0 points to the end of a polygon, so it's
+			// connected with the beginning of the polygon in the edge list.
+
+			// First, invert the vertex.
+			polygon_vertex_0 = ~polygon_vertex_0;
+
+			// Search for the start vertex of the polygon.
+			// Iterate from the polygon_vertex_index backward till the start of
+			// the polygon is found.
+			ERR_FAIL_COND_MSG(m_edges[i] - 1 < 0, "The polygon has not yet started and we already need the final vertex. This FBX is corrupted.");
+			bool found_it = false;
+			for (int x = m_edges[i] - 1; x >= 0; x -= 1) {
+				if (x == 0) {
+					// This is definitely the start.
+					polygon_vertex_1 = m_face_indices[x];
+					found_it = true;
+					break;
+				} else if (m_face_indices[x] < 0) {
+					// This is the end of the previous polygon, so the next is
+					// the start of the polygon we need.
+					polygon_vertex_1 = m_face_indices[x + 1];
+					found_it = true;
+					break;
+				}
+			}
+			// Given the algorithm above, this check should never trigger, because
+			// the first vertex is always considered the beginning of a polygon.
+			ERR_FAIL_COND_MSG(found_it == false, "It was not possible to find the first vertex of this polygon. The FBX file is corrupted.");
+
+		} else {
+			ERR_FAIL_INDEX_MSG((size_t)(m_edges[i] + 1), m_face_indices.size(), "The other FBX edge seems to point to an invalid vertex. This FBX file is corrupted.");
+			// Take the next vertex.
+			polygon_vertex_1 = m_face_indices[m_edges[i] + 1];
+		}
+
+		if (polygon_vertex_1 < 0) {
+			// We don't care whether `polygon_vertex_1` is the end of the polygon,
+			// so we can just invert it.
+			polygon_vertex_1 = ~polygon_vertex_1;
+		}
+
+		ERR_FAIL_COND_MSG(polygon_vertex_0 == polygon_vertex_1, "The vertices of this edge can't be the same; is this a point? This FBX file is corrupted.");
+
+		// Just create the edge.
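+		// Worked example (assumed input, for illustration): a quad 0,1,2,3 is stored
+		// in PolygonVertexIndex as 0,1,2,-4 -- the last index is bitwise-negated to
+		// mark the end of the polygon, and ~(-4) recovers vertex 3. An edge whose
+		// first index lands on -4 therefore pairs vertex 3 with the polygon's first
+		// vertex, found by the backward scan above.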
+ edge_map.push_back({ polygon_vertex_0, polygon_vertex_1 }); + } +} + +MeshGeometry::~MeshGeometry() { + // empty +} + +const std::vector<Vector3> &MeshGeometry::get_vertices() const { + return m_vertices; +} + +const std::vector<MeshGeometry::Edge> &MeshGeometry::get_edge_map() const { + return edge_map; +} + +const std::vector<int> &MeshGeometry::get_polygon_indices() const { + return m_face_indices; +} + +const std::vector<int> &MeshGeometry::get_edges() const { + return m_edges; +} + +const MeshGeometry::MappingData<Vector3> &MeshGeometry::get_normals() const { + return m_normals; +} + +const MeshGeometry::MappingData<Vector2> &MeshGeometry::get_uv_0() const { + //print_verbose("get uv_0 " + m_uv_0.debug_info() ); + return m_uv_0; +} + +const MeshGeometry::MappingData<Vector2> &MeshGeometry::get_uv_1() const { + //print_verbose("get uv_1 " + m_uv_1.debug_info() ); + return m_uv_1; +} + +const MeshGeometry::MappingData<Color> &MeshGeometry::get_colors() const { + return m_colors; +} + +const MeshGeometry::MappingData<int> &MeshGeometry::get_material_allocation_id() const { + return m_material_allocation_ids; +} + +int MeshGeometry::get_edge_id(const std::vector<Edge> &p_map, int p_vertex_a, int p_vertex_b) { + for (size_t i = 0; i < p_map.size(); i += 1) { + if ((p_map[i].vertex_0 == p_vertex_a && p_map[i].vertex_1 == p_vertex_b) || (p_map[i].vertex_1 == p_vertex_a && p_map[i].vertex_0 == p_vertex_b)) { + return i; + } + } + return -1; +} + +MeshGeometry::Edge MeshGeometry::get_edge(const std::vector<Edge> &p_map, int p_id) { + ERR_FAIL_INDEX_V_MSG((size_t)p_id, p_map.size(), Edge({ -1, -1 }), "ID not found."); + return p_map[p_id]; +} + +template <class T> +MeshGeometry::MappingData<T> MeshGeometry::resolve_vertex_data_array( + const ScopePtr source, + const std::string &MappingInformationType, + const std::string &ReferenceInformationType, + const std::string &dataElementName, + const std::string &indexOverride) { + ERR_FAIL_COND_V_MSG(source == nullptr, MappingData<T>(), "Invalid scope operator preventing memory corruption"); + + // UVIndex, MaterialIndex, NormalIndex, etc.. + std::string indexDataElementName; + + if (indexOverride != "") { + // Colors should become ColorIndex + indexDataElementName = indexOverride; + } else { + // Some indexes will exist. 
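+		// For example, dataElementName "UV" becomes "UVIndex" and "Normals"
+		// becomes "NormalsIndex"; colors pass the explicit "ColorIndex" override
+		// above because the FBX element is not named "ColorsIndex".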
+ indexDataElementName = dataElementName + "Index"; + } + + // goal: expand everything to be per vertex + + ReferenceType l_ref_type = ReferenceType::direct; + + // Read the reference type into the enumeration + if (ReferenceInformationType == "IndexToDirect") { + l_ref_type = ReferenceType::index_to_direct; + } else if (ReferenceInformationType == "Index") { + // set non legacy index to direct mapping + l_ref_type = ReferenceType::index; + } else if (ReferenceInformationType == "Direct") { + l_ref_type = ReferenceType::direct; + } else { + ERR_FAIL_V_MSG(MappingData<T>(), "invalid reference type has the FBX format changed?"); + } + + MapType l_map_type = MapType::none; + + if (MappingInformationType == "None") { + l_map_type = MapType::none; + } else if (MappingInformationType == "ByVertice") { + l_map_type = MapType::vertex; + } else if (MappingInformationType == "ByPolygonVertex") { + l_map_type = MapType::polygon_vertex; + } else if (MappingInformationType == "ByPolygon") { + l_map_type = MapType::polygon; + } else if (MappingInformationType == "ByEdge") { + l_map_type = MapType::edge; + } else if (MappingInformationType == "AllSame") { + l_map_type = MapType::all_the_same; + } else { + print_error("invalid mapping type: " + String(MappingInformationType.c_str())); + } + + // create mapping data + MeshGeometry::MappingData<T> tempData; + tempData.map_type = l_map_type; + tempData.ref_type = l_ref_type; + + // parse data into array + ParseVectorDataArray(tempData.data, GetRequiredElement(source, dataElementName)); + + // index array wont always exist + const ElementPtr element = GetOptionalElement(source, indexDataElementName); + if (element) { + ParseVectorDataArray(tempData.index, element); + } + + return tempData; +} +// ------------------------------------------------------------------------------------------------ +ShapeGeometry::ShapeGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Geometry(id, element, name, doc) { + const ScopePtr sc = element->Compound(); + if (nullptr == sc) { + DOMError("failed to read Geometry object (class: Shape), no data scope found"); + } + const ElementPtr Indexes = GetRequiredElement(sc, "Indexes", element); + const ElementPtr Normals = GetRequiredElement(sc, "Normals", element); + const ElementPtr Vertices = GetRequiredElement(sc, "Vertices", element); + ParseVectorDataArray(m_indices, Indexes); + ParseVectorDataArray(m_vertices, Vertices); + ParseVectorDataArray(m_normals, Normals); +} + +// ------------------------------------------------------------------------------------------------ +ShapeGeometry::~ShapeGeometry() { + // empty +} +// ------------------------------------------------------------------------------------------------ +const std::vector<Vector3> &ShapeGeometry::GetVertices() const { + return m_vertices; +} +// ------------------------------------------------------------------------------------------------ +const std::vector<Vector3> &ShapeGeometry::GetNormals() const { + return m_normals; +} +// ------------------------------------------------------------------------------------------------ +const std::vector<unsigned int> &ShapeGeometry::GetIndices() const { + return m_indices; +} +// ------------------------------------------------------------------------------------------------ +LineGeometry::LineGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc) : + Geometry(id, element, name, doc) { + const ScopePtr sc = element->Compound(); + if (!sc) { + 
DOMError("failed to read Geometry object (class: Line), no data scope found"); + } + const ElementPtr Points = GetRequiredElement(sc, "Points", element); + const ElementPtr PointsIndex = GetRequiredElement(sc, "PointsIndex", element); + ParseVectorDataArray(m_vertices, Points); + ParseVectorDataArray(m_indices, PointsIndex); +} + +// ------------------------------------------------------------------------------------------------ +LineGeometry::~LineGeometry() { + // empty +} +// ------------------------------------------------------------------------------------------------ +const std::vector<Vector3> &LineGeometry::GetVertices() const { + return m_vertices; +} +// ------------------------------------------------------------------------------------------------ +const std::vector<int> &LineGeometry::GetIndices() const { + return m_indices; +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXMeshGeometry.h b/modules/fbx/fbx_parser/FBXMeshGeometry.h new file mode 100644 index 0000000000..666da46edd --- /dev/null +++ b/modules/fbx/fbx_parser/FBXMeshGeometry.h @@ -0,0 +1,263 @@ +/*************************************************************************/ +/* FBXMeshGeometry.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above +copyright notice, this list of conditions and the +following disclaimer. + +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the +following disclaimer in the documentation and/or other +materials provided with the distribution. 
+ +* Neither the name of the assimp team, nor the names of its +contributors may be used to endorse or promote products +derived from this software without specific prior +written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +#ifndef FBX_MESH_GEOMETRY_H +#define FBX_MESH_GEOMETRY_H + +#include "core/math/color.h" +#include "core/math/vector2.h" +#include "core/math/vector3.h" +#include "core/templates/vector.h" + +#include "FBXDocument.h" +#include "FBXParser.h" + +#include <iostream> + +#define AI_MAX_NUMBER_OF_TEXTURECOORDS 4 +#define AI_MAX_NUMBER_OF_COLOR_SETS 8 + +namespace FBXDocParser { + +/* + * DOM base class for all kinds of FBX geometry + */ +class Geometry : public Object { +public: + Geometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc); + virtual ~Geometry(); + + /** Get the Skin attached to this geometry or NULL */ + const Skin *DeformerSkin() const; + + const std::vector<const BlendShape *> &get_blend_shapes() const; + + size_t get_blend_shape_count() const { + return blendShapes.size(); + } + +private: + const Skin *skin = nullptr; + std::vector<const BlendShape *> blendShapes; +}; + +typedef std::vector<int> MatIndexArray; + +/// Map Geometry stores the FBX file information. +/// +/// # FBX doc. +/// ## Reference type declared: +/// - Direct (directly related to the mapping information type) +/// - IndexToDirect (Map with key value, meaning depends on the MappingInformationType) +/// +/// ## Map Type: +/// * None The mapping is undetermined. +/// * ByVertex There will be one mapping coordinate for each surface control point/vertex (ControlPoint is a vertex). +/// * If you have direct reference type verticies[x] +/// * If you have IndexToDirect reference type the UV +/// * ByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part. (Sorted by polygon, referencing vertex) +/// * ByPolygon There can be only one mapping coordinate for the whole polygon. +/// * One mapping per polygon polygon x has this normal x +/// * For each vertex of the polygon then set the normal to x +/// * ByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements. (Mapping is referencing the edge id) +/// * AllSame There can be only one mapping coordinate for the whole surface. +class MeshGeometry : public Geometry { +public: + enum class MapType { + none = 0, // No mapping type. Stored as "None". + vertex, // Maps per vertex. Stored as "ByVertice". + polygon_vertex, // Maps per polygon vertex. Stored as "ByPolygonVertex". 
+		polygon, // Maps per polygon. Stored as "ByPolygon".
+		edge, // Maps per edge. Stored as "ByEdge".
+		all_the_same // Maps to everything. Stored as "AllSame".
+	};
+
+	enum class ReferenceType {
+		direct = 0,
+		index = 1,
+		index_to_direct = 2
+	};
+
+	template <class T>
+	struct MappingData {
+		MapType map_type = MapType::none;
+		ReferenceType ref_type = ReferenceType::direct;
+		std::vector<T> data;
+		/// The meaning of the indices depends on the `MapType`.
+		/// If `ref_type` is `direct` this index array is left empty.
+		std::vector<int> index;
+
+		String debug_info() const {
+			return "indexes: " + itos(index.size()) + " data: " + itos(data.size());
+		}
+	};
+
+	struct Edge {
+		int vertex_0 = 0, vertex_1 = 0;
+		Edge(int v0, int v1) :
+				vertex_0(v0), vertex_1(v1) {}
+		Edge() {}
+	};
+
+public:
+	MeshGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc);
+
+	virtual ~MeshGeometry();
+
+	const std::vector<Vector3> &get_vertices() const;
+	const std::vector<Edge> &get_edge_map() const;
+	const std::vector<int> &get_polygon_indices() const;
+	const std::vector<int> &get_edges() const;
+	const MappingData<Vector3> &get_normals() const;
+	const MappingData<Vector2> &get_uv_0() const;
+	const MappingData<Vector2> &get_uv_1() const;
+	const MappingData<Color> &get_colors() const;
+	const MappingData<int> &get_material_allocation_id() const;
+
+	/// Returns -1 if the vertices don't form an edge. Vertex order doesn't
+	/// matter.
+	static int get_edge_id(const std::vector<Edge> &p_map, int p_vertex_a, int p_vertex_b);
+	/// Returns the edge identified by that ID, or an edge with -1 vertices if
+	/// the ID is not valid.
+	static Edge get_edge(const std::vector<Edge> &p_map, int p_id);
+
+private:
+	// Read directly from the FBX file.
+	std::vector<Vector3> m_vertices;
+	std::vector<Edge> edge_map;
+	std::vector<int> m_face_indices;
+	std::vector<int> m_edges;
+	MappingData<Vector3> m_normals;
+	MappingData<Vector2> m_uv_0; // first uv coordinates
+	MappingData<Vector2> m_uv_1; // second uv coordinates
+	MappingData<Color> m_colors; // colors for the mesh
+	MappingData<int> m_material_allocation_ids; // slot of material used
+
+	template <class T>
+	MappingData<T> resolve_vertex_data_array(
+			const ScopePtr source,
+			const std::string &MappingInformationType,
+			const std::string &ReferenceInformationType,
+			const std::string &dataElementName,
+			const std::string &indexOverride = "");
+};
+
+/*
+ * DOM class for FBX geometry of type "Shape"
+ */
+class ShapeGeometry : public Geometry {
+public:
+	/** The class constructor */
+	ShapeGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc);
+
+	/** The class destructor */
+	virtual ~ShapeGeometry();
+
+	/** Get a list of all vertex points, non-unique */
+	const std::vector<Vector3> &GetVertices() const;
+
+	/** Get a list of all vertex normals or an empty array if
+	 * no normals are specified. */
+	const std::vector<Vector3> &GetNormals() const;
+
+	/** Return list of vertex indices.
*/ + const std::vector<unsigned int> &GetIndices() const; + +private: + std::vector<Vector3> m_vertices; + std::vector<Vector3> m_normals; + std::vector<unsigned int> m_indices; +}; +/** +* DOM class for FBX geometry of type "Line" +*/ +class LineGeometry : public Geometry { +public: + /** The class constructor */ + LineGeometry(uint64_t id, const ElementPtr element, const std::string &name, const Document &doc); + + /** The class destructor */ + virtual ~LineGeometry(); + + /** Get a list of all vertex points, non-unique*/ + const std::vector<Vector3> &GetVertices() const; + + /** Return list of vertex indices. */ + const std::vector<int> &GetIndices() const; + +private: + std::vector<Vector3> m_vertices; + std::vector<int> m_indices; +}; +} // namespace FBXDocParser + +#endif // FBX_MESH_GEOMETRY_H diff --git a/modules/fbx/fbx_parser/FBXModel.cpp b/modules/fbx/fbx_parser/FBXModel.cpp new file mode 100644 index 0000000000..5867f59d6a --- /dev/null +++ b/modules/fbx/fbx_parser/FBXModel.cpp @@ -0,0 +1,176 @@ +/*************************************************************************/ +/* FBXModel.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. 
+ +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXModel.cpp + * @brief Assimp::FBX::Model implementation + */ + +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXMeshGeometry.h" +#include "FBXParser.h" + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +Model::Model(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name), shading("Y") { + const ScopePtr sc = GetRequiredScope(element); + const ElementPtr Shading = sc->GetElement("Shading"); + const ElementPtr Culling = sc->GetElement("Culling"); + + if (Shading) { + shading = GetRequiredToken(Shading, 0)->StringContents(); + } + + if (Culling) { + culling = ParseTokenAsString(GetRequiredToken(Culling, 0)); + } + + props = GetPropertyTable(doc, "Model.FbxNode", element, sc); + ResolveLinks(element, doc); +} + +// ------------------------------------------------------------------------------------------------ +Model::~Model() { + if (props != nullptr) { + delete props; + props = nullptr; + } +} + +ModelLimbNode::ModelLimbNode(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Model(id, element, doc, name){}; + +ModelLimbNode::~ModelLimbNode() { +} + +// ------------------------------------------------------------------------------------------------ +void Model::ResolveLinks(const ElementPtr element, const Document &doc) { + const char *const arr[] = { "Geometry", "Material", "NodeAttribute" }; + + // resolve material + const std::vector<const Connection *> &conns = doc.GetConnectionsByDestinationSequenced(ID(), arr, 3); + + materials.reserve(conns.size()); + geometry.reserve(conns.size()); + attributes.reserve(conns.size()); + for (const Connection *con : conns) { + // material and geometry links should be Object-Object connections + if (con->PropertyName().length()) { + continue; + } + + const Object *const ob = con->SourceObject(); + if (!ob) { + //DOMWarning("failed to read source object for incoming Model link, ignoring",&element); + continue; + } + + const Material *const mat = dynamic_cast<const Material *>(ob); + if (mat) { + materials.push_back(mat); + continue; + } + + const Geometry *const geo = dynamic_cast<const Geometry *>(ob); + if (geo) { + geometry.push_back(geo); + continue; + } + + const NodeAttribute *const att = dynamic_cast<const NodeAttribute *>(ob); + if (att) { + attributes.push_back(att); + 
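			// NodeAttribute subclasses (cameras, lights, and the Null/LimbNode markers
			// defined in FBXNodeAttribute.cpp) are collected here alongside the model's
			// materials and geometry.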
continue; + } + + DOMWarning("source object for model link is neither Material, NodeAttribute nor Geometry, ignoring", element); + continue; + } +} + +// ------------------------------------------------------------------------------------------------ +bool Model::IsNull() const { + const std::vector<const NodeAttribute *> &attrs = GetAttributes(); + for (const NodeAttribute *att : attrs) { + const Null *null_tag = dynamic_cast<const Null *>(att); + if (null_tag) { + return true; + } + } + + return false; +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXNodeAttribute.cpp b/modules/fbx/fbx_parser/FBXNodeAttribute.cpp new file mode 100644 index 0000000000..c5051bd147 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXNodeAttribute.cpp @@ -0,0 +1,183 @@ +/*************************************************************************/ +/* FBXNodeAttribute.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXNoteAttribute.cpp + * @brief Assimp::FBX::NodeAttribute (and subclasses) implementation + */ + +#include "FBXDocument.h" +#include "FBXDocumentUtil.h" +#include "FBXParser.h" +#include <iostream> + +namespace FBXDocParser { +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +NodeAttribute::NodeAttribute(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name), props() { + const ScopePtr sc = GetRequiredScope(element); + + const std::string &classname = ParseTokenAsString(GetRequiredToken(element, 2)); + + // hack on the deriving type but Null/LimbNode attributes are the only case in which + // the property table is by design absent and no warning should be generated + // for it. + const bool is_null_or_limb = !strcmp(classname.c_str(), "Null") || !strcmp(classname.c_str(), "LimbNode"); + props = GetPropertyTable(doc, "NodeAttribute.Fbx" + classname, element, sc, is_null_or_limb); +} + +// ------------------------------------------------------------------------------------------------ +NodeAttribute::~NodeAttribute() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +CameraSwitcher::CameraSwitcher(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + NodeAttribute(id, element, doc, name) { + const ScopePtr sc = GetRequiredScope(element); + const ElementPtr CameraId = sc->GetElement("CameraId"); + const ElementPtr CameraName = sc->GetElement("CameraName"); + const ElementPtr CameraIndexName = sc->GetElement("CameraIndexName"); + + if (CameraId) { + cameraId = ParseTokenAsInt(GetRequiredToken(CameraId, 0)); + } + + if (CameraName) { + cameraName = GetRequiredToken(CameraName, 0)->StringContents(); + } + + if (CameraIndexName && CameraIndexName->Tokens().size()) { + cameraIndexName = GetRequiredToken(CameraIndexName, 0)->StringContents(); + } +} + +// ------------------------------------------------------------------------------------------------ +CameraSwitcher::~CameraSwitcher() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +Camera::Camera(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + NodeAttribute(id, element, doc, name) { + // empty +} + +// ------------------------------------------------------------------------------------------------ +Camera::~Camera() { + // empty +} + +// ------------------------------------------------------------------------------------------------ +Light::Light(uint64_t id, 
const ElementPtr element, const Document &doc, const std::string &name) : + NodeAttribute(id, element, doc, name) { + // empty +} + +// ------------------------------------------------------------------------------------------------ +Light::~Light() { +} + +// ------------------------------------------------------------------------------------------------ +Null::Null(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + NodeAttribute(id, element, doc, name) { +} + +// ------------------------------------------------------------------------------------------------ +Null::~Null() { +} + +// ------------------------------------------------------------------------------------------------ +LimbNode::LimbNode(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + NodeAttribute(id, element, doc, name) { + //std::cout << "limb node: " << name << std::endl; + //const Scope &sc = GetRequiredScope(element); + + //const ElementPtr const TypeFlag = sc["TypeFlags"]; + + // keep this it can dump new properties for you + // for( auto element : sc.Elements()) + // { + // std::cout << "limbnode element: " << element.first << std::endl; + // } + + // if(TypeFlag) + // { + // // std::cout << "type flag: " << GetRequiredToken(*TypeFlag, 0).StringContents() << std::endl; + // } +} + +// ------------------------------------------------------------------------------------------------ +LimbNode::~LimbNode() { +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXParseTools.h b/modules/fbx/fbx_parser/FBXParseTools.h new file mode 100644 index 0000000000..edc52009b3 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXParseTools.h @@ -0,0 +1,111 @@ +/*************************************************************************/ +/* FBXParseTools.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef FBX_PARSE_TOOLS_H +#define FBX_PARSE_TOOLS_H + +#include "core/error/error_macros.h" +#include "core/string/ustring.h" + +#include <stdint.h> +#include <algorithm> +#include <locale> + +template <class char_t> +inline bool IsNewLine(char_t c) { + return c == '\n' || c == '\r'; +} +template <class char_t> +inline bool IsSpace(char_t c) { + return (c == (char_t)' ' || c == (char_t)'\t'); +} + +template <class char_t> +inline bool IsSpaceOrNewLine(char_t c) { + return IsNewLine(c) || IsSpace(c); +} + +template <class char_t> +inline bool IsLineEnd(char_t c) { + return (c == (char_t)'\r' || c == (char_t)'\n' || c == (char_t)'\0' || c == (char_t)'\f'); +} + +// ------------------------------------------------------------------------------------ +// Special version of the function, providing higher accuracy and safety +// It is mainly used by fast_atof to prevent ugly and unwanted integer overflows. +// ------------------------------------------------------------------------------------ +inline uint64_t strtoul10_64(const char *in, bool &errored, const char **out = 0, unsigned int *max_inout = 0) { + unsigned int cur = 0; + uint64_t value = 0; + + errored = *in < '0' || *in > '9'; + ERR_FAIL_COND_V_MSG(errored, 0, "The string cannot be converted parser error"); + + for (;;) { + if (*in < '0' || *in > '9') { + break; + } + + const uint64_t new_value = (value * (uint64_t)10) + ((uint64_t)(*in - '0')); + + // numeric overflow, we rely on you + if (new_value < value) { + //WARN_PRINT( "Converting the string \" " + in + " \" into a value resulted in overflow." ); + return 0; + } + + value = new_value; + + ++in; + ++cur; + + if (max_inout && *max_inout == cur) { + if (out) { /* skip to end */ + while (*in >= '0' && *in <= '9') { + ++in; + } + *out = in; + } + + return value; + } + } + if (out) { + *out = in; + } + + if (max_inout) { + *max_inout = cur; + } + + return value; +} + +#endif // FBX_PARSE_TOOLS_H diff --git a/modules/fbx/fbx_parser/FBXParser.cpp b/modules/fbx/fbx_parser/FBXParser.cpp new file mode 100644 index 0000000000..208358f55e --- /dev/null +++ b/modules/fbx/fbx_parser/FBXParser.cpp @@ -0,0 +1,1295 @@ +/*************************************************************************/ +/* FBXParser.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXParser.cpp + * @brief Implementation of the FBX parser and the rudimentary DOM that we use + */ + +#include "thirdparty/zlib/zlib.h" +#include <stdlib.h> /* strtol */ + +#include "ByteSwapper.h" +#include "FBXParseTools.h" +#include "FBXParser.h" +#include "FBXTokenizer.h" +#include "core/math/math_defs.h" +#include "core/math/transform.h" +#include "core/math/vector3.h" +#include "core/string/print_string.h" + +using namespace FBXDocParser; +namespace { + +// Initially, we did reinterpret_cast, breaking strict aliasing rules. +// This actually caused trouble on Android, so let's be safe this time. +// https://github.com/assimp/assimp/issues/24 +template <typename T> +T SafeParse(const char *data, const char *end) { + // Actual size validation happens during Tokenization so + // this is valid as an assertion. 
+ (void)(end); + //ai_assert(static_cast<size_t>(end - data) >= sizeof(T)); + T result = static_cast<T>(0); + ::memcpy(&result, data, sizeof(T)); + return result; +} +} // namespace + +namespace FBXDocParser { + +// ------------------------------------------------------------------------------------------------ +Element::Element(const TokenPtr key_token, Parser &parser) : + key_token(key_token) { + TokenPtr n = nullptr; + do { + n = parser.AdvanceToNextToken(); + if (n == nullptr) { + continue; + } + + if (!n) { + print_error("unexpected end of file, expected closing bracket" + String(parser.LastToken()->StringContents().c_str())); + } + + if (n && n->Type() == TokenType_DATA) { + tokens.push_back(n); + TokenPtr prev = n; + n = parser.AdvanceToNextToken(); + + if (n == nullptr) { + break; + } + + if (!n) { + print_error("unexpected end of file, expected bracket, comma or key" + String(parser.LastToken()->StringContents().c_str())); + } + + const TokenType ty = n->Type(); + + // some exporters are missing a comma on the next line + if (ty == TokenType_DATA && prev->Type() == TokenType_DATA && (n->Line() == prev->Line() + 1)) { + tokens.push_back(n); + continue; + } + + if (ty != TokenType_OPEN_BRACKET && ty != TokenType_CLOSE_BRACKET && ty != TokenType_COMMA && ty != TokenType_KEY) { + print_error("unexpected token; expected bracket, comma or key" + String(n->StringContents().c_str())); + } + } + + if (n && n->Type() == TokenType_OPEN_BRACKET) { + compound = new_Scope(parser); + parser.scopes.push_back(compound); + + // current token should be a TOK_CLOSE_BRACKET + n = parser.CurrentToken(); + + if (n && n->Type() != TokenType_CLOSE_BRACKET) { + print_error("expected closing bracket" + String(n->StringContents().c_str())); + } + + parser.AdvanceToNextToken(); + return; + } + } while (n && n->Type() != TokenType_KEY && n->Type() != TokenType_CLOSE_BRACKET); +} + +// ------------------------------------------------------------------------------------------------ +Element::~Element() { +} + +// ------------------------------------------------------------------------------------------------ +Scope::Scope(Parser &parser, bool topLevel) { + if (!topLevel) { + TokenPtr t = parser.CurrentToken(); + if (t->Type() != TokenType_OPEN_BRACKET) { + print_error("expected open bracket" + String(t->StringContents().c_str())); + } + } + + TokenPtr n = parser.AdvanceToNextToken(); + if (n == nullptr) { + print_error("unexpected end of file"); + } + + // note: empty scopes are allowed + while (n && n->Type() != TokenType_CLOSE_BRACKET) { + if (n->Type() != TokenType_KEY) { + print_error("unexpected token, expected TOK_KEY" + String(n->StringContents().c_str())); + } + + const std::string str = n->StringContents(); + + // std::multimap<std::string, ElementPtr> (key and value) + elements.insert(ElementMap::value_type(str, new_Element(n, parser))); + + // Element() should stop at the next Key token (or right after a Close token) + n = parser.CurrentToken(); + if (n == nullptr) { + if (topLevel) { + return; + } + + //print_error("unexpected end of file" + String(parser.LastToken()->StringContents().c_str())); + } + } +} + +// ------------------------------------------------------------------------------------------------ +Scope::~Scope() { + for (ElementMap::value_type &v : elements) { + delete v.second; + v.second = nullptr; + } + + elements.clear(); +} + +// ------------------------------------------------------------------------------------------------ +Parser::Parser(const TokenList &tokens, bool is_binary) : 
+ tokens(tokens), last(), current(), cursor(tokens.begin()), is_binary(is_binary) { + root = new_Scope(*this, true); + scopes.push_back(root); +} + +// ------------------------------------------------------------------------------------------------ +Parser::~Parser() { + for (ScopePtr scope : scopes) { + delete scope; + scope = nullptr; + } +} + +// ------------------------------------------------------------------------------------------------ +TokenPtr Parser::AdvanceToNextToken() { + last = current; + if (cursor == tokens.end()) { + current = nullptr; + } else { + current = *cursor++; + } + return current; +} + +// ------------------------------------------------------------------------------------------------ +TokenPtr Parser::CurrentToken() const { + return current; +} + +// ------------------------------------------------------------------------------------------------ +TokenPtr Parser::LastToken() const { + return last; +} + +// ------------------------------------------------------------------------------------------------ +uint64_t ParseTokenAsID(const TokenPtr t, const char *&err_out) { + ERR_FAIL_COND_V_MSG(t == nullptr, 0L, "Invalid token passed to ParseTokenAsID"); + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return 0L; + } + + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'L') { + err_out = "failed to parse ID, unexpected data type, expected L(ong) (binary)"; + return 0L; + } + + uint64_t id = SafeParse<uint64_t>(data + 1, t->end()); + return id; + } + + // XXX: should use size_t here + unsigned int length = static_cast<unsigned int>(t->end() - t->begin()); + //ai_assert(length > 0); + + const char *out = nullptr; + bool errored = false; + + const uint64_t id = strtoul10_64(t->begin(), errored, &out, &length); + if (errored || out > t->end()) { + err_out = "failed to parse ID (text)"; + return 0L; + } + + return id; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsID() with print_error handling +uint64_t ParseTokenAsID(const TokenPtr t) { + const char *err = nullptr; + const uint64_t i = ParseTokenAsID(t, err); + if (err) { + print_error(String(err) + " " + String(t->StringContents().c_str())); + } + return i; +} + +// ------------------------------------------------------------------------------------------------ +size_t ParseTokenAsDim(const TokenPtr t, const char *&err_out) { + // same as ID parsing, except there is a trailing asterisk + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return 0; + } + + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'L') { + err_out = "failed to parse ID, unexpected data type, expected L(ong) (binary)"; + return 0; + } + + uint64_t id = SafeParse<uint64_t>(data + 1, t->end()); + AI_SWAP8(id); + return static_cast<size_t>(id); + } + + if (*t->begin() != '*') { + err_out = "expected asterisk before array dimension"; + return 0; + } + + // XXX: should use size_t here + unsigned int length = static_cast<unsigned int>(t->end() - t->begin()); + if (length == 0) { + err_out = "expected valid integer number after asterisk"; + return 0; + } + + const char *out = nullptr; + bool errored = false; + const size_t id = static_cast<size_t>(strtoul10_64(t->begin() + 1, errored, &out, &length)); + if (errored || out > t->end()) { + print_error("failed to parse id"); + err_out = "failed to parse ID"; + return 0; + } + 
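	// In FBX ASCII (7.x) an array-valued element is written as, for example,
	//   Vertices: *9 {
	//       a: 0,0,0, 1,0,0, 0,1,0
	//   }
	// and the "*9" token parsed here carries the element count.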
+ return id; +} + +// ------------------------------------------------------------------------------------------------ +float ParseTokenAsFloat(const TokenPtr t, const char *&err_out) { + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return 0.0f; + } + + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'F' && data[0] != 'D') { + err_out = "failed to parse F(loat) or D(ouble), unexpected data type (binary)"; + return 0.0f; + } + + if (data[0] == 'F') { + return SafeParse<float>(data + 1, t->end()); + } else { + return static_cast<float>(SafeParse<double>(data + 1, t->end())); + } + } + +// need to copy the input string to a temporary buffer +// first - next in the fbx token stream comes ',', +// which fast_atof could interpret as decimal point. +#define MAX_FLOAT_LENGTH 31 + char temp[MAX_FLOAT_LENGTH + 1]; + const size_t length = static_cast<size_t>(t->end() - t->begin()); + std::copy(t->begin(), t->end(), temp); + temp[std::min(static_cast<size_t>(MAX_FLOAT_LENGTH), length)] = '\0'; + + return atof(temp); +} + +// ------------------------------------------------------------------------------------------------ +int ParseTokenAsInt(const TokenPtr t, const char *&err_out) { + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return 0; + } + + // binary files are simple to parse + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'I') { + err_out = "failed to parse I(nt), unexpected data type (binary)"; + return 0; + } + + int32_t ival = SafeParse<int32_t>(data + 1, t->end()); + AI_SWAP4(ival); + return static_cast<int>(ival); + } + + // ASCII files are unsafe. + const size_t length = static_cast<size_t>(t->end() - t->begin()); + if (length == 0) { + err_out = "expected valid integer number after asterisk"; + ERR_FAIL_V_MSG(0, "expected valid integer number after asterisk"); + } + + // must not be null for strtol to work + char *out = (char *)t->end(); + // string begin, end ptr ref, base 10 + const int value = strtol(t->begin(), &out, 10); + if (out == nullptr || out != t->end()) { + err_out = "failed to parse ID"; + ERR_FAIL_V_MSG(0, "failed to parse ID"); + } + + return value; +} + +// ------------------------------------------------------------------------------------------------ +int64_t ParseTokenAsInt64(const TokenPtr t, const char *&err_out) { + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return 0L; + } + + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'L') { + err_out = "failed to parse Int64, unexpected data type"; + return 0L; + } + + int64_t id = SafeParse<int64_t>(data + 1, t->end()); + AI_SWAP8(id); + return id; + } + + // XXX: should use size_t here + unsigned int length = static_cast<unsigned int>(t->end() - t->begin()); + //ai_assert(length > 0); + + char *out = nullptr; + const int64_t id = strtol(t->begin(), &out, length); + if (out > t->end()) { + err_out = "failed to parse Int64 (text)"; + return 0L; + } + + return id; +} + +// ------------------------------------------------------------------------------------------------ +std::string ParseTokenAsString(const TokenPtr t, const char *&err_out) { + err_out = nullptr; + + if (t->Type() != TokenType_DATA) { + err_out = "expected TOK_DATA token"; + return ""; + } + + if (t->IsBinary()) { + const char *data = t->begin(); + if (data[0] != 'S') { + err_out = "failed to parse String, unexpected data type 
(binary)"; + return ""; + } + + // read string length + int32_t len = SafeParse<int32_t>(data + 1, t->end()); + AI_SWAP4(len); + + //ai_assert(t.end() - data == 5 + len); + return std::string(data + 5, len); + } + + const size_t length = static_cast<size_t>(t->end() - t->begin()); + if (length < 2) { + err_out = "token is too short to hold a string"; + return ""; + } + + const char *s = t->begin(), *e = t->end() - 1; + if (*s != '\"' || *e != '\"') { + err_out = "expected double quoted string"; + return ""; + } + + return std::string(s + 1, length - 2); +} + +namespace { + +// ------------------------------------------------------------------------------------------------ +// read the type code and element count of a binary data array and stop there +void ReadBinaryDataArrayHead(const char *&data, const char *end, char &type, uint32_t &count, + const ElementPtr el) { + TokenPtr token = el->KeyToken(); + if (static_cast<size_t>(end - data) < 5) { + print_error("binary data array is too short, need five (5) bytes for type signature and element count: " + String(token->StringContents().c_str())); + } + + // data type + type = *data; + + // read number of elements + uint32_t len = SafeParse<uint32_t>(data + 1, end); + AI_SWAP4(len); + + count = len; + data += 5; +} + +// ------------------------------------------------------------------------------------------------ +// read binary data array, assume cursor points to the 'compression mode' field (i.e. behind the header) +void ReadBinaryDataArray(char type, uint32_t count, const char *&data, const char *end, + std::vector<char> &buff, + const ElementPtr /*el*/) { + uint32_t encmode = SafeParse<uint32_t>(data, end); + AI_SWAP4(encmode); + data += 4; + + // next comes the compressed length + uint32_t comp_len = SafeParse<uint32_t>(data, end); + AI_SWAP4(comp_len); + data += 4; + + //ai_assert(data + comp_len == end); + + // determine the length of the uncompressed data by looking at the type signature + uint32_t stride = 0; + switch (type) { + case 'f': + case 'i': + stride = 4; + break; + + case 'd': + case 'l': + stride = 8; + break; + } + + const uint32_t full_length = stride * count; + buff.resize(full_length); + + if (encmode == 0) { + //ai_assert(full_length == comp_len); + + // plain data, no compression + std::copy(data, end, buff.begin()); + } else if (encmode == 1) { + // zlib/deflate, next comes ZIP head (0x78 0x01) + // see http://www.ietf.org/rfc/rfc1950.txt + + z_stream zstream; + zstream.opaque = Z_NULL; + zstream.zalloc = Z_NULL; + zstream.zfree = Z_NULL; + zstream.data_type = Z_BINARY; + + // http://hewgill.com/journal/entries/349-how-to-decompress-gzip-stream-with-zlib + if (Z_OK != inflateInit(&zstream)) { + print_error("failure initializing zlib"); + } + + zstream.next_in = reinterpret_cast<Bytef *>(const_cast<char *>(data)); + zstream.avail_in = comp_len; + + zstream.avail_out = static_cast<uInt>(buff.size()); + zstream.next_out = reinterpret_cast<Bytef *>(&*buff.begin()); + const int ret = inflate(&zstream, Z_FINISH); + + if (ret != Z_STREAM_END && ret != Z_OK) { + print_error("failure decompressing compressed data section"); + } + + // terminate zlib + inflateEnd(&zstream); + } +#ifdef ASSIMP_BUILD_DEBUG + else { + // runtime check for this happens at tokenization stage + //ai_assert(false); + } +#endif + + data += comp_len; + //ai_assert(data == end); +} +} // namespace + +// ------------------------------------------------------------------------------------------------ +// read an array of float3 tuples +void 
ParseVectorDataArray(std::vector<Vector3> &out, const ElementPtr el) { + out.resize(0); + + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + if (tok.empty()) { + print_error("unexpected empty element" + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (count % 3 != 0) { + print_error("number of floats is not a multiple of three (3) (binary)" + String(token->StringContents().c_str())); + } + + if (!count) { + return; + } + + if (type != 'd' && type != 'f') { + print_error("expected float or double array (binary)" + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * (type == 'd' ? 8 : 4)); + + const uint32_t count3 = count / 3; + out.reserve(count3); + + if (type == 'd') { + const double *d = reinterpret_cast<const double *>(&buff[0]); + for (unsigned int i = 0; i < count3; ++i, d += 3) { + out.push_back(Vector3(static_cast<real_t>(d[0]), + static_cast<real_t>(d[1]), + static_cast<real_t>(d[2]))); + } + // for debugging + /*for ( size_t i = 0; i < out.size(); i++ ) { + aiVector3D vec3( out[ i ] ); + std::stringstream stream; + stream << " vec3.x = " << vec3.x << " vec3.y = " << vec3.y << " vec3.z = " << vec3.z << std::endl; + DefaultLogger::get()->info( stream.str() ); + }*/ + } else if (type == 'f') { + const float *f = reinterpret_cast<const float *>(&buff[0]); + for (unsigned int i = 0; i < count3; ++i, f += 3) { + out.push_back(Vector3(f[0], f[1], f[2])); + } + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // may throw bad_alloc if the input is rubbish, but this need + // not to be prevented - importing would fail but we wouldn't + // crash since assimp handles this case properly. 
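	// ASCII path: the element owns a nested scope whose child element "a" holds
	// the flat comma-separated float tokens; they are consumed three at a time
	// below to build each Vector3.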
+ out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + if (a->Tokens().size() % 3 != 0) { + print_error("number of floats is not a multiple of three (3)" + String(token->StringContents().c_str())); + } else { + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + Vector3 v; + v.x = ParseTokenAsFloat(*it++); + v.y = ParseTokenAsFloat(*it++); + v.z = ParseTokenAsFloat(*it++); + + out.push_back(v); + } + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of color4 tuples +void ParseVectorDataArray(std::vector<Color> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + + TokenPtr token = el->KeyToken(); + + if (tok.empty()) { + print_error("unexpected empty element" + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (count % 4 != 0) { + print_error("number of floats is not a multiple of four (4) (binary)" + String(token->StringContents().c_str())); + } + + if (!count) { + return; + } + + if (type != 'd' && type != 'f') { + print_error("expected float or double array (binary)" + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * (type == 'd' ? 8 : 4)); + + const uint32_t count4 = count / 4; + out.reserve(count4); + + if (type == 'd') { + const double *d = reinterpret_cast<const double *>(&buff[0]); + for (unsigned int i = 0; i < count4; ++i, d += 4) { + out.push_back(Color(static_cast<float>(d[0]), + static_cast<float>(d[1]), + static_cast<float>(d[2]), + static_cast<float>(d[3]))); + } + } else if (type == 'f') { + const float *f = reinterpret_cast<const float *>(&buff[0]); + for (unsigned int i = 0; i < count4; ++i, f += 4) { + out.push_back(Color(f[0], f[1], f[2], f[3])); + } + } + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() above + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + if (a->Tokens().size() % 4 != 0) { + print_error("number of floats is not a multiple of four (4)" + String(token->StringContents().c_str())); + } + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + Color v; + v.r = ParseTokenAsFloat(*it++); + v.g = ParseTokenAsFloat(*it++); + v.b = ParseTokenAsFloat(*it++); + v.a = ParseTokenAsFloat(*it++); + + out.push_back(v); + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of float2 tuples +void ParseVectorDataArray(std::vector<Vector2> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + if (tok.empty()) { + print_error("unexpected empty element" + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (count % 2 != 0) { + print_error("number of floats is not a multiple of two (2) (binary)" + 
String(token->StringContents().c_str())); + } + + if (!count) { + return; + } + + if (type != 'd' && type != 'f') { + print_error("expected float or double array (binary)" + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * (type == 'd' ? 8 : 4)); + + const uint32_t count2 = count / 2; + out.reserve(count2); + + if (type == 'd') { + const double *d = reinterpret_cast<const double *>(&buff[0]); + for (unsigned int i = 0; i < count2; ++i, d += 2) { + out.push_back(Vector2(static_cast<float>(d[0]), + static_cast<float>(d[1]))); + } + } else if (type == 'f') { + const float *f = reinterpret_cast<const float *>(&buff[0]); + for (unsigned int i = 0; i < count2; ++i, f += 2) { + out.push_back(Vector2(f[0], f[1])); + } + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() above + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + if (a->Tokens().size() % 2 != 0) { + print_error("number of floats is not a multiple of two (2)" + String(token->StringContents().c_str())); + } else { + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + Vector2 v; + v.x = ParseTokenAsFloat(*it++); + v.y = ParseTokenAsFloat(*it++); + out.push_back(v); + } + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of ints +void ParseVectorDataArray(std::vector<int> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + if (tok.empty()) { + print_error("unexpected empty element" + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (!count) { + return; + } + + if (type != 'i') { + print_error("expected int array (binary)" + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * 4); + + out.reserve(count); + + const int32_t *ip = reinterpret_cast<const int32_t *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++ip) { + int32_t val = *ip; + AI_SWAP4(val); + out.push_back(val); + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + const int ival = ParseTokenAsInt(*it++); + out.push_back(ival); + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of floats +void ParseVectorDataArray(std::vector<float> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + if (tok.empty()) { + print_error("unexpected empty element: " + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, 
count, el); + + if (!count) { + return; + } + + if (type != 'd' && type != 'f') { + print_error("expected float or double array (binary) " + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * (type == 'd' ? 8 : 4)); + + if (type == 'd') { + const double *d = reinterpret_cast<const double *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++d) { + out.push_back(static_cast<float>(*d)); + } + } else if (type == 'f') { + const float *f = reinterpret_cast<const float *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++f) { + out.push_back(*f); + } + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + const float ival = ParseTokenAsFloat(*it++); + out.push_back(ival); + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of uints +void ParseVectorDataArray(std::vector<unsigned int> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + const TokenPtr token = el->KeyToken(); + + ERR_FAIL_COND_MSG(!token, "invalid ParseVectorDataArrat token invalid"); + + if (tok.empty()) { + print_error("unexpected empty element: " + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (!count) { + return; + } + + if (type != 'i') { + print_error("expected (u)int array (binary)" + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * 4); + + out.reserve(count); + + const int32_t *ip = reinterpret_cast<const int32_t *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++ip) { + int32_t val = *ip; + if (val < 0) { + print_error("encountered negative integer index (binary)"); + } + + out.push_back(val); + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + const int ival = ParseTokenAsInt(*it++); + if (ival < 0) { + print_error("encountered negative integer index"); + } + out.push_back(static_cast<unsigned int>(ival)); + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of uint64_ts +void ParseVectorDataArray(std::vector<uint64_t> &out, const ElementPtr el) { + out.resize(0); + + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + ERR_FAIL_COND(!token); + + if (tok.empty()) { + print_error("unexpected empty element " + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (!count) { + return; + } + + 
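	// Object IDs are serialized as a 64-bit integer array, i.e. binary type code 'l'.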
if (type != 'l') { + print_error("expected long array (binary): " + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * 8); + + out.reserve(count); + + const uint64_t *ip = reinterpret_cast<const uint64_t *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++ip) { + uint64_t val = *ip; + AI_SWAP8(val); + out.push_back(val); + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + const uint64_t ival = ParseTokenAsID(*it++); + + out.push_back(ival); + } +} + +// ------------------------------------------------------------------------------------------------ +// read an array of int64_ts +void ParseVectorDataArray(std::vector<int64_t> &out, const ElementPtr el) { + out.resize(0); + const TokenList &tok = el->Tokens(); + TokenPtr token = el->KeyToken(); + ERR_FAIL_COND(!token); + if (tok.empty()) { + print_error("unexpected empty element: " + String(token->StringContents().c_str())); + } + + if (tok[0]->IsBinary()) { + const char *data = tok[0]->begin(), *end = tok[0]->end(); + + char type; + uint32_t count; + ReadBinaryDataArrayHead(data, end, type, count, el); + + if (!count) { + return; + } + + if (type != 'l') { + print_error("expected long array (binary) " + String(token->StringContents().c_str())); + } + + std::vector<char> buff; + ReadBinaryDataArray(type, count, data, end, buff, el); + + //ai_assert(data == end); + //ai_assert(buff.size() == count * 8); + + out.reserve(count); + + const int64_t *ip = reinterpret_cast<const int64_t *>(&buff[0]); + for (unsigned int i = 0; i < count; ++i, ++ip) { + int64_t val = *ip; + AI_SWAP8(val); + out.push_back(val); + } + + return; + } + + const size_t dim = ParseTokenAsDim(tok[0]); + + // see notes in ParseVectorDataArray() + out.reserve(dim); + + const ScopePtr scope = GetRequiredScope(el); + const ElementPtr a = GetRequiredElement(scope, "a", el); + + for (TokenList::const_iterator it = a->Tokens().begin(), end = a->Tokens().end(); it != end;) { + const int64_t val = ParseTokenAsInt64(*it++); + out.push_back(val); + } +} + +// ------------------------------------------------------------------------------------------------ +Transform ReadMatrix(const ElementPtr element) { + std::vector<float> values; + ParseVectorDataArray(values, element); + + if (values.size() != 16) { + print_error("expected 16 matrix elements"); + } + + // clean values to prevent any IBM damage on inverse() / affine_inverse() + for (float &value : values) { + if (::Math::is_equal_approx(0, value)) { + value = 0; + } + } + + Transform xform; + Basis basis; + + basis.set( + Vector3(values[0], values[1], values[2]), + Vector3(values[4], values[5], values[6]), + Vector3(values[8], values[9], values[10])); + + xform.basis = basis; + xform.origin = Vector3(values[12], values[13], values[14]); + // determine if we need to think about this with dynamic rotation order? 
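	// Layout note: the 16 floats form a 4x4 transform read row by row; rows
	// 0-2 (values[0..2], [4..6], [8..10]) supplied the basis above and row 3
	// (values[12..14]) the translation, while the fourth column of each row is
	// ignored here.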
+ // for example: + // xform.basis = z_axis * y_axis * x_axis; + //xform.basis.transpose(); + + print_verbose("xform verbose basis: " + (xform.basis.get_euler() * (180 / Math_PI)) + " xform origin:" + xform.origin); + + return xform; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsString() with print_error handling +std::string ParseTokenAsString(const TokenPtr t) { + ERR_FAIL_COND_V(!t, ""); + const char *err; + const std::string &i = ParseTokenAsString(t, err); + if (err) { + print_error(String(err) + ", " + String(t->StringContents().c_str())); + } + return i; +} + +// ------------------------------------------------------------------------------------------------ +// extract a required element from a scope, abort if the element cannot be found +ElementPtr GetRequiredElement(const ScopePtr sc, const std::string &index, const ElementPtr element /*= NULL*/) { + const ElementPtr el = sc->GetElement(index); + TokenPtr token = el->KeyToken(); + ERR_FAIL_COND_V(!token, nullptr); + if (!el) { + print_error("did not find required element \"" + String(index.c_str()) + "\" " + String(token->StringContents().c_str())); + } + return el; +} + +bool HasElement(const ScopePtr sc, const std::string &index) { + const ElementPtr el = sc->GetElement(index); + if (nullptr == el) { + return false; + } + + return true; +} + +// ------------------------------------------------------------------------------------------------ +// extract a required element from a scope, abort if the element cannot be found +ElementPtr GetOptionalElement(const ScopePtr sc, const std::string &index, const ElementPtr element /*= NULL*/) { + const ElementPtr el = sc->GetElement(index); + return el; +} + +// ------------------------------------------------------------------------------------------------ +// extract required compound scope +ScopePtr GetRequiredScope(const ElementPtr el) { + if (el) { + ScopePtr s = el->Compound(); + TokenPtr token = el->KeyToken(); + ERR_FAIL_COND_V(!token, nullptr); + if (s) { + return s; + } + + ERR_FAIL_V_MSG(nullptr, "expected compound scope " + String(token->StringContents().c_str())); + } + + ERR_FAIL_V_MSG(nullptr, "Invalid element supplied to parser"); +} + +// ------------------------------------------------------------------------------------------------ +// get token at a particular index +TokenPtr GetRequiredToken(const ElementPtr el, unsigned int index) { + if (el) { + const TokenList &x = el->Tokens(); + TokenPtr token = el->KeyToken(); + + ERR_FAIL_COND_V(!token, nullptr); + + if (index >= x.size()) { + ERR_FAIL_V_MSG(nullptr, "missing token at index: " + itos(index) + " " + String(token->StringContents().c_str())); + } + + return x[index]; + } + + return nullptr; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsDim() with print_error handling +size_t ParseTokenAsDim(const TokenPtr t) { + const char *err; + const size_t i = ParseTokenAsDim(t, err); + if (err) { + print_error(String(err) + " " + String(t->StringContents().c_str())); + } + return i; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsFloat() with print_error handling +float ParseTokenAsFloat(const TokenPtr t) { + const char *err; + const float i = ParseTokenAsFloat(t, err); + if (err) { + print_error(String(err) + " " + String(t->StringContents().c_str())); + } + 
return i; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsInt() with print_error handling +int ParseTokenAsInt(const TokenPtr t) { + const char *err; + const int i = ParseTokenAsInt(t, err); + if (err) { + print_error(String(err) + " " + String(t->StringContents().c_str())); + } + return i; +} + +// ------------------------------------------------------------------------------------------------ +// wrapper around ParseTokenAsInt64() with print_error handling +int64_t ParseTokenAsInt64(const TokenPtr t) { + const char *err; + const int64_t i = ParseTokenAsInt64(t, err); + if (err) { + print_error(String(err) + " " + String(t->StringContents().c_str())); + } + return i; +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXParser.h b/modules/fbx/fbx_parser/FBXParser.h new file mode 100644 index 0000000000..3cba81ff83 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXParser.h @@ -0,0 +1,263 @@ +/*************************************************************************/ +/* FBXParser.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. 
+ +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXParser.h + * @brief FBX parsing code + */ +#ifndef FBX_PARSER_H +#define FBX_PARSER_H + +#include <stdint.h> +#include <map> +#include <memory> + +#include "core/math/color.h" +#include "core/math/transform.h" +#include "core/math/vector2.h" +#include "core/math/vector3.h" + +#include "FBXTokenizer.h" + +namespace FBXDocParser { + +class Scope; +class Parser; +class Element; + +typedef Element *ElementPtr; +typedef Scope *ScopePtr; + +typedef std::vector<ScopePtr> ScopeList; +typedef std::multimap<std::string, ElementPtr> ElementMap; +typedef std::pair<ElementMap::const_iterator, ElementMap::const_iterator> ElementCollection; + +#define new_Scope new Scope +#define new_Element new Element + +/** FBX data entity that consists of a key:value tuple. + * + * Example: + * @verbatim + * AnimationCurve: 23, "AnimCurve::", "" { + * [..] + * } + * @endverbatim + * + * As can be seen in this sample, elements can contain nested #Scope + * as their trailing member. **/ +class Element { +public: + Element(TokenPtr key_token, Parser &parser); + ~Element(); + + ScopePtr Compound() const { + return compound; + } + + TokenPtr KeyToken() const { + return key_token; + } + + const TokenList &Tokens() const { + return tokens; + } + +private: + TokenList tokens; + ScopePtr compound = nullptr; + std::vector<ScopePtr> compound_scope; + TokenPtr key_token = nullptr; +}; + +/** FBX data entity that consists of a 'scope', a collection + * of not necessarily unique #Element instances. + * + * Example: + * @verbatim + * GlobalSettings: { + * Version: 1000 + * Properties70: + * [...] + * } + * @endverbatim */ +class Scope { +public: + Scope(Parser &parser, bool topLevel = false); + ~Scope(); + + ElementPtr GetElement(const std::string &index) const { + ElementMap::const_iterator it = elements.find(index); + return it == elements.end() ? nullptr : (*it).second; + } + + ElementPtr FindElementCaseInsensitive(const std::string &elementName) const { + for (auto element = elements.begin(); element != elements.end(); ++element) { + if (element->first.compare(elementName)) { + return element->second; + } + } + + // nothing to reference / expired. 
+ return nullptr; + } + + ElementCollection GetCollection(const std::string &index) const { + return elements.equal_range(index); + } + + const ElementMap &Elements() const { + return elements; + } + +private: + ElementMap elements; +}; + +/** FBX parsing class, takes a list of input tokens and generates a hierarchy + * of nested #Scope instances, representing the fbx DOM.*/ +class Parser { +public: + /** Parse given a token list. Does not take ownership of the tokens - + * the objects must persist during the entire parser lifetime */ + Parser(const TokenList &tokens, bool is_binary); + ~Parser(); + + ScopePtr GetRootScope() const { + return root; + } + + bool IsBinary() const { + return is_binary; + } + +private: + friend class Scope; + friend class Element; + + TokenPtr AdvanceToNextToken(); + TokenPtr LastToken() const; + TokenPtr CurrentToken() const; + +private: + ScopeList scopes; + const TokenList &tokens; + + TokenPtr last = nullptr, current = nullptr; + TokenList::const_iterator cursor; + ScopePtr root = nullptr; + + const bool is_binary; +}; + +/* token parsing - this happens when building the DOM out of the parse-tree*/ +uint64_t ParseTokenAsID(const TokenPtr t, const char *&err_out); +size_t ParseTokenAsDim(const TokenPtr t, const char *&err_out); +float ParseTokenAsFloat(const TokenPtr t, const char *&err_out); +int ParseTokenAsInt(const TokenPtr t, const char *&err_out); +int64_t ParseTokenAsInt64(const TokenPtr t, const char *&err_out); +std::string ParseTokenAsString(const TokenPtr t, const char *&err_out); + +/* wrapper around ParseTokenAsXXX() with DOMError handling */ +uint64_t ParseTokenAsID(const TokenPtr t); +size_t ParseTokenAsDim(const TokenPtr t); +float ParseTokenAsFloat(const TokenPtr t); +int ParseTokenAsInt(const TokenPtr t); +int64_t ParseTokenAsInt64(const TokenPtr t); +std::string ParseTokenAsString(const TokenPtr t); + +/* read data arrays */ +void ParseVectorDataArray(std::vector<Vector3> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<Color> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<Vector2> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<int> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<float> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<float> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<unsigned int> &out, const ElementPtr el); +void ParseVectorDataArray(std::vector<uint64_t> &out, const ElementPtr ep); +void ParseVectorDataArray(std::vector<int64_t> &out, const ElementPtr el); +bool HasElement(const ScopePtr sc, const std::string &index); + +// extract a required element from a scope, abort if the element cannot be found +ElementPtr GetRequiredElement(const ScopePtr sc, const std::string &index, const ElementPtr element = nullptr); +ScopePtr GetRequiredScope(const ElementPtr el); // New in 2020. 
(less likely to destroy application) +ElementPtr GetOptionalElement(const ScopePtr sc, const std::string &index, const ElementPtr element = nullptr); +// extract required compound scope +ScopePtr GetRequiredScope(const ElementPtr el); +// get token at a particular index +TokenPtr GetRequiredToken(const ElementPtr el, unsigned int index); + +// ------------------------------------------------------------------------------------------------ +// read a 4x4 matrix from an array of 16 floats +Transform ReadMatrix(const ElementPtr element); +} // namespace FBXDocParser + +#endif // FBX_PARSER_H diff --git a/modules/fbx/fbx_parser/FBXPose.cpp b/modules/fbx/fbx_parser/FBXPose.cpp new file mode 100644 index 0000000000..1bee2fa2f0 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXPose.cpp @@ -0,0 +1,104 @@ +/*************************************************************************/ +/* FBXPose.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. 
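As a rough, hypothetical usage sketch of the FBXParser.h API above (the include paths, the "Creator" key, and the wrapper function are illustrative assumptions, not something this patch adds):

```cpp
// Hypothetical sketch: tokenizes a text-format FBX buffer and reads one element.
// Assumes the modules/fbx/fbx_parser headers above are on the include path.
#include "modules/fbx/fbx_parser/FBXParser.h"
#include "modules/fbx/fbx_parser/FBXTokenizer.h"

#include <iostream>
#include <vector>

void print_creator(const std::vector<char> &buffer) {
	FBXDocParser::TokenList tokens;
	FBXDocParser::Tokenize(tokens, buffer.data(), buffer.size());

	FBXDocParser::Parser parser(tokens, /* is_binary */ false);
	FBXDocParser::ScopePtr root = parser.GetRootScope();

	// HasElement() probes first; GetRequiredElement() prints an error when the key is missing.
	if (root && FBXDocParser::HasElement(root, "Creator")) {
		FBXDocParser::ElementPtr el = FBXDocParser::GetRequiredElement(root, "Creator");
		std::cout << FBXDocParser::ParseTokenAsString(FBXDocParser::GetRequiredToken(el, 0)) << std::endl;
	}
	// Cleanup of the heap-allocated tokens is left out of this sketch.
}
```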
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXNoteAttribute.cpp + * @brief Assimp::FBX::NodeAttribute (and subclasses) implementation + */ + +#include "FBXDocument.h" +#include "FBXParser.h" +#include <iostream> + +namespace FBXDocParser { + +class FbxPoseNode; +// ------------------------------------------------------------------------------------------------ +FbxPose::FbxPose(uint64_t id, const ElementPtr element, const Document &doc, const std::string &name) : + Object(id, element, name) { + const ScopePtr sc = GetRequiredScope(element); + //const std::string &classname = ParseTokenAsString(GetRequiredToken(element, 2)); + + const ElementCollection &PoseNodes = sc->GetCollection("PoseNode"); + for (ElementMap::const_iterator it = PoseNodes.first; it != PoseNodes.second; ++it) { + std::string entry_name = (*it).first; + ElementPtr some_element = (*it).second; + FbxPoseNode *pose_node = new FbxPoseNode(some_element, doc, entry_name); + pose_nodes.push_back(pose_node); + } +} + +// ------------------------------------------------------------------------------------------------ +FbxPose::~FbxPose() { + pose_nodes.clear(); + // empty +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXProperties.cpp b/modules/fbx/fbx_parser/FBXProperties.cpp new file mode 100644 index 0000000000..2994a84dbd --- /dev/null +++ b/modules/fbx/fbx_parser/FBXProperties.cpp @@ -0,0 +1,237 @@ +/*************************************************************************/ +/* FBXProperties.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXProperties.cpp + * @brief Implementation of the FBX dynamic properties system + */ + +#include "FBXProperties.h" +#include "FBXDocumentUtil.h" +#include "FBXParser.h" +#include "FBXTokenizer.h" + +namespace FBXDocParser { + +using namespace Util; + +// ------------------------------------------------------------------------------------------------ +Property::Property() { +} + +// ------------------------------------------------------------------------------------------------ +Property::~Property() { +} + +namespace { + +// ------------------------------------------------------------------------------------------------ +// read a typed property out of a FBX element. The return value is NULL if the property cannot be read. 
+PropertyPtr ReadTypedProperty(const ElementPtr element) { + //ai_assert(element.KeyToken().StringContents() == "P"); + + const TokenList &tok = element->Tokens(); + //ai_assert(tok.size() >= 5); + + const std::string &s = ParseTokenAsString(tok[1]); + const char *const cs = s.c_str(); + if (!strcmp(cs, "KString")) { + return new TypedProperty<std::string>(ParseTokenAsString(tok[4])); + } else if (!strcmp(cs, "bool") || !strcmp(cs, "Bool")) { + return new TypedProperty<bool>(ParseTokenAsInt(tok[4]) != 0); + } else if (!strcmp(cs, "int") || !strcmp(cs, "Int") || !strcmp(cs, "enum") || !strcmp(cs, "Enum")) { + return new TypedProperty<int>(ParseTokenAsInt(tok[4])); + } else if (!strcmp(cs, "ULongLong")) { + return new TypedProperty<uint64_t>(ParseTokenAsID(tok[4])); + } else if (!strcmp(cs, "KTime")) { + return new TypedProperty<int64_t>(ParseTokenAsInt64(tok[4])); + } else if (!strcmp(cs, "Vector3D") || + !strcmp(cs, "ColorRGB") || + !strcmp(cs, "Vector") || + !strcmp(cs, "Color") || + !strcmp(cs, "Lcl Translation") || + !strcmp(cs, "Lcl Rotation") || + !strcmp(cs, "Lcl Scaling")) { + return new TypedProperty<Vector3>(Vector3( + ParseTokenAsFloat(tok[4]), + ParseTokenAsFloat(tok[5]), + ParseTokenAsFloat(tok[6]))); + } else if (!strcmp(cs, "double") || !strcmp(cs, "Number") || !strcmp(cs, "Float") || !strcmp(cs, "float") || !strcmp(cs, "FieldOfView") || !strcmp(cs, "UnitScaleFactor")) { + return new TypedProperty<float>(ParseTokenAsFloat(tok[4])); + } + + return nullptr; +} + +// ------------------------------------------------------------------------------------------------ +// peek into an element and check if it contains a FBX property, if so return its name. +std::string PeekPropertyName(const Element &element) { + //ai_assert(element.KeyToken().StringContents() == "P"); + const TokenList &tok = element.Tokens(); + if (tok.size() < 4) { + return ""; + } + + return ParseTokenAsString(tok[0]); +} +} // namespace + +// ------------------------------------------------------------------------------------------------ +PropertyTable::PropertyTable() : + templateProps(), element() { +} + +// ------------------------------------------------------------------------------------------------ +PropertyTable::PropertyTable(const ElementPtr element, const PropertyTable *templateProps) : + templateProps(templateProps), element(element) { + const ScopePtr scope = GetRequiredScope(element); + ERR_FAIL_COND(!scope); + for (const ElementMap::value_type &v : scope->Elements()) { + if (v.first != "P") { + DOMWarning("expected only P elements in property table", v.second); + continue; + } + + const std::string &name = PeekPropertyName(*v.second); + if (!name.length()) { + DOMWarning("could not read property name", v.second); + continue; + } + + LazyPropertyMap::const_iterator it = lazyProps.find(name); + if (it != lazyProps.end()) { + DOMWarning("duplicate property name, will hide previous value: " + name, v.second); + continue; + } + + // since the above checks for duplicates we can be sure to insert the only match here. 
+ lazyProps[name] = v.second; + } +} + +// ------------------------------------------------------------------------------------------------ +PropertyTable::~PropertyTable() { + for (PropertyMap::value_type &v : props) { + delete v.second; + } +} + +// ------------------------------------------------------------------------------------------------ +PropertyPtr PropertyTable::Get(const std::string &name) const { + PropertyMap::const_iterator it = props.find(name); + if (it == props.end()) { + // hasn't been parsed yet? + LazyPropertyMap::const_iterator lit = lazyProps.find(name); + if (lit != lazyProps.end()) { + props[name] = ReadTypedProperty(lit->second); + it = props.find(name); + + //ai_assert(it != props.end()); + } + + if (it == props.end()) { + // check property template + if (templateProps) { + return templateProps->Get(name); + } + + return nullptr; + } + } + + return (*it).second; +} + +DirectPropertyMap PropertyTable::GetUnparsedProperties() const { + DirectPropertyMap result; + + // Loop through all the lazy properties (which is all the properties) + for (const LazyPropertyMap::value_type &element : lazyProps) { + // Skip parsed properties + if (props.end() != props.find(element.first)) + continue; + + // Read the element's value. + // Wrap the naked pointer (since the call site is required to acquire ownership) + // std::unique_ptr from C++11 would be preferred both as a wrapper and a return value. + Property *prop = ReadTypedProperty(element.second); + + // Element could not be read. Skip it. + if (!prop) + continue; + + // Add to result + result[element.first] = prop; + } + + return result; +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXProperties.h b/modules/fbx/fbx_parser/FBXProperties.h new file mode 100644 index 0000000000..a007660c45 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXProperties.h @@ -0,0 +1,221 @@ +/*************************************************************************/ +/* FBXProperties.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXProperties.h + * @brief FBX dynamic properties + */ +#ifndef FBX_PROPERTIES_H +#define FBX_PROPERTIES_H + +#include "FBXParser.h" +#include <map> +#include <memory> +#include <string> +#include <vector> + +namespace FBXDocParser { + +// Forward declarations +class Element; + +/** Represents a dynamic property. Type info added by deriving classes, + * see #TypedProperty. 
+ Example: + @verbatim + P: "ShininessExponent", "double", "Number", "",0.5 + @endvebatim +*/ +class Property { +protected: + Property(); + +public: + virtual ~Property(); + +public: + template <typename T> + const T *As() const { + return dynamic_cast<const T *>(this); + } +}; + +template <typename T> +class TypedProperty : public Property { +public: + explicit TypedProperty(const T &value) : + value(value) { + // empty + } + + const T &Value() const { + return value; + } + +private: + T value; +}; + +#define new_Property new Property +typedef Property *PropertyPtr; +typedef std::map<std::string, PropertyPtr> DirectPropertyMap; +typedef std::map<std::string, PropertyPtr> PropertyMap; +typedef std::map<std::string, ElementPtr> LazyPropertyMap; + +/** + * Represents a property table as can be found in the newer FBX files (Properties60, Properties70) + */ +class PropertyTable { +public: + // in-memory property table with no source element + PropertyTable(); + PropertyTable(const ElementPtr element, const PropertyTable *templateProps); + ~PropertyTable(); + + PropertyPtr Get(const std::string &name) const; + + // PropertyTable's need not be coupled with FBX elements so this can be NULL + ElementPtr GetElement() const { + return element; + } + + PropertyMap &GetProperties() const { + return props; + } + + const LazyPropertyMap &GetLazyProperties() const { + return lazyProps; + } + + const PropertyTable *TemplateProps() const { + return templateProps; + } + + DirectPropertyMap GetUnparsedProperties() const; + +private: + LazyPropertyMap lazyProps; + mutable PropertyMap props; + const PropertyTable *templateProps = nullptr; + const ElementPtr element = nullptr; +}; + +// ------------------------------------------------------------------------------------------------ +template <typename T> +inline T PropertyGet(const PropertyTable *in, const std::string &name, const T &defaultValue) { + PropertyPtr prop = in->Get(name); + if (nullptr == prop) { + return defaultValue; + } + + // strong typing, no need to be lenient + const TypedProperty<T> *const tprop = prop->As<TypedProperty<T>>(); + if (nullptr == tprop) { + return defaultValue; + } + + return tprop->Value(); +} + +// ------------------------------------------------------------------------------------------------ +template <typename T> +inline T PropertyGet(const PropertyTable *in, const std::string &name, bool &result, bool useTemplate = false) { + PropertyPtr prop = in->Get(name); + if (nullptr == prop) { + if (!useTemplate) { + result = false; + return T(); + } + const PropertyTable *templ = in->TemplateProps(); + if (nullptr == templ) { + result = false; + return T(); + } + prop = templ->Get(name); + if (nullptr == prop) { + result = false; + return T(); + } + } + + // strong typing, no need to be lenient + const TypedProperty<T> *const tprop = prop->As<TypedProperty<T>>(); + if (nullptr == tprop) { + result = false; + return T(); + } + + result = true; + return tprop->Value(); +} +} // namespace FBXDocParser + +#endif // FBX_PROPERTIES_H diff --git a/modules/fbx/fbx_parser/FBXTokenizer.cpp b/modules/fbx/fbx_parser/FBXTokenizer.cpp new file mode 100644 index 0000000000..3748bfabf8 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXTokenizer.cpp @@ -0,0 +1,251 @@ +/*************************************************************************/ +/* FBXTokenizer.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ 
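As a brief, hypothetical illustration of the PropertyGet<T> helpers declared above (the property name and wrapper function are assumptions for the example, not part of this patch):

```cpp
// Hypothetical sketch: reads a float property from a PropertyTable in two ways.
#include "modules/fbx/fbx_parser/FBXProperties.h"

float read_unit_scale(const FBXDocParser::PropertyTable *props) {
	// Variant 1: fall back to a default when the property is missing or has another type.
	const float fallback = FBXDocParser::PropertyGet<float>(props, "UnitScaleFactor", 1.0f);

	// Variant 2: report success explicitly and optionally consult the template table.
	bool ok = false;
	const float value = FBXDocParser::PropertyGet<float>(props, "UnitScaleFactor", ok, /* useTemplate */ true);
	return ok ? value : fallback;
}
```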
+/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXTokenizer.cpp + * @brief Implementation of the FBX broadphase lexer + */ + +// tab width for logging columns +#define ASSIMP_FBX_TAB_WIDTH 4 + +#include "FBXTokenizer.h" +#include "core/string/print_string.h" + +namespace FBXDocParser { + +// ------------------------------------------------------------------------------------------------ +Token::Token(const char *p_sbegin, const char *p_send, TokenType p_type, unsigned int p_line, unsigned int p_column) : + sbegin(p_sbegin), + send(p_send), + type(p_type), + line(p_line), + column(p_column) { +#ifdef DEBUG_ENABLED + contents = std::string(sbegin, static_cast<size_t>(send - sbegin)); +#endif +} + +// ------------------------------------------------------------------------------------------------ +Token::~Token() { +} + +namespace { + +// ------------------------------------------------------------------------------------------------ +void TokenizeError(const std::string &message, unsigned int line, unsigned int column) { + print_error("[FBX-Tokenize]" + String(message.c_str()) + " " + itos(line) + ":" + itos(column)); +} + +// process a potential data token up to 'cur', adding it to 'output_tokens'. +// ------------------------------------------------------------------------------------------------ +void ProcessDataToken(TokenList &output_tokens, const char *&start, const char *&end, + unsigned int line, + unsigned int column, + TokenType type = TokenType_DATA, + bool must_have_token = false) { + if (start && end) { + // sanity check: + // tokens should have no whitespace outside quoted text and [start,end] should + // properly delimit the valid range. + bool in_double_quotes = false; + for (const char *c = start; c != end + 1; ++c) { + if (*c == '\"') { + in_double_quotes = !in_double_quotes; + } + + if (!in_double_quotes && IsSpaceOrNewLine(*c)) { + TokenizeError("unexpected whitespace in token", line, column); + } + } + + if (in_double_quotes) { + TokenizeError("non-terminated double quotes", line, column); + } + + output_tokens.push_back(new_Token(start, end + 1, type, line, column)); + } else if (must_have_token) { + TokenizeError("unexpected character, expected data token", line, column); + } + + start = end = nullptr; +} +} // namespace + +// ------------------------------------------------------------------------------------------------ +void Tokenize(TokenList &output_tokens, const char *input, size_t length) { + // line and column numbers numbers are one-based + unsigned int line = 1; + unsigned int column = 1; + + bool comment = false; + bool in_double_quotes = false; + bool pending_data_token = false; + + const char *token_begin = nullptr, *token_end = nullptr; + + // input (starting string), *cur the current string, column += + // modified to fix strlen() and stop buffer overflow + for (size_t x = 0; x < length; x++) { + const char c = input[x]; + const char *cur = &input[x]; + column += (c == '\t' ? 
ASSIMP_FBX_TAB_WIDTH : 1); + + if (IsLineEnd(c)) { + comment = false; + + column = 0; + ++line; + } + + if (comment) { + continue; + } + + if (in_double_quotes) { + if (c == '\"') { + in_double_quotes = false; + token_end = cur; + + ProcessDataToken(output_tokens, token_begin, token_end, line, column); + pending_data_token = false; + } + continue; + } + + switch (c) { + case '\"': + if (token_begin) { + TokenizeError("unexpected double-quote", line, column); + } + token_begin = cur; + in_double_quotes = true; + continue; + + case ';': + ProcessDataToken(output_tokens, token_begin, token_end, line, column); + comment = true; + continue; + + case '{': + ProcessDataToken(output_tokens, token_begin, token_end, line, column); + output_tokens.push_back(new_Token(cur, cur + 1, TokenType_OPEN_BRACKET, line, column)); + continue; + + case '}': + ProcessDataToken(output_tokens, token_begin, token_end, line, column); + output_tokens.push_back(new_Token(cur, cur + 1, TokenType_CLOSE_BRACKET, line, column)); + continue; + + case ',': + if (pending_data_token) { + ProcessDataToken(output_tokens, token_begin, token_end, line, column, TokenType_DATA, true); + } + output_tokens.push_back(new_Token(cur, cur + 1, TokenType_COMMA, line, column)); + continue; + + case ':': + if (pending_data_token) { + ProcessDataToken(output_tokens, token_begin, token_end, line, column, TokenType_KEY, true); + } else { + TokenizeError("unexpected colon", line, column); + } + continue; + } + + if (IsSpaceOrNewLine(c)) { + if (token_begin) { + // peek ahead and check if the next token is a colon in which + // case this counts as KEY token. + TokenType type = TokenType_DATA; + for (const char *peek = cur; *peek && IsSpaceOrNewLine(*peek); ++peek) { + if (*peek == ':') { + type = TokenType_KEY; + cur = peek; + break; + } + } + + ProcessDataToken(output_tokens, token_begin, token_end, line, column, type); + } + + pending_data_token = false; + } else { + token_end = cur; + if (!token_begin) { + token_begin = cur; + } + + pending_data_token = true; + } + } +} +} // namespace FBXDocParser diff --git a/modules/fbx/fbx_parser/FBXTokenizer.h b/modules/fbx/fbx_parser/FBXTokenizer.h new file mode 100644 index 0000000000..1761869c69 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXTokenizer.h @@ -0,0 +1,203 @@ +/*************************************************************************/ +/* FBXTokenizer.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/** @file FBXTokenizer.h + * @brief FBX lexer + */ +#ifndef FBX_TOKENIZER_H +#define FBX_TOKENIZER_H + +#include "FBXParseTools.h" +#include "core/string/ustring.h" +#include <iostream> +#include <memory> +#include <string> +#include <vector> + +namespace FBXDocParser { +/** Rough classification for text FBX tokens used for constructing the + * basic scope hierarchy. */ +enum TokenType { + // { + TokenType_OPEN_BRACKET = 0, + + // } + TokenType_CLOSE_BRACKET, + + // '"blablubb"', '2', '*14' - very general token class, + // further processing happens at a later stage. + TokenType_DATA, + + // + TokenType_BINARY_DATA, + + // , + TokenType_COMMA, + + // blubb: + TokenType_KEY +}; + +/** Represents a single token in a FBX file. Tokens are + * classified by the #TokenType enumerated types. + * + * Offers iterator protocol. Tokens are immutable. 
*/ +class Token { +private: + static const unsigned int BINARY_MARKER = static_cast<unsigned int>(-1); + +public: + /** construct a textual token */ + Token(const char *p_sbegin, const char *p_send, TokenType p_type, unsigned int p_line, unsigned int p_column); + + /** construct a binary token */ + Token(const char *p_sbegin, const char *p_send, TokenType p_type, size_t p_offset); + ~Token(); + +public: + std::string StringContents() const { + return std::string(begin(), end()); + } + + bool IsBinary() const { + return column == BINARY_MARKER; + } + + const char *begin() const { + return sbegin; + } + + const char *end() const { + return send; + } + + TokenType Type() const { + return type; + } + + size_t Offset() const { + return offset; + } + + unsigned int Line() const { + return static_cast<unsigned int>(line); + } + + unsigned int Column() const { + return column; + } + +private: +#ifdef DEBUG_ENABLED + // full string copy for the sole purpose that it nicely appears + // in msvc's debugger window. + std::string contents; +#endif + + const char *sbegin = nullptr; + const char *send = nullptr; + const TokenType type; + + union { + size_t line; + size_t offset; + }; + const unsigned int column = 0; +}; + +// Fixed leak by using shared_ptr for tokens +typedef Token *TokenPtr; +typedef std::vector<TokenPtr> TokenList; + +#define new_Token new Token + +/** Main FBX tokenizer function. Transform input buffer into a list of preprocessed tokens. + * + * Skips over comments and generates line and column numbers. + * + * @param output_tokens Receives a list of all tokens in the input data. + * @param input_buffer Textual input buffer to be processed, 0-terminated. + * @print_error if something goes wrong */ +void Tokenize(TokenList &output_tokens, const char *input, size_t length); + +/** Tokenizer function for binary FBX files. + * + * Emits a token list suitable for direct parsing. + * + * @param output_tokens Receives a list of all tokens in the input data. + * @param input_buffer Binary input buffer to be processed. + * @param length Length of input buffer, in bytes. There is no 0-terminal. + * @print_error if something goes wrong */ +void TokenizeBinary(TokenList &output_tokens, const char *input, size_t length); +} // namespace FBXDocParser + +#endif // FBX_TOKENIZER_H diff --git a/modules/fbx/fbx_parser/FBXUtil.cpp b/modules/fbx/fbx_parser/FBXUtil.cpp new file mode 100644 index 0000000000..508407ea51 --- /dev/null +++ b/modules/fbx/fbx_parser/FBXUtil.cpp @@ -0,0 +1,220 @@ +/*************************************************************************/ +/* FBXUtil.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
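As a small, hypothetical sketch of what the tokenizer declared above produces for one text-format FBX record (the input string is invented for illustration):

```cpp
// Hypothetical sketch: tokenizes a single FBX text record and prints the tokens.
#include "modules/fbx/fbx_parser/FBXTokenizer.h"

#include <cstring>
#include <iostream>

int main() {
	const char *src = "Creator: \"Some exporter\", 100\n";

	FBXDocParser::TokenList tokens;
	FBXDocParser::Tokenize(tokens, src, strlen(src));

	// Expected classification: KEY("Creator"), DATA("\"Some exporter\""), COMMA, DATA("100").
	for (FBXDocParser::TokenPtr t : tokens) {
		std::cout << t->Type() << " -> " << t->StringContents() << "\n";
		delete t; // in this sketch the caller owns the raw token pointers
	}
	return 0;
}
```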
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2019, assimp team + + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +*/ + +/** @file FBXUtil.cpp + * @brief Implementation of internal FBX utility functions + */ + +#include "FBXUtil.h" +#include "FBXTokenizer.h" +#include <cstring> +#include <string> + +namespace FBXDocParser { +namespace Util { + +// ------------------------------------------------------------------------------------------------ +const char *TokenTypeString(TokenType t) { + switch (t) { + case TokenType_OPEN_BRACKET: + return "TOK_OPEN_BRACKET"; + + case TokenType_CLOSE_BRACKET: + return "TOK_CLOSE_BRACKET"; + + case TokenType_DATA: + return "TOK_DATA"; + + case TokenType_COMMA: + return "TOK_COMMA"; + + case TokenType_KEY: + return "TOK_KEY"; + + case TokenType_BINARY_DATA: + return "TOK_BINARY_DATA"; + } + + //ai_assert(false); + return ""; +} + +// Generated by this formula: T["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[i]] = i; +static const uint8_t base64DecodeTable[128] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, 255, 255, 255, 255, + 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, + 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 255, 255, 255, 255, 255 +}; + +uint8_t DecodeBase64(char ch) { + const auto idx = static_cast<uint8_t>(ch); + if (idx > 127) + return 255; + return base64DecodeTable[idx]; +} + +size_t ComputeDecodedSizeBase64(const char *in, size_t inLength) { + if (inLength < 2) { + return 0; + } + const size_t equals = size_t(in[inLength - 1] == '=') + size_t(in[inLength - 2] == '='); + const size_t full_length = (inLength * 3) >> 2; // div by 4 + if (full_length < equals) { + return 0; + } + return full_length - equals; +} + +size_t DecodeBase64(const char *in, size_t inLength, uint8_t *out, size_t maxOutLength) { + if (maxOutLength == 0 || inLength < 2) { + return 0; + } + const size_t realLength = inLength - size_t(in[inLength - 1] == '=') - size_t(in[inLength - 2] == '='); + size_t dst_offset = 0; + int val = 0, valb = -8; + for (size_t src_offset = 0; src_offset < realLength; ++src_offset) { + const uint8_t table_value = Util::DecodeBase64(in[src_offset]); + if (table_value == 255) { + return 0; + } + val = (val << 6) + table_value; + valb += 6; + if (valb >= 0) { + out[dst_offset++] = static_cast<uint8_t>((val >> valb) & 0xFF); + valb -= 8; + val &= 0xFFF; + } + } + return dst_offset; +} + +static const char to_base64_string[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +char EncodeBase64(char byte) { + return to_base64_string[(size_t)byte]; +} + +/** Encodes a block of 4 bytes to base64 encoding +* @param bytes Bytes to encode. +* @param out_string String to write encoded values to. +* @param string_pos Position in out_string. 
+*/ +void EncodeByteBlock(const char *bytes, std::string &out_string, size_t string_pos) { + char b0 = (bytes[0] & 0xFC) >> 2; + char b1 = (bytes[0] & 0x03) << 4 | ((bytes[1] & 0xF0) >> 4); + char b2 = (bytes[1] & 0x0F) << 2 | ((bytes[2] & 0xC0) >> 6); + char b3 = (bytes[2] & 0x3F); + + out_string[string_pos + 0] = EncodeBase64(b0); + out_string[string_pos + 1] = EncodeBase64(b1); + out_string[string_pos + 2] = EncodeBase64(b2); + out_string[string_pos + 3] = EncodeBase64(b3); +} + +std::string EncodeBase64(const char *data, size_t length) { + // calculate extra bytes needed to get a multiple of 3 + size_t extraBytes = 3 - length % 3; + + // number of base64 bytes + size_t encodedBytes = 4 * (length + extraBytes) / 3; + + std::string encoded_string(encodedBytes, '='); + + // read blocks of 3 bytes + for (size_t ib3 = 0; ib3 < length / 3; ib3++) { + const size_t iByte = ib3 * 3; + const size_t iEncodedByte = ib3 * 4; + const char *currData = &data[iByte]; + + EncodeByteBlock(currData, encoded_string, iEncodedByte); + } + + // if size of data is not a multiple of 3, also encode the final bytes (and add zeros where needed) + if (extraBytes > 0) { + char finalBytes[4] = { 0, 0, 0, 0 }; + memcpy(&finalBytes[0], &data[length - length % 3], length % 3); + + const size_t iEncodedByte = encodedBytes - 4; + EncodeByteBlock(&finalBytes[0], encoded_string, iEncodedByte); + + // add '=' at the end + for (size_t i = 0; i < 4 * extraBytes / 3; i++) + encoded_string[encodedBytes - i - 1] = '='; + } + return encoded_string; +} +} // namespace Util +} // namespace FBXDocParser diff --git a/thirdparty/assimp/code/FBX/FBXUtil.h b/modules/fbx/fbx_parser/FBXUtil.h index b634418858..553d5fbc6b 100644 --- a/thirdparty/assimp/code/FBX/FBXUtil.h +++ b/modules/fbx/fbx_parser/FBXUtil.h @@ -1,3 +1,33 @@ +/*************************************************************************/ +/* FBXUtil.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
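As a hypothetical round-trip sketch for the base64 helpers above (the input string is invented for illustration):

```cpp
// Hypothetical sketch: encodes a short buffer to base64 and decodes it back.
#include "modules/fbx/fbx_parser/FBXUtil.h"

#include <cassert>
#include <cstring>
#include <string>
#include <vector>

int main() {
	const char raw[] = "FBX util"; // 8 bytes; deliberately not a multiple of 3
	const std::string encoded = FBXDocParser::Util::EncodeBase64(raw, strlen(raw));

	std::vector<uint8_t> decoded(FBXDocParser::Util::ComputeDecodedSizeBase64(encoded.c_str(), encoded.size()));
	const size_t written = FBXDocParser::Util::DecodeBase64(encoded.c_str(), encoded.size(), decoded.data(), decoded.size());

	assert(written == strlen(raw));
	assert(memcmp(decoded.data(), raw, written) == 0);
	return 0;
}
```

One observation from reading the code above, hedged as it has not been run here: for inputs whose length is an exact multiple of three, EncodeBase64() appears to append a full extra padding block, which DecodeBase64() then rejects, so the sketch deliberately uses an 8-byte input.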
*/ +/*************************************************************************/ + /* Open Asset Import Library (assimp) ---------------------------------------------------------------------- @@ -43,61 +73,18 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** @file FBXUtil.h * @brief FBX utility functions for internal use */ -#ifndef INCLUDED_AI_FBX_UTIL_H -#define INCLUDED_AI_FBX_UTIL_H +#ifndef FBX_UTIL_H +#define FBX_UTIL_H -#include "FBXCompileConfig.h" #include "FBXTokenizer.h" #include <stdint.h> -namespace Assimp { -namespace FBX { - +namespace FBXDocParser { namespace Util { - -/** helper for std::for_each to delete all heap-allocated items in a container */ -template<typename T> -struct delete_fun -{ - void operator()(const volatile T* del) { - delete del; - } -}; - /** Get a string representation for a #TokenType. */ -const char* TokenTypeString(TokenType t); - - - -/** Format log/error messages using a given offset in the source binary file - * - * @param prefix Message prefix to be preprended to the location info. - * @param text Message text - * @param line Line index, 1-based - * @param column Column index, 1-based - * @return A string of the following format: {prefix} (offset 0x{offset}) {text}*/ -std::string AddOffset(const std::string& prefix, const std::string& text, size_t offset); - - -/** Format log/error messages using a given line location in the source file. - * - * @param prefix Message prefix to be preprended to the location info. - * @param text Message text - * @param line Line index, 1-based - * @param column Column index, 1-based - * @return A string of the following format: {prefix} (line {line}, col {column}) {text}*/ -std::string AddLineAndColumn(const std::string& prefix, const std::string& text, unsigned int line, unsigned int column); - - -/** Format log/error messages using a given cursor token. - * - * @param prefix Message prefix to be preprended to the location info. - * @param text Message text - * @param tok Token where parsing/processing stopped - * @return A string of the following format: {prefix} ({token-type}, line {line}, col {column}) {text}*/ -std::string AddTokenText(const std::string& prefix, const std::string& text, const Token* tok); +const char *TokenTypeString(TokenType t); /** Decode a single Base64-encoded character. * @@ -110,7 +97,7 @@ uint8_t DecodeBase64(char ch); * @param in Characters to decode. * @param inLength Number of characters to decode. * @return size of the decoded data (number of bytes)*/ -size_t ComputeDecodedSizeBase64(const char* in, size_t inLength); +size_t ComputeDecodedSizeBase64(const char *in, size_t inLength); /** Decode a Base64-encoded string * @@ -119,7 +106,7 @@ size_t ComputeDecodedSizeBase64(const char* in, size_t inLength); * @param out Pointer where we will store the decoded data. * @param maxOutLength Size of output buffer. * @return size of the decoded data (number of bytes)*/ -size_t DecodeBase64(const char* in, size_t inLength, uint8_t* out, size_t maxOutLength); +size_t DecodeBase64(const char *in, size_t inLength, uint8_t *out, size_t maxOutLength); char EncodeBase64(char byte); @@ -128,10 +115,8 @@ char EncodeBase64(char byte); * @param data Binary data to encode. * @param inLength Number of bytes to encode. * @return base64-encoded string*/ -std::string EncodeBase64(const char* data, size_t length); - -} -} -} +std::string EncodeBase64(const char *data, size_t length); +} // namespace Util +} // namespace FBXDocParser -#endif // ! 
INCLUDED_AI_FBX_UTIL_H +#endif // FBX_UTIL_H diff --git a/thirdparty/assimp/include/assimp/Defines.h b/modules/fbx/fbx_parser/LICENSE index be3e2fafd6..b42fc6efe6 100644 --- a/thirdparty/assimp/include/assimp/Defines.h +++ b/modules/fbx/fbx_parser/LICENSE @@ -1,8 +1,10 @@ -/* +The files in this folder were originally from ASSIMP, but have been heavily modified to fix bugs and match coding +conventions of the Godot Engine project. We have kept a copy of the applicable licenses in the folder as required by +the license. + Open Asset Import Library (assimp) ----------------------------------------------------------------------- -Copyright (c) 2006-2012, assimp team +Copyright (c) 2006-2020, assimp team All rights reserved. Redistribution and use of this software in source and binary forms, @@ -35,24 +37,3 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ----------------------------------------------------------------------- -*/ - -#pragma once -#ifndef AI_DEFINES_H_INC -#define AI_DEFINES_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -// We need those constants, workaround for any platforms where nobody defined them yet -#if (!defined SIZE_MAX) -# define SIZE_MAX (~((size_t)0)) -#endif - -#if (!defined UINT_MAX) -# define UINT_MAX (~((unsigned int)0)) -#endif - -#endif // AI_DEINES_H_INC diff --git a/modules/assimp/register_types.cpp b/modules/fbx/register_types.cpp index 6cb0fc982f..28fa69282e 100644 --- a/modules/assimp/register_types.cpp +++ b/modules/fbx/register_types.cpp @@ -31,22 +31,22 @@ #include "register_types.h" #include "editor/editor_node.h" -#include "editor_scene_importer_assimp.h" +#include "editor_scene_importer_fbx.h" #ifdef TOOLS_ENABLED static void _editor_init() { - Ref<EditorSceneImporterAssimp> import_assimp; - import_assimp.instance(); - ResourceImporterScene::get_singleton()->add_importer(import_assimp); + Ref<EditorSceneImporterFBX> import_fbx; + import_fbx.instance(); + ResourceImporterScene::get_singleton()->add_importer(import_fbx); } #endif -void register_assimp_types() { +void register_fbx_types() { #ifdef TOOLS_ENABLED ClassDB::APIType prev_api = ClassDB::get_current_api(); ClassDB::set_current_api(ClassDB::API_EDITOR); - ClassDB::register_class<EditorSceneImporterAssimp>(); + ClassDB::register_class<EditorSceneImporterFBX>(); ClassDB::set_current_api(prev_api); @@ -54,5 +54,5 @@ void register_assimp_types() { #endif } -void unregister_assimp_types() { +void unregister_fbx_types() { } diff --git a/modules/assimp/register_types.h b/modules/fbx/register_types.h index f399a7acc6..26328c4c15 100644 --- a/modules/assimp/register_types.h +++ b/modules/fbx/register_types.h @@ -28,10 +28,10 @@ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /*************************************************************************/ -#ifndef ASSIMP_REGISTER_TYPES_H -#define ASSIMP_REGISTER_TYPES_H +#ifndef FBX_REGISTER_TYPES_H +#define FBX_REGISTER_TYPES_H -void register_assimp_types(); -void unregister_assimp_types(); +void register_fbx_types(); +void unregister_fbx_types(); -#endif // ASSIMP_REGISTER_TYPES_H +#endif // FBX_REGISTER_TYPES_H diff --git a/modules/fbx/tools/import_utils.cpp b/modules/fbx/tools/import_utils.cpp new file mode 100644 index 0000000000..5c9e433ab8 --- /dev/null +++ b/modules/fbx/tools/import_utils.cpp @@ -0,0 +1,151 @@ +/*************************************************************************/ +/* import_utils.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "import_utils.h" + +Vector3 ImportUtils::deg2rad(const Vector3 &p_rotation) { + return p_rotation / 180.0 * Math_PI; +} + +Vector3 ImportUtils::rad2deg(const Vector3 &p_rotation) { + return p_rotation / Math_PI * 180.0; +} + +Basis ImportUtils::EulerToBasis(FBXDocParser::Model::RotOrder mode, const Vector3 &p_rotation) { + Basis ret; + + // FBX is using intrinsic euler, we can convert intrinsic to extrinsic (the one used in godot + // by simply invert its order: https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf + switch (mode) { + case FBXDocParser::Model::RotOrder_EulerXYZ: + ret.set_euler_zyx(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_EulerXZY: + ret.set_euler_yzx(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_EulerYZX: + ret.set_euler_xzy(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_EulerYXZ: + ret.set_euler_zxy(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_EulerZXY: + ret.set_euler_yxz(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_EulerZYX: + ret.set_euler_xyz(p_rotation); + break; + + case FBXDocParser::Model::RotOrder_SphericXYZ: + // TODO do this. 
+ break; + + default: + // If you land here, Please integrate all enums. + CRASH_NOW_MSG("This is not unreachable."); + } + + return ret; +} + +Quat ImportUtils::EulerToQuaternion(FBXDocParser::Model::RotOrder mode, const Vector3 &p_rotation) { + return ImportUtils::EulerToBasis(mode, p_rotation); +} + +Vector3 ImportUtils::BasisToEuler(FBXDocParser::Model::RotOrder mode, const Basis &p_rotation) { + // FBX is using intrinsic euler, we can convert intrinsic to extrinsic (the one used in godot + // by simply invert its order: https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf + switch (mode) { + case FBXDocParser::Model::RotOrder_EulerXYZ: + return p_rotation.get_euler_zyx(); + + case FBXDocParser::Model::RotOrder_EulerXZY: + return p_rotation.get_euler_yzx(); + + case FBXDocParser::Model::RotOrder_EulerYZX: + return p_rotation.get_euler_xzy(); + + case FBXDocParser::Model::RotOrder_EulerYXZ: + return p_rotation.get_euler_zxy(); + + case FBXDocParser::Model::RotOrder_EulerZXY: + return p_rotation.get_euler_yxz(); + + case FBXDocParser::Model::RotOrder_EulerZYX: + return p_rotation.get_euler_xyz(); + + case FBXDocParser::Model::RotOrder_SphericXYZ: + // TODO + return Vector3(); + + default: + // If you land here, Please integrate all enums. + CRASH_NOW_MSG("This is not unreachable."); + return Vector3(); + } +} + +Vector3 ImportUtils::QuaternionToEuler(FBXDocParser::Model::RotOrder mode, const Quat &p_rotation) { + return BasisToEuler(mode, p_rotation); +} + +Transform get_unscaled_transform(const Transform &p_initial, real_t p_scale) { + Transform unscaled = Transform(p_initial.basis, p_initial.origin * p_scale); + ERR_FAIL_COND_V_MSG(unscaled.basis.determinant() == 0, Transform(), "det is zero unscaled?"); + return unscaled; +} + +Vector3 get_poly_normal(const std::vector<Vector3> &p_vertices) { + ERR_FAIL_COND_V_MSG(p_vertices.size() < 3, Vector3(0, 0, 0), "At least 3 vertices are necesary"); + // Using long double to make sure that normal is computed for even really tiny objects. + typedef long double ldouble; + ldouble x = 0.0; + ldouble y = 0.0; + ldouble z = 0.0; + for (size_t i = 0; i < p_vertices.size(); i += 1) { + const Vector3 current = p_vertices[i]; + const Vector3 next = p_vertices[(i + 1) % p_vertices.size()]; + x += (ldouble(current.y) - ldouble(next.y)) * (ldouble(current.z) + ldouble(next.z)); + y += (ldouble(current.z) - ldouble(next.z)) * (ldouble(current.x) + ldouble(next.x)); + z += (ldouble(current.x) - ldouble(next.x)) * (ldouble(current.y) + ldouble(next.y)); + } + const ldouble l2 = x * x + y * y + z * z; + if (l2 == 0.0) { + return (p_vertices[0] - p_vertices[1]).normalized().cross((p_vertices[0] - p_vertices[2]).normalized()).normalized(); + } else { + const double l = Math::sqrt(double(l2)); + return Vector3(x / l, y / l, z / l); + } +} diff --git a/modules/fbx/tools/import_utils.h b/modules/fbx/tools/import_utils.h new file mode 100644 index 0000000000..98a0cef908 --- /dev/null +++ b/modules/fbx/tools/import_utils.h @@ -0,0 +1,400 @@ +/*************************************************************************/ +/* import_utils.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
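The comments in EulerToBasis()/BasisToEuler() above lean on the identity that an intrinsic X, Y, Z rotation (each step about the already-rotated body axes) equals an extrinsic Z, Y, X rotation (each step about the fixed world axes). A small self-contained numeric check of that identity, using plain arrays and Rodrigues' formula rather than Godot's Basis; illustrative only.

// Numeric check of the intrinsic/extrinsic order reversal described above.
#include <array>
#include <cmath>
#include <cstdio>

using Vec3 = std::array<double, 3>;

Vec3 cross(const Vec3 &a, const Vec3 &b) {
	return { a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0] };
}
double dot(const Vec3 &a, const Vec3 &b) {
	return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Rodrigues' formula: rotate v about the unit axis k by angle a (radians).
Vec3 rotate(const Vec3 &v, const Vec3 &k, double a) {
	const Vec3 kxv = cross(k, v);
	const double kdv = dot(k, v);
	Vec3 r;
	for (int i = 0; i < 3; i++) {
		r[i] = v[i] * cos(a) + kxv[i] * sin(a) + k[i] * kdv * (1.0 - cos(a));
	}
	return r;
}

int main() {
	const double ax = 0.3, ay = -1.1, az = 0.7; // arbitrary test angles in radians
	const Vec3 v = { 1.0, 2.0, 3.0 };
	const Vec3 X = { 1, 0, 0 }, Y = { 0, 1, 0 }, Z = { 0, 0, 1 };

	// Intrinsic X -> Y -> Z: each step rotates about the *body* axes, so the Y and Z
	// axes used below are themselves rotated by the earlier steps.
	Vec3 v1 = rotate(v, X, ax);
	Vec3 y1 = rotate(Y, X, ax);
	Vec3 z1 = rotate(Z, X, ax);
	Vec3 v2 = rotate(v1, y1, ay);
	Vec3 z2 = rotate(z1, y1, ay);
	Vec3 intrinsic = rotate(v2, z2, az);

	// Extrinsic Z -> Y -> X: each step rotates about the *fixed* world axes,
	// applied in reverse order, which is exactly the reversal the comment describes.
	Vec3 extrinsic = rotate(rotate(rotate(v, Z, az), Y, ay), X, ax);

	printf("intrinsic XYZ: %f %f %f\n", intrinsic[0], intrinsic[1], intrinsic[2]);
	printf("extrinsic ZYX: %f %f %f\n", extrinsic[0], extrinsic[1], extrinsic[2]);
	// Both lines print the same vector (up to rounding).
	return 0;
}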
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef IMPORT_UTILS_FBX_IMPORTER_H +#define IMPORT_UTILS_FBX_IMPORTER_H + +#include "core/io/image_loader.h" + +#include "data/import_state.h" +#include "fbx_parser/FBXDocument.h" + +#include <string> + +#define CONVERT_FBX_TIME(time) static_cast<double>(time) / 46186158000LL + +/** + * Import Utils + * Conversion tools / glue code to convert from FBX to Godot +*/ +class ImportUtils { +public: + /// Convert a vector from degrees to radians. + static Vector3 deg2rad(const Vector3 &p_rotation); + + /// Convert a vector from radians to degrees. + static Vector3 rad2deg(const Vector3 &p_rotation); + + /// Converts rotation order vector (in rad) to quaternion. + static Basis EulerToBasis(FBXDocParser::Model::RotOrder mode, const Vector3 &p_rotation); + + /// Converts rotation order vector (in rad) to quaternion. + static Quat EulerToQuaternion(FBXDocParser::Model::RotOrder mode, const Vector3 &p_rotation); + + /// Converts basis into rotation order vector (in rad). + static Vector3 BasisToEuler(FBXDocParser::Model::RotOrder mode, const Basis &p_rotation); + + /// Converts quaternion into rotation order vector (in rad). + static Vector3 QuaternionToEuler(FBXDocParser::Model::RotOrder mode, const Quat &p_rotation); + + static void debug_xform(String name, const Transform &t) { + print_verbose(name + " " + t.origin + " rotation: " + (t.basis.get_euler() * (180 / Math_PI))); + } + + static String FBXNodeToName(const std::string &name) { + // strip Model:: prefix, avoiding ambiguities (i.e. don't strip if + // this causes ambiguities, well possible between empty identifiers, + // such as "Model::" and ""). Make sure the behaviour is consistent + // across multiple calls to FixNodeName(). + + // We must remove this from the name + // Some bones have this + // SubDeformer:: + // Meshes, Joints have this, some other IK elements too. 
+ // Model:: + + String node_name = String(name.c_str()); + + if (node_name.substr(0, 7) == "Model::") { + node_name = node_name.substr(7, node_name.length() - 7); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 13) == "SubDeformer::") { + node_name = node_name.substr(13, node_name.length() - 13); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 11) == "AnimStack::") { + node_name = node_name.substr(11, node_name.length() - 11); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 15) == "AnimCurveNode::") { + node_name = node_name.substr(15, node_name.length() - 15); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 11) == "AnimCurve::") { + node_name = node_name.substr(11, node_name.length() - 11); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 10) == "Geometry::") { + node_name = node_name.substr(10, node_name.length() - 10); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 10) == "Material::") { + node_name = node_name.substr(10, node_name.length() - 10); + return node_name.replace(":", ""); + } + + if (node_name.substr(0, 9) == "Texture::") { + node_name = node_name.substr(9, node_name.length() - 9); + return node_name.replace(":", ""); + } + + return node_name.replace(":", ""); + } + + static std::string FBXAnimMeshName(const std::string &name) { + if (name.length()) { + size_t indexOf = name.find_first_of("::"); + if (indexOf != std::string::npos && indexOf < name.size() - 2) { + return name.substr(indexOf + 2); + } + } + return name.length() ? name : "AnimMesh"; + } + + static Vector3 safe_import_vector3(const Vector3 &p_vec) { + Vector3 vector = p_vec; + if (Math::is_equal_approx(0, vector.x)) { + vector.x = 0; + } + + if (Math::is_equal_approx(0, vector.y)) { + vector.y = 0; + } + + if (Math::is_equal_approx(0, vector.z)) { + vector.z = 0; + } + return vector; + } + + static void debug_xform(String name, const Basis &t) { + //print_verbose(name + " rotation: " + (t.get_euler() * (180 / Math_PI))); + } + + static Vector3 FixAxisConversions(Vector3 input) { + return Vector3(input.x, input.y, input.z); + } + + static void AlignMeshAxes(std::vector<Vector3> &vertex_data) { + for (size_t x = 0; x < vertex_data.size(); x++) { + vertex_data[x] = FixAxisConversions(vertex_data[x]); + } + } + + struct AssetImportFbx { + enum ETimeMode { + TIME_MODE_DEFAULT = 0, + TIME_MODE_120 = 1, + TIME_MODE_100 = 2, + TIME_MODE_60 = 3, + TIME_MODE_50 = 4, + TIME_MODE_48 = 5, + TIME_MODE_30 = 6, + TIME_MODE_30_DROP = 7, + TIME_MODE_NTSC_DROP_FRAME = 8, + TIME_MODE_NTSC_FULL_FRAME = 9, + TIME_MODE_PAL = 10, + TIME_MODE_CINEMA = 11, + TIME_MODE_1000 = 12, + TIME_MODE_CINEMA_ND = 13, + TIME_MODE_CUSTOM = 14, + TIME_MODE_TIME_MODE_COUNT = 15 + }; + enum UpAxis { + UP_VECTOR_AXIS_X = 1, + UP_VECTOR_AXIS_Y = 2, + UP_VECTOR_AXIS_Z = 3 + }; + enum FrontAxis { + FRONT_PARITY_EVEN = 1, + FRONT_PARITY_ODD = 2, + }; + + enum CoordAxis { + COORD_RIGHT = 0, + COORD_LEFT = 1 + }; + }; + + /** Get fbx fps for time mode meta data + */ + static float get_fbx_fps(int32_t time_mode) { + switch (time_mode) { + case AssetImportFbx::TIME_MODE_DEFAULT: + return 24; + case AssetImportFbx::TIME_MODE_120: + return 120; + case AssetImportFbx::TIME_MODE_100: + return 100; + case AssetImportFbx::TIME_MODE_60: + return 60; + case AssetImportFbx::TIME_MODE_50: + return 50; + case AssetImportFbx::TIME_MODE_48: + return 48; + case AssetImportFbx::TIME_MODE_30: + return 30; + case AssetImportFbx::TIME_MODE_30_DROP: + 
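FBXNodeToName() above strips the FBX type prefix ("Model::", "SubDeformer::", "Geometry::", and so on) and then removes any remaining ':' characters, matching what the String::replace calls do. A rough standalone equivalent with std::string, for illustration only:

// Illustrative re-implementation of the prefix stripping done by FBXNodeToName() above.
#include <cstdio>
#include <string>

std::string fbx_node_to_name(std::string name) {
	static const char *prefixes[] = {
		"Model::", "SubDeformer::", "AnimStack::", "AnimCurveNode::",
		"AnimCurve::", "Geometry::", "Material::", "Texture::"
	};
	for (const char *p : prefixes) {
		const size_t len = std::string(p).size();
		if (name.compare(0, len, p) == 0) {
			name = name.substr(len);
			break;
		}
	}
	// The original also drops any remaining ':' via String::replace(":", "").
	std::string out;
	for (char c : name) {
		if (c != ':') {
			out += c;
		}
	}
	return out;
}

int main() {
	printf("%s\n", fbx_node_to_name("Model::Cube").c_str());           // Cube
	printf("%s\n", fbx_node_to_name("SubDeformer::Left:Arm").c_str()); // LeftArm
	return 0;
}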
return 30; + case AssetImportFbx::TIME_MODE_NTSC_DROP_FRAME: + return 29.9700262f; + case AssetImportFbx::TIME_MODE_NTSC_FULL_FRAME: + return 29.9700262f; + case AssetImportFbx::TIME_MODE_PAL: + return 25; + case AssetImportFbx::TIME_MODE_CINEMA: + return 24; + case AssetImportFbx::TIME_MODE_1000: + return 1000; + case AssetImportFbx::TIME_MODE_CINEMA_ND: + return 23.976f; + case AssetImportFbx::TIME_MODE_CUSTOM: + return -1; + } + return 0; + } + + static float get_fbx_fps(const FBXDocParser::FileGlobalSettings *FBXSettings) { + int time_mode = FBXSettings->TimeMode(); + + // get the animation FPS + float frames_per_second = get_fbx_fps(time_mode); + + // handle animation custom FPS time. + if (time_mode == ImportUtils::AssetImportFbx::TIME_MODE_CUSTOM) { + print_verbose("FBX Animation has custom FPS setting"); + frames_per_second = FBXSettings->CustomFrameRate(); + + // not our problem this is the modeller, we can print as an error so they can fix the source. + if (frames_per_second == 0) { + print_error("Custom animation time in file is set to 0 value, animation won't play, please edit your file to correct the FPS value"); + } + } + return frames_per_second; + } + + /** + * Find hardcoded textures from assimp which could be in many different directories + */ + + /** + * set_texture_mapping_mode + * Helper to check the mapping mode of the texture (repeat, clamp and mirror) + */ + // static void set_texture_mapping_mode(aiTextureMapMode *map_mode, Ref<ImageTexture> texture) { + // ERR_FAIL_COND(texture.is_null()); + // ERR_FAIL_COND(map_mode == NULL); + // aiTextureMapMode tex_mode = map_mode[0]; + + // int32_t flags = Texture::FLAGS_DEFAULT; + // if (tex_mode == aiTextureMapMode_Wrap) { + // //Default + // } else if (tex_mode == aiTextureMapMode_Clamp) { + // flags = flags & ~Texture::FLAG_REPEAT; + // } else if (tex_mode == aiTextureMapMode_Mirror) { + // flags = flags | Texture::FLAG_MIRRORED_REPEAT; + // } + // texture->set_flags(flags); + // } + + /** + * Load or load from cache image :) + * We need to upgrade this in the later version :) should not be hard + */ + //static Ref<Image> load_image(ImportState &state, const aiScene *p_scene, String p_path){ + // Map<String, Ref<Image> >::Element *match = state.path_to_image_cache.find(p_path); + + // // if our cache contains this image then don't bother + // if (match) { + // return match->get(); + // } + + // Vector<String> split_path = p_path.get_basename().split("*"); + // if (split_path.size() == 2) { + // size_t texture_idx = split_path[1].to_int(); + // ERR_FAIL_COND_V(texture_idx >= p_scene->mNumTextures, Ref<Image>()); + // aiTexture *tex = p_scene->mTextures[texture_idx]; + // String filename = AssimpUtils::get_raw_string_from_assimp(tex->mFilename); + // filename = filename.get_file(); + // print_verbose("Open Asset Import: Loading embedded texture " + filename); + // if (tex->mHeight == 0) { + // if (tex->CheckFormat("png")) { + // Ref<Image> img = Image::_png_mem_loader_func((uint8_t *)tex->pcData, tex->mWidth); + // ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); + // state.path_to_image_cache.insert(p_path, img); + // return img; + // } else if (tex->CheckFormat("jpg")) { + // Ref<Image> img = Image::_jpg_mem_loader_func((uint8_t *)tex->pcData, tex->mWidth); + // ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); + // state.path_to_image_cache.insert(p_path, img); + // return img; + // } else if (tex->CheckFormat("dds")) { + // ERR_FAIL_COND_V_MSG(true, Ref<Image>(), "Open Asset Import: Embedded dds not implemented"); + // } + // 
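CONVERT_FBX_TIME and get_fbx_fps() above encode two FBX conventions: animation time values arrive in ticks of 1/46,186,158,000 of a second (the FBX SDK's tick unit), and the frame rate is stored as a TimeMode enum with TIME_MODE_CUSTOM deferring to CustomFrameRate(). A tiny sketch of that arithmetic, assuming the same constant:

// Sketch of the FBX time arithmetic used above: keys arrive in FBX ticks and are
// converted to seconds with the 46186158000 constant from CONVERT_FBX_TIME.
#include <cstdint>
#include <cstdio>

static const int64_t FBX_TICKS_PER_SECOND = 46186158000LL;

int main() {
	const double fps = 30.0;                                             // e.g. TIME_MODE_30
	const int64_t one_frame = int64_t(FBX_TICKS_PER_SECOND / fps);       // 1539538600 ticks
	const double seconds = double(one_frame) / FBX_TICKS_PER_SECOND;     // back to seconds
	printf("one frame at %.0f fps = %lld ticks = %.6f s\n", fps, (long long)one_frame, seconds);
	return 0;
}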
} else { + // Ref<Image> img; + // img.instance(); + // PoolByteArray arr; + // uint32_t size = tex->mWidth * tex->mHeight; + // arr.resize(size); + // memcpy(arr.write().ptr(), tex->pcData, size); + // ERR_FAIL_COND_V(arr.size() % 4 != 0, Ref<Image>()); + // //ARGB8888 to RGBA8888 + // for (int32_t i = 0; i < arr.size() / 4; i++) { + // arr.write().ptr()[(4 * i) + 3] = arr[(4 * i) + 0]; + // arr.write().ptr()[(4 * i) + 0] = arr[(4 * i) + 1]; + // arr.write().ptr()[(4 * i) + 1] = arr[(4 * i) + 2]; + // arr.write().ptr()[(4 * i) + 2] = arr[(4 * i) + 3]; + // } + // img->create(tex->mWidth, tex->mHeight, true, Image::FORMAT_RGBA8, arr); + // ERR_FAIL_COND_V(img.is_null(), Ref<Image>()); + // state.path_to_image_cache.insert(p_path, img); + // return img; + // } + // return Ref<Image>(); + // } else { + // Ref<Texture> texture = ResourceLoader::load(p_path); + // ERR_FAIL_COND_V(texture.is_null(), Ref<Image>()); + // Ref<Image> image = texture->get_data(); + // ERR_FAIL_COND_V(image.is_null(), Ref<Image>()); + // state.path_to_image_cache.insert(p_path, image); + // return image; + // } + + // return Ref<Image>(); + //} + + // /* create texture from assimp data, if found in path */ + // static bool CreateAssimpTexture( + // AssimpImporter::ImportState &state, + // aiString texture_path, + // String &filename, + // String &path, + // AssimpImageData &image_state) { + // filename = get_raw_string_from_assimp(texture_path); + // path = state.path.get_base_dir().plus_file(filename.replace("\\", "/")); + // bool found = false; + // find_texture_path(state.path, path, found); + // if (found) { + // image_state.raw_image = AssimpUtils::load_image(state, state.assimp_scene, path); + // if (image_state.raw_image.is_valid()) { + // image_state.texture.instance(); + // image_state.texture->create_from_image(image_state.raw_image); + // image_state.texture->set_storage(ImageTexture::STORAGE_COMPRESS_LOSSY); + // return true; + // } + // } + + // return false; + // } + // /** GetAssimpTexture + // * Designed to retrieve textures for you + // */ + // static bool GetAssimpTexture( + // AssimpImporter::ImportState &state, + // aiMaterial *ai_material, + // aiTextureType texture_type, + // String &filename, + // String &path, + // AssimpImageData &image_state) { + // aiString ai_filename = aiString(); + // if (AI_SUCCESS == ai_material->GetTexture(texture_type, 0, &ai_filename, NULL, NULL, NULL, NULL, image_state.map_mode)) { + // return CreateAssimpTexture(state, ai_filename, filename, path, image_state); + // } + + // return false; + // } +}; + +// Apply the transforms so the basis will have scale 1. +Transform get_unscaled_transform(const Transform &p_initial, real_t p_scale); + +/// Uses the Newell's method to compute any polygon normal. +/// The polygon must be at least size of 3 or bigger. +Vector3 get_poly_normal(const std::vector<Vector3> &p_vertices); + +#endif // IMPORT_UTILS_FBX_IMPORTER_H diff --git a/modules/fbx/tools/validation_tools.cpp b/modules/fbx/tools/validation_tools.cpp new file mode 100644 index 0000000000..0c4d7abe8d --- /dev/null +++ b/modules/fbx/tools/validation_tools.cpp @@ -0,0 +1,48 @@ +/*************************************************************************/ +/* validation_tools.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. 
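get_poly_normal() above uses Newell's method, which sums per-edge contributions and therefore stays usable for polygons that are nearly degenerate or not perfectly planar. A minimal standalone version of the same accumulation (without the degenerate fallback the real function has), checked against a counter-clockwise unit square in the XY plane:

// Minimal sketch of the Newell accumulation used by get_poly_normal() above.
#include <cmath>
#include <cstdio>
#include <vector>

struct V3 {
	double x, y, z;
};

V3 newell_normal(const std::vector<V3> &v) {
	double x = 0.0, y = 0.0, z = 0.0;
	for (size_t i = 0; i < v.size(); i++) {
		const V3 &c = v[i];
		const V3 &n = v[(i + 1) % v.size()]; // next vertex, wrapping around
		x += (c.y - n.y) * (c.z + n.z);
		y += (c.z - n.z) * (c.x + n.x);
		z += (c.x - n.x) * (c.y + n.y);
	}
	const double l = std::sqrt(x * x + y * y + z * z);
	return { x / l, y / l, z / l };
}

int main() {
	// Counter-clockwise unit square in the XY plane -> normal along +Z.
	std::vector<V3> square = { { 0, 0, 0 }, { 1, 0, 0 }, { 1, 1, 0 }, { 0, 1, 0 } };
	V3 n = newell_normal(square);
	printf("%g %g %g\n", n.x, n.y, n.z); // 0 0 1
	return 0;
}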
*/ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "validation_tools.h" + +#ifdef TOOLS_ENABLED + +#include "core/string/print_string.h" +#include "core/string/ustring.h" + +ValidationTracker::Entries *ValidationTracker::entries_singleton = memnew(ValidationTracker::Entries); + +// for printing our CSV to dump validation problems of files +// later we can make some agnostic tooling for this but this is fine for the time being. +void ValidationTracker::Entries::add_validation_error(String asset_path, String message) { + print_error(message); + // note: implementation is static + validation_entries[asset_path].push_back(message); +} + +#endif // TOOLS_ENABLED diff --git a/modules/fbx/tools/validation_tools.h b/modules/fbx/tools/validation_tools.h new file mode 100644 index 0000000000..649842a1cb --- /dev/null +++ b/modules/fbx/tools/validation_tools.h @@ -0,0 +1,92 @@ +/*************************************************************************/ +/* validation_tools.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef FBX_VALIDATION_TOOLS_H +#define FBX_VALIDATION_TOOLS_H + +#ifdef TOOLS_ENABLED + +#include "core/io/json.h" +#include "core/os/file_access.h" +#include "core/string/ustring.h" +#include "core/templates/local_vector.h" +#include "core/templates/map.h" + +class ValidationTracker { +protected: + struct Entries { + Map<String, LocalVector<String>> validation_entries = Map<String, LocalVector<String>>(); + + // for printing our CSV to dump validation problems of files + // later we can make some agnostic tooling for this but this is fine for the time being. + void add_validation_error(String asset_path, String message); + void print_to_csv() { + print_verbose("Exporting assset validation log please wait"); + String massive_log_file; + + String csv_header = "file_path, error message, extra data\n"; + massive_log_file += csv_header; + + for (Map<String, LocalVector<String>>::Element *element = validation_entries.front(); element; element = element->next()) { + for (unsigned int x = 0; x < element->value().size(); x++) { + const String &line_entry = element->key() + ", " + element->value()[x].c_escape() + "\n"; + massive_log_file += line_entry; + } + } + + String path = "asset_validation_errors.csv"; + Error err; + FileAccess *file = FileAccess::open(path, FileAccess::WRITE, &err); + if (!file || err) { + if (file) + memdelete(file); + print_error("ValidationTracker Error - failed to create file - path: %s\n" + path); + return; + } + + file->store_string(massive_log_file); + if (file->get_error() != OK && file->get_error() != ERR_FILE_EOF) { + print_error("ValidationTracker Error - failed to write to file - path: %s\n" + path); + } + file->close(); + memdelete(file); + } + }; + // asset path, error messages + static Entries *entries_singleton; + +public: + static Entries *get_singleton() { + return entries_singleton; + } +}; + +#endif // TOOLS_ENABLED +#endif // FBX_VALIDATION_TOOLS_H diff --git a/modules/gltf/SCsub b/modules/gltf/SCsub new file mode 100644 index 0000000000..5d03ee8361 --- /dev/null +++ b/modules/gltf/SCsub @@ -0,0 +1,10 @@ +#!/usr/bin/env python + +Import("env") +Import("env_modules") + +env_gltf = env_modules.Clone() +env_gltf.Prepend(CPPPATH=["."]) + +# Godot's own source files +env_gltf.add_source_files(env.modules_sources, "*.cpp") diff --git a/modules/assimp/config.py b/modules/gltf/config.py index 53b8f2f2e3..1505a456d7 100644 --- a/modules/assimp/config.py +++ b/modules/gltf/config.py @@ -1,5 +1,5 @@ def can_build(env, platform): - return env["tools"] + return env["tools"] and not env["disable_3d"] def configure(env): diff --git a/modules/gltf/editor_scene_exporter_gltf_plugin.cpp b/modules/gltf/editor_scene_exporter_gltf_plugin.cpp new file mode 100644 index 0000000000..0680b124b4 --- /dev/null +++ b/modules/gltf/editor_scene_exporter_gltf_plugin.cpp @@ -0,0 +1,90 @@ +/*************************************************************************/ +/* 
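For reference, the intended flow of the tracker declared above appears to be: importers report problems per asset path through the singleton, and print_to_csv() dumps everything to asset_validation_errors.csv at the end of a run. A sketch under that assumption; the call site and messages are illustrative, not taken from the module.

// Hypothetical usage of ValidationTracker as declared above (tools builds only).
#ifdef TOOLS_ENABLED
void report_import_problems_example() {
	ValidationTracker::get_singleton()->add_validation_error(
			"res://models/chair.fbx", "mesh has no UV2 layer, lightmaps will be skipped");
	ValidationTracker::get_singleton()->add_validation_error(
			"res://models/chair.fbx", "bone weights not normalized");

	// Writes asset_validation_errors.csv (relative path, as in print_to_csv above).
	ValidationTracker::get_singleton()->print_to_csv();
}
#endif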
editor_scene_exporter_gltf_plugin.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "editor_scene_exporter_gltf_plugin.h" +#include "core/config/project_settings.h" +#include "core/object/object.h" +#include "core/templates/vector.h" +#include "editor/editor_file_system.h" +#include "scene/3d/mesh_instance_3d.h" +#include "scene/gui/check_box.h" +#include "scene/main/node.h" + +#include "editor/editor_node.h" + +String SceneExporterGLTFPlugin::get_name() const { + return "ConvertGLTF2"; +} + +bool SceneExporterGLTFPlugin::has_main_screen() const { + return false; +} + +SceneExporterGLTFPlugin::SceneExporterGLTFPlugin(EditorNode *p_node) { + editor = p_node; + convert_gltf2.instance(); + file_export_lib = memnew(EditorFileDialog); + editor->get_gui_base()->add_child(file_export_lib); + file_export_lib->connect("file_selected", callable_mp(this, &SceneExporterGLTFPlugin::_gltf2_dialog_action)); + file_export_lib->set_title(TTR("Export Library")); + file_export_lib->set_file_mode(EditorFileDialog::FILE_MODE_SAVE_FILE); + file_export_lib->set_access(EditorFileDialog::ACCESS_FILESYSTEM); + file_export_lib->clear_filters(); + file_export_lib->add_filter("*.glb"); + file_export_lib->add_filter("*.gltf"); + file_export_lib->set_title(TTR("Export Mesh GLTF2")); + String gltf_scene_name = TTR("Export GLTF..."); + add_tool_menu_item(gltf_scene_name, callable_mp(this, &SceneExporterGLTFPlugin::convert_scene_to_gltf2)); +} + +void SceneExporterGLTFPlugin::_gltf2_dialog_action(String p_file) { + Node *root = editor->get_tree()->get_edited_scene_root(); + if (!root) { + editor->show_accept(TTR("This operation can't be done without a scene."), TTR("OK")); + return; + } + List<String> deps; + convert_gltf2->save_scene(root, p_file, p_file, 0, 1000.0f, &deps); + EditorFileSystem::get_singleton()->scan_changes(); +} + +void SceneExporterGLTFPlugin::convert_scene_to_gltf2() { + Node *root = editor->get_tree()->get_edited_scene_root(); + if (!root) { + 
editor->show_accept(TTR("This operation can't be done without a scene."), TTR("OK")); + return; + } + String filename = String(root->get_filename().get_file().get_basename()); + if (filename.empty()) { + filename = root->get_name(); + } + file_export_lib->set_current_file(filename + String(".gltf")); + file_export_lib->popup_centered_ratio(); +} diff --git a/modules/gltf/editor_scene_exporter_gltf_plugin.h b/modules/gltf/editor_scene_exporter_gltf_plugin.h new file mode 100644 index 0000000000..1a8910d866 --- /dev/null +++ b/modules/gltf/editor_scene_exporter_gltf_plugin.h @@ -0,0 +1,52 @@ +/*************************************************************************/ +/* editor_scene_exporter_gltf_plugin.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef EDITOR_SCENE_EXPORTER_GLTF_PLUGIN_H +#define EDITOR_SCENE_EXPORTER_GLTF_PLUGIN_H + +#include "editor/editor_plugin.h" +#include "editor_scene_importer_gltf.h" + +class SceneExporterGLTFPlugin : public EditorPlugin { + GDCLASS(SceneExporterGLTFPlugin, EditorPlugin); + + Ref<PackedSceneGLTF> convert_gltf2; + EditorNode *editor = nullptr; + EditorFileDialog *file_export_lib = nullptr; + void _gltf2_dialog_action(String p_file); + void convert_scene_to_gltf2(); + +public: + virtual String get_name() const override; + bool has_main_screen() const override; + SceneExporterGLTFPlugin(class EditorNode *p_node); +}; + +#endif // EDITOR_SCENE_EXPORTER_GLTF_PLUGIN_H diff --git a/modules/gltf/editor_scene_importer_gltf.cpp b/modules/gltf/editor_scene_importer_gltf.cpp new file mode 100644 index 0000000000..51cb3a6d2e --- /dev/null +++ b/modules/gltf/editor_scene_importer_gltf.cpp @@ -0,0 +1,180 @@ +/*************************************************************************/ +/* editor_scene_importer_gltf.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "core/crypto/crypto_core.h" +#include "core/io/json.h" +#include "core/math/disjoint_set.h" +#include "core/math/math_defs.h" +#include "core/os/file_access.h" +#include "core/os/os.h" +#include "editor/import/resource_importer_scene.h" +#include "modules/gltf/gltf_state.h" +#include "modules/regex/regex.h" +#include "scene/3d/bone_attachment_3d.h" +#include "scene/3d/camera_3d.h" +#include "scene/3d/mesh_instance_3d.h" +#include "scene/animation/animation_player.h" +#include "scene/resources/packed_scene.h" +#include "scene/resources/surface_tool.h" + +#include "modules/gltf/editor_scene_importer_gltf.h" + +uint32_t EditorSceneImporterGLTF::get_import_flags() const { + return ImportFlags::IMPORT_SCENE | ImportFlags::IMPORT_ANIMATION; +} + +void EditorSceneImporterGLTF::get_extensions(List<String> *r_extensions) const { + r_extensions->push_back("gltf"); + r_extensions->push_back("glb"); +} + +Node *EditorSceneImporterGLTF::import_scene(const String &p_path, + uint32_t p_flags, int p_bake_fps, + List<String> *r_missing_deps, + Error *r_err) { + Ref<PackedSceneGLTF> importer; + importer.instance(); + return importer->import_scene(p_path, p_flags, p_bake_fps, r_missing_deps, r_err, Ref<GLTFState>()); +} + +Ref<Animation> EditorSceneImporterGLTF::import_animation(const String &p_path, + uint32_t p_flags, + int p_bake_fps) { + return Ref<Animation>(); +} + +void PackedSceneGLTF::_bind_methods() { + ClassDB::bind_method( + D_METHOD("export_gltf", "node", "path", "flags", "bake_fps"), + &PackedSceneGLTF::export_gltf, DEFVAL(0), DEFVAL(1000.0f)); + ClassDB::bind_method(D_METHOD("pack_gltf", "path", "flags", "bake_fps", "state"), + &PackedSceneGLTF::pack_gltf, DEFVAL(0), DEFVAL(1000.0f), DEFVAL(Ref<GLTFState>())); + ClassDB::bind_method(D_METHOD("import_gltf_scene", "path", "flags", "bake_fps", "state"), + &PackedSceneGLTF::import_gltf_scene, DEFVAL(0), DEFVAL(1000.0f), DEFVAL(Ref<GLTFState>())); +} +Node *PackedSceneGLTF::import_gltf_scene(const String &p_path, uint32_t p_flags, float p_bake_fps, Ref<GLTFState> r_state) { + Error err = FAILED; + List<String> deps; + return import_scene(p_path, p_flags, p_bake_fps, &deps, &err, r_state); +} + +Node *PackedSceneGLTF::import_scene(const String &p_path, uint32_t p_flags, + int p_bake_fps, + List<String> *r_missing_deps, + Error *r_err, + Ref<GLTFState> r_state) { + if (r_state == Ref<GLTFState>()) { + r_state.instance(); + } + r_state->use_named_skin_binds = + p_flags & EditorSceneImporter::IMPORT_USE_NAMED_SKIN_BINDS; + + Ref<GLTFDocument> gltf_document; + gltf_document.instance(); + Error err = gltf_document->parse(r_state, p_path); + *r_err = err; + ERR_FAIL_COND_V(err != Error::OK, nullptr); + + Node3D *root = memnew(Node3D); + for (int32_t root_i = 0; root_i < r_state->root_nodes.size(); root_i++) { + gltf_document->_generate_scene_node(r_state, root, root, r_state->root_nodes[root_i]); + } + gltf_document->_process_mesh_instances(r_state, root); + if (r_state->animations.size()) { + AnimationPlayer *ap = memnew(AnimationPlayer); + root->add_child(ap); + ap->set_owner(root); + for (int i = 0; i < r_state->animations.size(); i++) { + gltf_document->_import_animation(r_state, ap, i, p_bake_fps); + } + } + + return cast_to<Node3D>(root); +} + +void PackedSceneGLTF::pack_gltf(String p_path, int32_t p_flags, + real_t p_bake_fps, Ref<GLTFState> r_state) { + Error err = FAILED; + List<String> deps; + Node *root = import_scene(p_path, p_flags, p_bake_fps, &deps, 
&err, r_state); + ERR_FAIL_COND(err != OK); + pack(root); +} + +void PackedSceneGLTF::save_scene(Node *p_node, const String &p_path, + const String &p_src_path, uint32_t p_flags, + int p_bake_fps, List<String> *r_missing_deps, + Error *r_err) { + Error err = FAILED; + if (r_err) { + *r_err = err; + } + Ref<GLTFDocument> gltf_document; + gltf_document.instance(); + Ref<GLTFState> state; + state.instance(); + err = gltf_document->serialize(state, p_node, p_path); + if (r_err) { + *r_err = err; + } +} + +void PackedSceneGLTF::_build_parent_hierachy(Ref<GLTFState> state) { + // build the hierarchy + for (GLTFNodeIndex node_i = 0; node_i < state->nodes.size(); node_i++) { + for (int j = 0; j < state->nodes[node_i]->children.size(); j++) { + GLTFNodeIndex child_i = state->nodes[node_i]->children[j]; + ERR_FAIL_INDEX(child_i, state->nodes.size()); + if (state->nodes.write[child_i]->parent != -1) { + continue; + } + state->nodes.write[child_i]->parent = node_i; + } + } +} + +Error PackedSceneGLTF::export_gltf(Node *p_root, String p_path, + int32_t p_flags, + real_t p_bake_fps) { + ERR_FAIL_COND_V(!p_root, FAILED); + List<String> deps; + Error err; + String path = p_path; + int32_t flags = p_flags; + real_t baked_fps = p_bake_fps; + Ref<PackedSceneGLTF> exporter; + exporter.instance(); + exporter->save_scene(p_root, path, "", flags, baked_fps, &deps, &err); + int32_t error_code = err; + if (error_code != 0) { + return Error(error_code); + } + return OK; +} diff --git a/modules/gltf/editor_scene_importer_gltf.h b/modules/gltf/editor_scene_importer_gltf.h new file mode 100644 index 0000000000..3da987493c --- /dev/null +++ b/modules/gltf/editor_scene_importer_gltf.h @@ -0,0 +1,96 @@ +/*************************************************************************/ +/* editor_scene_importer_gltf.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
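The exporter plugin above funnels into PackedSceneGLTF::save_scene(), and the editor importer wraps PackedSceneGLTF::import_scene(). The same round trip can be driven directly through the methods bound in _bind_methods(). A hedged sketch assuming the module's headers are included; "scene_root" and the file paths are placeholders, not values from the module.

// Sketch of a glTF round trip through PackedSceneGLTF, mirroring what
// SceneExporterGLTFPlugin and EditorSceneImporterGLTF do above.
void gltf_round_trip_example(Node *scene_root) {
	Ref<PackedSceneGLTF> gltf;
	gltf.instance();

	// Export the edited scene to glTF 2.0 (flags = 0, bake at 1000 fps like the plugin).
	Error err = gltf->export_gltf(scene_root, "res://export/level.gltf", 0, 1000.0f);
	ERR_FAIL_COND(err != OK);

	// Re-import it; a GLTFState can be passed in to inspect the parsed document afterwards.
	Ref<GLTFState> state;
	state.instance();
	Node *reimported = gltf->import_gltf_scene("res://export/level.gltf", 0, 1000.0f, state);
	ERR_FAIL_COND(reimported == nullptr);
}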
*/ +/*************************************************************************/ + +#ifndef EDITOR_SCENE_IMPORTER_GLTF_H +#define EDITOR_SCENE_IMPORTER_GLTF_H + +#include "core/config/project_settings.h" +#include "core/io/json.h" +#include "core/object/object.h" +#include "core/templates/vector.h" +#include "editor/import/resource_importer_scene.h" +#include "modules/csg/csg_shape.h" +#include "modules/gridmap/grid_map.h" +#include "scene/3d/mesh_instance_3d.h" +#include "scene/3d/multimesh_instance_3d.h" +#include "scene/3d/node_3d.h" +#include "scene/3d/skeleton_3d.h" +#include "scene/animation/animation_player.h" +#include "scene/gui/check_box.h" +#include "scene/main/node.h" +#include "scene/resources/packed_scene.h" +#include "scene/resources/surface_tool.h" + +#include "gltf_document.h" +#include "gltf_state.h" + +class AnimationPlayer; +class BoneAttachment; +class EditorSceneImporterMeshNode3D; + +#ifdef TOOLS_ENABLED +class EditorSceneImporterGLTF : public EditorSceneImporter { + GDCLASS(EditorSceneImporterGLTF, EditorSceneImporter); + +public: + virtual uint32_t get_import_flags() const override; + virtual void get_extensions(List<String> *r_extensions) const override; + virtual Node *import_scene(const String &p_path, uint32_t p_flags, + int p_bake_fps, + List<String> *r_missing_deps = NULL, + Error *r_err = NULL) override; + virtual Ref<Animation> import_animation(const String &p_path, + uint32_t p_flags, int p_bake_fps) override; +}; +#endif + +class PackedSceneGLTF : public PackedScene { + GDCLASS(PackedSceneGLTF, PackedScene); + +protected: + static void _bind_methods(); + +public: + virtual void save_scene(Node *p_node, const String &p_path, const String &p_src_path, + uint32_t p_flags, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err = NULL); + virtual void _build_parent_hierachy(Ref<GLTFState> state); + virtual Error export_gltf(Node *p_root, String p_path, int32_t p_flags = 0, + real_t p_bake_fps = 1000.0f); + virtual Node *import_scene(const String &p_path, uint32_t p_flags, + int p_bake_fps, + List<String> *r_missing_deps, + Error *r_err, + Ref<GLTFState> r_state); + virtual Node *import_gltf_scene(const String &p_path, uint32_t p_flags, float p_bake_fps, Ref<GLTFState> r_state = Ref<GLTFState>()); + virtual void pack_gltf(String p_path, int32_t p_flags = 0, + real_t p_bake_fps = 1000.0f, Ref<GLTFState> r_state = Ref<GLTFState>()); +}; +#endif // EDITOR_SCENE_IMPORTER_GLTF_H diff --git a/modules/gltf/gltf_accessor.cpp b/modules/gltf/gltf_accessor.cpp new file mode 100644 index 0000000000..9267c58598 --- /dev/null +++ b/modules/gltf/gltf_accessor.cpp @@ -0,0 +1,189 @@ +/*************************************************************************/ +/* gltf_accessor.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_accessor.h" + +void GLTFAccessor::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_buffer_view"), &GLTFAccessor::get_buffer_view); + ClassDB::bind_method(D_METHOD("set_buffer_view", "buffer_view"), &GLTFAccessor::set_buffer_view); + ClassDB::bind_method(D_METHOD("get_byte_offset"), &GLTFAccessor::get_byte_offset); + ClassDB::bind_method(D_METHOD("set_byte_offset", "byte_offset"), &GLTFAccessor::set_byte_offset); + ClassDB::bind_method(D_METHOD("get_component_type"), &GLTFAccessor::get_component_type); + ClassDB::bind_method(D_METHOD("set_component_type", "component_type"), &GLTFAccessor::set_component_type); + ClassDB::bind_method(D_METHOD("get_normalized"), &GLTFAccessor::get_normalized); + ClassDB::bind_method(D_METHOD("set_normalized", "normalized"), &GLTFAccessor::set_normalized); + ClassDB::bind_method(D_METHOD("get_count"), &GLTFAccessor::get_count); + ClassDB::bind_method(D_METHOD("set_count", "count"), &GLTFAccessor::set_count); + ClassDB::bind_method(D_METHOD("get_type"), &GLTFAccessor::get_type); + ClassDB::bind_method(D_METHOD("set_type", "type"), &GLTFAccessor::set_type); + ClassDB::bind_method(D_METHOD("get_min"), &GLTFAccessor::get_min); + ClassDB::bind_method(D_METHOD("set_min", "min"), &GLTFAccessor::set_min); + ClassDB::bind_method(D_METHOD("get_max"), &GLTFAccessor::get_max); + ClassDB::bind_method(D_METHOD("set_max", "max"), &GLTFAccessor::set_max); + ClassDB::bind_method(D_METHOD("get_sparse_count"), &GLTFAccessor::get_sparse_count); + ClassDB::bind_method(D_METHOD("set_sparse_count", "sparse_count"), &GLTFAccessor::set_sparse_count); + ClassDB::bind_method(D_METHOD("get_sparse_indices_buffer_view"), &GLTFAccessor::get_sparse_indices_buffer_view); + ClassDB::bind_method(D_METHOD("set_sparse_indices_buffer_view", "sparse_indices_buffer_view"), &GLTFAccessor::set_sparse_indices_buffer_view); + ClassDB::bind_method(D_METHOD("get_sparse_indices_byte_offset"), &GLTFAccessor::get_sparse_indices_byte_offset); + ClassDB::bind_method(D_METHOD("set_sparse_indices_byte_offset", "sparse_indices_byte_offset"), &GLTFAccessor::set_sparse_indices_byte_offset); + ClassDB::bind_method(D_METHOD("get_sparse_indices_component_type"), &GLTFAccessor::get_sparse_indices_component_type); + ClassDB::bind_method(D_METHOD("set_sparse_indices_component_type", 
"sparse_indices_component_type"), &GLTFAccessor::set_sparse_indices_component_type); + ClassDB::bind_method(D_METHOD("get_sparse_values_buffer_view"), &GLTFAccessor::get_sparse_values_buffer_view); + ClassDB::bind_method(D_METHOD("set_sparse_values_buffer_view", "sparse_values_buffer_view"), &GLTFAccessor::set_sparse_values_buffer_view); + ClassDB::bind_method(D_METHOD("get_sparse_values_byte_offset"), &GLTFAccessor::get_sparse_values_byte_offset); + ClassDB::bind_method(D_METHOD("set_sparse_values_byte_offset", "sparse_values_byte_offset"), &GLTFAccessor::set_sparse_values_byte_offset); + + ADD_PROPERTY(PropertyInfo(Variant::INT, "buffer_view"), "set_buffer_view", "get_buffer_view"); // GLTFBufferViewIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "byte_offset"), "set_byte_offset", "get_byte_offset"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "component_type"), "set_component_type", "get_component_type"); // int + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "normalized"), "set_normalized", "get_normalized"); // bool + ADD_PROPERTY(PropertyInfo(Variant::INT, "count"), "set_count", "get_count"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "type"), "set_type", "get_type"); // GLTFDocument::GLTFType + ADD_PROPERTY(PropertyInfo(Variant::PACKED_FLOAT64_ARRAY, "min"), "set_min", "get_min"); // Vector<real_t> + ADD_PROPERTY(PropertyInfo(Variant::PACKED_FLOAT64_ARRAY, "max"), "set_max", "get_max"); // Vector<real_t> + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_count"), "set_sparse_count", "get_sparse_count"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_indices_buffer_view"), "set_sparse_indices_buffer_view", "get_sparse_indices_buffer_view"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_indices_byte_offset"), "set_sparse_indices_byte_offset", "get_sparse_indices_byte_offset"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_indices_component_type"), "set_sparse_indices_component_type", "get_sparse_indices_component_type"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_values_buffer_view"), "set_sparse_values_buffer_view", "get_sparse_values_buffer_view"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "sparse_values_byte_offset"), "set_sparse_values_byte_offset", "get_sparse_values_byte_offset"); // int +} + +GLTFBufferViewIndex GLTFAccessor::get_buffer_view() { + return buffer_view; +} + +void GLTFAccessor::set_buffer_view(GLTFBufferViewIndex p_buffer_view) { + buffer_view = p_buffer_view; +} + +int GLTFAccessor::get_byte_offset() { + return byte_offset; +} + +void GLTFAccessor::set_byte_offset(int p_byte_offset) { + byte_offset = p_byte_offset; +} + +int GLTFAccessor::get_component_type() { + return component_type; +} + +void GLTFAccessor::set_component_type(int p_component_type) { + component_type = p_component_type; +} + +bool GLTFAccessor::get_normalized() { + return normalized; +} + +void GLTFAccessor::set_normalized(bool p_normalized) { + normalized = p_normalized; +} + +int GLTFAccessor::get_count() { + return count; +} + +void GLTFAccessor::set_count(int p_count) { + count = p_count; +} + +int GLTFAccessor::get_type() { + return (int)type; +} + +void GLTFAccessor::set_type(int p_type) { + type = (GLTFDocument::GLTFType)p_type; // TODO: Register enum +} + +Vector<double> GLTFAccessor::get_min() { + return min; +} + +void GLTFAccessor::set_min(Vector<double> p_min) { + min = p_min; +} + +Vector<double> GLTFAccessor::get_max() { + return max; +} + +void GLTFAccessor::set_max(Vector<double> p_max) { + max = p_max; +} + +int 
GLTFAccessor::get_sparse_count() { + return sparse_count; +} + +void GLTFAccessor::set_sparse_count(int p_sparse_count) { + sparse_count = p_sparse_count; +} + +int GLTFAccessor::get_sparse_indices_buffer_view() { + return sparse_indices_buffer_view; +} + +void GLTFAccessor::set_sparse_indices_buffer_view(int p_sparse_indices_buffer_view) { + sparse_indices_buffer_view = p_sparse_indices_buffer_view; +} + +int GLTFAccessor::get_sparse_indices_byte_offset() { + return sparse_indices_byte_offset; +} + +void GLTFAccessor::set_sparse_indices_byte_offset(int p_sparse_indices_byte_offset) { + sparse_indices_byte_offset = p_sparse_indices_byte_offset; +} + +int GLTFAccessor::get_sparse_indices_component_type() { + return sparse_indices_component_type; +} + +void GLTFAccessor::set_sparse_indices_component_type(int p_sparse_indices_component_type) { + sparse_indices_component_type = p_sparse_indices_component_type; +} + +int GLTFAccessor::get_sparse_values_buffer_view() { + return sparse_values_buffer_view; +} + +void GLTFAccessor::set_sparse_values_buffer_view(int p_sparse_values_buffer_view) { + sparse_values_buffer_view = p_sparse_values_buffer_view; +} + +int GLTFAccessor::get_sparse_values_byte_offset() { + return sparse_values_byte_offset; +} + +void GLTFAccessor::set_sparse_values_byte_offset(int p_sparse_values_byte_offset) { + sparse_values_byte_offset = p_sparse_values_byte_offset; +} diff --git a/modules/gltf/gltf_accessor.h b/modules/gltf/gltf_accessor.h new file mode 100644 index 0000000000..7317928848 --- /dev/null +++ b/modules/gltf/gltf_accessor.h @@ -0,0 +1,104 @@ +/*************************************************************************/ +/* gltf_accessor.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_ACCESSOR_H +#define GLTF_ACCESSOR_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +struct GLTFAccessor : public Resource { + GDCLASS(GLTFAccessor, Resource); + friend class GLTFDocument; + +private: + GLTFBufferViewIndex buffer_view = 0; + int byte_offset = 0; + int component_type = 0; + bool normalized = false; + int count = 0; + GLTFDocument::GLTFType + type = GLTFDocument::TYPE_SCALAR; + Vector<double> min; + Vector<double> max; + int sparse_count = 0; + int sparse_indices_buffer_view = 0; + int sparse_indices_byte_offset = 0; + int sparse_indices_component_type = 0; + int sparse_values_buffer_view = 0; + int sparse_values_byte_offset = 0; + +protected: + static void _bind_methods(); + +public: + GLTFBufferViewIndex get_buffer_view(); + void set_buffer_view(GLTFBufferViewIndex p_buffer_view); + + int get_byte_offset(); + void set_byte_offset(int p_byte_offset); + + int get_component_type(); + void set_component_type(int p_component_type); + + bool get_normalized(); + void set_normalized(bool p_normalized); + + int get_count(); + void set_count(int p_count); + + int get_type(); + void set_type(int p_type); + + Vector<double> get_min(); + void set_min(Vector<double> p_min); + + Vector<double> get_max(); + void set_max(Vector<double> p_max); + + int get_sparse_count(); + void set_sparse_count(int p_sparse_count); + + int get_sparse_indices_buffer_view(); + void set_sparse_indices_buffer_view(int p_sparse_indices_buffer_view); + + int get_sparse_indices_byte_offset(); + void set_sparse_indices_byte_offset(int p_sparse_indices_byte_offset); + + int get_sparse_indices_component_type(); + void set_sparse_indices_component_type(int p_sparse_indices_component_type); + + int get_sparse_values_buffer_view(); + void set_sparse_values_buffer_view(int p_sparse_values_buffer_view); + + int get_sparse_values_byte_offset(); + void set_sparse_values_byte_offset(int p_sparse_values_byte_offset); +}; +#endif // GLTF_ACCESSOR_H diff --git a/modules/gltf/gltf_animation.cpp b/modules/gltf/gltf_animation.cpp new file mode 100644 index 0000000000..85ba117627 --- /dev/null +++ b/modules/gltf/gltf_animation.cpp @@ -0,0 +1,53 @@ +/*************************************************************************/ +/* gltf_animation.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_animation.h" + +void GLTFAnimation::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_loop"), &GLTFAnimation::get_loop); + ClassDB::bind_method(D_METHOD("set_loop", "loop"), &GLTFAnimation::set_loop); + + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "loop"), "set_loop", "get_loop"); // bool +} + +bool GLTFAnimation::get_loop() const { + return loop; +} + +void GLTFAnimation::set_loop(bool p_val) { + loop = p_val; +} + +Map<int, GLTFAnimation::Track> &GLTFAnimation::get_tracks() { + return tracks; +} + +GLTFAnimation::GLTFAnimation() { +} diff --git a/modules/gltf/gltf_animation.h b/modules/gltf/gltf_animation.h new file mode 100644 index 0000000000..37fd1a2007 --- /dev/null +++ b/modules/gltf/gltf_animation.h @@ -0,0 +1,74 @@ +/*************************************************************************/ +/* gltf_animation.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_ANIMATION_H +#define GLTF_ANIMATION_H + +#include "core/io/resource.h" + +class GLTFAnimation : public Resource { + GDCLASS(GLTFAnimation, Resource); + +protected: + static void _bind_methods(); + +public: + enum Interpolation { + INTERP_LINEAR, + INTERP_STEP, + INTERP_CATMULLROMSPLINE, + INTERP_CUBIC_SPLINE, + }; + + template <class T> + struct Channel { + Interpolation interpolation; + Vector<float> times; + Vector<T> values; + }; + + struct Track { + Channel<Vector3> translation_track; + Channel<Quat> rotation_track; + Channel<Vector3> scale_track; + Vector<Channel<float>> weight_tracks; + }; + +public: + bool get_loop() const; + void set_loop(bool p_val); + Map<int, GLTFAnimation::Track> &get_tracks(); + GLTFAnimation(); + +private: + bool loop = false; + Map<int, Track> tracks; +}; +#endif // GLTF_ANIMATION_H diff --git a/modules/gltf/gltf_buffer_view.cpp b/modules/gltf/gltf_buffer_view.cpp new file mode 100644 index 0000000000..edfdad40bb --- /dev/null +++ b/modules/gltf/gltf_buffer_view.cpp @@ -0,0 +1,90 @@ +/*************************************************************************/ +/* gltf_buffer_view.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "gltf_buffer_view.h" + +void GLTFBufferView::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_buffer"), &GLTFBufferView::get_buffer); + ClassDB::bind_method(D_METHOD("set_buffer", "buffer"), &GLTFBufferView::set_buffer); + ClassDB::bind_method(D_METHOD("get_byte_offset"), &GLTFBufferView::get_byte_offset); + ClassDB::bind_method(D_METHOD("set_byte_offset", "byte_offset"), &GLTFBufferView::set_byte_offset); + ClassDB::bind_method(D_METHOD("get_byte_length"), &GLTFBufferView::get_byte_length); + ClassDB::bind_method(D_METHOD("set_byte_length", "byte_length"), &GLTFBufferView::set_byte_length); + ClassDB::bind_method(D_METHOD("get_byte_stride"), &GLTFBufferView::get_byte_stride); + ClassDB::bind_method(D_METHOD("set_byte_stride", "byte_stride"), &GLTFBufferView::set_byte_stride); + ClassDB::bind_method(D_METHOD("get_indices"), &GLTFBufferView::get_indices); + ClassDB::bind_method(D_METHOD("set_indices", "indices"), &GLTFBufferView::set_indices); + + ADD_PROPERTY(PropertyInfo(Variant::INT, "buffer"), "set_buffer", "get_buffer"); // GLTFBufferIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "byte_offset"), "set_byte_offset", "get_byte_offset"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "byte_length"), "set_byte_length", "get_byte_length"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "byte_stride"), "set_byte_stride", "get_byte_stride"); // int + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "indices"), "set_indices", "get_indices"); // bool +} + +GLTFBufferIndex GLTFBufferView::get_buffer() { + return buffer; +} + +void GLTFBufferView::set_buffer(GLTFBufferIndex p_buffer) { + buffer = p_buffer; +} + +int GLTFBufferView::get_byte_offset() { + return byte_offset; +} + +void GLTFBufferView::set_byte_offset(int p_byte_offset) { + byte_offset = p_byte_offset; +} + +int GLTFBufferView::get_byte_length() { + return byte_length; +} + +void GLTFBufferView::set_byte_length(int p_byte_length) { + byte_length = p_byte_length; +} + +int GLTFBufferView::get_byte_stride() { + return byte_stride; +} + +void GLTFBufferView::set_byte_stride(int p_byte_stride) { + byte_stride = p_byte_stride; +} + +bool GLTFBufferView::get_indices() { + return indices; +} + +void GLTFBufferView::set_indices(bool p_indices) { + indices = p_indices; +} diff --git a/modules/gltf/gltf_buffer_view.h b/modules/gltf/gltf_buffer_view.h new file mode 100644 index 0000000000..0b0c8af173 --- /dev/null +++ b/modules/gltf/gltf_buffer_view.h @@ -0,0 +1,68 @@ +/*************************************************************************/ +/* gltf_buffer_view.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_BUFFER_VIEW_H +#define GLTF_BUFFER_VIEW_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +class GLTFBufferView : public Resource { + GDCLASS(GLTFBufferView, Resource); + friend class GLTFDocument; + +private: + GLTFBufferIndex buffer = -1; + int byte_offset = 0; + int byte_length = 0; + int byte_stride = -1; + bool indices = false; + +protected: + static void _bind_methods(); + +public: + GLTFBufferIndex get_buffer(); + void set_buffer(GLTFBufferIndex p_buffer); + + int get_byte_offset(); + void set_byte_offset(int p_byte_offset); + + int get_byte_length(); + void set_byte_length(int p_byte_length); + + int get_byte_stride(); + void set_byte_stride(int p_byte_stride); + + bool get_indices(); + void set_indices(bool p_indices); + // matrices need to be transformed to this +}; +#endif // GLTF_BUFFER_VIEW_H diff --git a/modules/gltf/gltf_camera.cpp b/modules/gltf/gltf_camera.cpp new file mode 100644 index 0000000000..e5cfc1cbb1 --- /dev/null +++ b/modules/gltf/gltf_camera.cpp @@ -0,0 +1,47 @@ +/*************************************************************************/ +/* gltf_camera.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_camera.h" + +void GLTFCamera::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_perspective"), &GLTFCamera::get_perspective); + ClassDB::bind_method(D_METHOD("set_perspective", "perspective"), &GLTFCamera::set_perspective); + ClassDB::bind_method(D_METHOD("get_fov_size"), &GLTFCamera::get_fov_size); + ClassDB::bind_method(D_METHOD("set_fov_size", "fov_size"), &GLTFCamera::set_fov_size); + ClassDB::bind_method(D_METHOD("get_zfar"), &GLTFCamera::get_zfar); + ClassDB::bind_method(D_METHOD("set_zfar", "zfar"), &GLTFCamera::set_zfar); + ClassDB::bind_method(D_METHOD("get_znear"), &GLTFCamera::get_znear); + ClassDB::bind_method(D_METHOD("set_znear", "znear"), &GLTFCamera::set_znear); + + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "perspective"), "set_perspective", "get_perspective"); // bool + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "fov_size"), "set_fov_size", "get_fov_size"); // float + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "zfar"), "set_zfar", "get_zfar"); // float + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "znear"), "set_znear", "get_znear"); // float +} diff --git a/modules/gltf/gltf_camera.h b/modules/gltf/gltf_camera.h new file mode 100644 index 0000000000..7e167af7be --- /dev/null +++ b/modules/gltf/gltf_camera.h @@ -0,0 +1,58 @@ +/*************************************************************************/ +/* gltf_camera.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_CAMERA_H +#define GLTF_CAMERA_H + +#include "core/io/resource.h" + +class GLTFCamera : public Resource { + GDCLASS(GLTFCamera, Resource); + +private: + bool perspective = true; + float fov_size = 75; + float zfar = 4000; + float znear = 0.05; + +protected: + static void _bind_methods(); + +public: + bool get_perspective() const { return perspective; } + void set_perspective(bool p_val) { perspective = p_val; } + float get_fov_size() const { return fov_size; } + void set_fov_size(float p_val) { fov_size = p_val; } + float get_zfar() const { return zfar; } + void set_zfar(float p_val) { zfar = p_val; } + float get_znear() const { return znear; } + void set_znear(float p_val) { znear = p_val; } +}; +#endif // GLTF_CAMERA_H diff --git a/modules/gltf/gltf_document.cpp b/modules/gltf/gltf_document.cpp new file mode 100644 index 0000000000..675f5002f7 --- /dev/null +++ b/modules/gltf/gltf_document.cpp @@ -0,0 +1,6398 @@ +/*************************************************************************/ +/* gltf_document.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
+/*************************************************************************/
+
+#include "gltf_document.h"
+#include "gltf_accessor.h"
+#include "gltf_animation.h"
+#include "gltf_camera.h"
+#include "gltf_light.h"
+#include "gltf_mesh.h"
+#include "gltf_node.h"
+#include "gltf_skeleton.h"
+#include "gltf_skin.h"
+#include "gltf_spec_gloss.h"
+#include "gltf_state.h"
+#include "gltf_texture.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "core/core_bind.h"
+#include "core/crypto/crypto_core.h"
+#include "core/io/json.h"
+#include "core/math/disjoint_set.h"
+#include "core/os/file_access.h"
+#include "core/variant/typed_array.h"
+#include "core/version.h"
+#include "core/version_hash.gen.h"
+#include "drivers/png/png_driver_common.h"
+#include "editor/import/resource_importer_scene.h"
+#include "modules/csg/csg_shape.h"
+#include "modules/gridmap/grid_map.h"
+#include "modules/regex/regex.h"
+#include "scene/2d/node_2d.h"
+#include "scene/3d/bone_attachment_3d.h"
+#include "scene/3d/camera_3d.h"
+#include "scene/3d/mesh_instance_3d.h"
+#include "scene/3d/multimesh_instance_3d.h"
+#include "scene/3d/node_3d.h"
+#include "scene/3d/skeleton_3d.h"
+#include "scene/animation/animation_player.h"
+#include "scene/resources/surface_tool.h"
+#include <limits>
+
+Error GLTFDocument::serialize(Ref<GLTFState> state, Node *p_root, const String &p_path) {
+ uint64_t begin_time = OS::get_singleton()->get_ticks_usec();
+
+ _convert_scene_node(state, p_root, p_root, -1, -1);
+ if (!state->buffers.size()) {
+ state->buffers.push_back(Vector<uint8_t>());
+ }
+
+ /* STEP 1 CONVERT MESH INSTANCES */
+ _convert_mesh_instances(state);
+
+ /* STEP 2 SERIALIZE CAMERAS */
+ Error err = _serialize_cameras(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 3 CREATE SKINS */
+ err = _serialize_skins(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+ /* STEP 4 CREATE BONE ATTACHMENTS */
+ err = _serialize_bone_attachment(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+ /* STEP 5 SERIALIZE MESHES (we have enough info now) */
+ err = _serialize_meshes(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 6 SERIALIZE MATERIALS */
+ err = _serialize_materials(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 7 SERIALIZE IMAGES */
+ err = _serialize_images(state, p_path);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 8 SERIALIZE TEXTURES */
+ err = _serialize_textures(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 9 SERIALIZE ANIMATIONS */
+ err = _serialize_animations(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 10 SERIALIZE ACCESSORS */
+ err = _encode_accessors(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ for (GLTFBufferViewIndex i = 0; i < state->buffer_views.size(); i++) {
+ state->buffer_views.write[i]->buffer = 0;
+ }
+
+ /* STEP 11 SERIALIZE BUFFER VIEWS */
+ err = _encode_buffer_views(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 12 SERIALIZE NODES */
+ err = _serialize_nodes(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 13 SERIALIZE SCENE */
+ err = _serialize_scenes(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 14 SERIALIZE LIGHTS */
+ err = _serialize_lights(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 15 SERIALIZE EXTENSIONS */
+ err = _serialize_extensions(state);
+ if (err != OK) {
+ return Error::FAILED;
+ }
+
+ /* STEP 16 SERIALIZE VERSION */
+ err = _serialize_version(state);
+ if
(err != OK) { + return Error::FAILED; + } + + /* STEP 17 SERIALIZE FILE */ + err = _serialize_file(state, p_path); + if (err != OK) { + return Error::FAILED; + } + uint64_t elapsed = OS::get_singleton()->get_ticks_usec() - begin_time; + float elapsed_sec = double(elapsed) / 1000000.0; + elapsed_sec = Math::stepify(elapsed_sec, 0.01f); + print_line("glTF: Export time elapsed seconds " + rtos(elapsed_sec).pad_decimals(2)); + + return OK; +} + +Error GLTFDocument::_serialize_extensions(Ref<GLTFState> state) const { + const String texture_transform = "KHR_texture_transform"; + const String punctual_lights = "KHR_lights_punctual"; + Array extensions_used; + extensions_used.push_back(punctual_lights); + extensions_used.push_back(texture_transform); + state->json["extensionsUsed"] = extensions_used; + Array extensions_required; + extensions_required.push_back(texture_transform); + state->json["extensionsRequired"] = extensions_required; + return OK; +} + +Error GLTFDocument::_serialize_scenes(Ref<GLTFState> state) { + Array scenes; + const int loaded_scene = 0; + state->json["scene"] = loaded_scene; + + if (state->nodes.size()) { + Dictionary s; + if (!state->scene_name.empty()) { + s["name"] = state->scene_name; + } + + Array nodes; + nodes.push_back(0); + s["nodes"] = nodes; + scenes.push_back(s); + } + state->json["scenes"] = scenes; + + return OK; +} + +Error GLTFDocument::_parse_json(const String &p_path, Ref<GLTFState> state) { + Error err; + FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); + if (!f) { + return err; + } + + Vector<uint8_t> array; + array.resize(f->get_len()); + f->get_buffer(array.ptrw(), array.size()); + String text; + text.parse_utf8((const char *)array.ptr(), array.size()); + + String err_txt; + int err_line; + Variant v; + err = JSON::parse(text, v, err_txt, err_line); + if (err != OK) { + _err_print_error("", p_path.utf8().get_data(), err_line, err_txt.utf8().get_data(), ERR_HANDLER_SCRIPT); + return err; + } + state->json = v; + + return OK; +} + +Error GLTFDocument::_serialize_bone_attachment(Ref<GLTFState> state) { + for (int skeleton_i = 0; skeleton_i < state->skeletons.size(); skeleton_i++) { + for (int attachment_i = 0; attachment_i < state->skeletons[skeleton_i]->bone_attachments.size(); attachment_i++) { + BoneAttachment3D *bone_attachment = state->skeletons[skeleton_i]->bone_attachments[attachment_i]; + String bone_name = bone_attachment->get_bone_name(); + bone_name = _sanitize_bone_name(bone_name); + int32_t bone = state->skeletons[skeleton_i]->godot_skeleton->find_bone(bone_name); + ERR_CONTINUE(bone == -1); + for (int skin_i = 0; skin_i < state->skins.size(); skin_i++) { + if (state->skins[skin_i]->skeleton != skeleton_i) { + continue; + } + + for (int node_i = 0; node_i < bone_attachment->get_child_count(); node_i++) { + ERR_CONTINUE(bone >= state->skins[skin_i]->joints.size()); + _convert_scene_node(state, bone_attachment->get_child(node_i), bone_attachment->get_owner(), state->skins[skin_i]->joints[bone], 0); + } + break; + } + } + } + return OK; +} + +Error GLTFDocument::_parse_glb(const String &p_path, Ref<GLTFState> state) { + Error err; + FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); + if (!f) { + return err; + } + + uint32_t magic = f->get_32(); + ERR_FAIL_COND_V(magic != 0x46546C67, ERR_FILE_UNRECOGNIZED); //glTF + f->get_32(); // version + f->get_32(); // length + + uint32_t chunk_length = f->get_32(); + uint32_t chunk_type = f->get_32(); + + ERR_FAIL_COND_V(chunk_type != 0x4E4F534A, ERR_PARSE_ERROR); 
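+ // GLB container layout (per the glTF 2.0 spec), which the reads above and below follow:
+ //   [u32 magic 0x46546C67 "glTF"] [u32 version] [u32 total length]
+ //   [u32 chunk length] [u32 chunk type 0x4E4F534A "JSON"] [JSON text]
+ //   [u32 chunk length] [u32 chunk type 0x004E4942 "BIN"] [binary payload] (optional)
+ // The first chunk is required to be the JSON chunk: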
//JSON + Vector<uint8_t> json_data; + json_data.resize(chunk_length); + uint32_t len = f->get_buffer(json_data.ptrw(), chunk_length); + ERR_FAIL_COND_V(len != chunk_length, ERR_FILE_CORRUPT); + + String text; + text.parse_utf8((const char *)json_data.ptr(), json_data.size()); + + String err_txt; + int err_line; + Variant v; + err = JSON::parse(text, v, err_txt, err_line); + if (err != OK) { + _err_print_error("", p_path.utf8().get_data(), err_line, err_txt.utf8().get_data(), ERR_HANDLER_SCRIPT); + return err; + } + + state->json = v; + + //data? + + chunk_length = f->get_32(); + chunk_type = f->get_32(); + + if (f->eof_reached()) { + return OK; //all good + } + + ERR_FAIL_COND_V(chunk_type != 0x004E4942, ERR_PARSE_ERROR); //BIN + + state->glb_data.resize(chunk_length); + len = f->get_buffer(state->glb_data.ptrw(), chunk_length); + ERR_FAIL_COND_V(len != chunk_length, ERR_FILE_CORRUPT); + + return OK; +} + +static Array _vec3_to_arr(const Vector3 &p_vec3) { + Array array; + array.resize(3); + array[0] = p_vec3.x; + array[1] = p_vec3.y; + array[2] = p_vec3.z; + return array; +} + +static Vector3 _arr_to_vec3(const Array &p_array) { + ERR_FAIL_COND_V(p_array.size() != 3, Vector3()); + return Vector3(p_array[0], p_array[1], p_array[2]); +} + +static Array _quat_to_array(const Quat &p_quat) { + Array array; + array.resize(4); + array[0] = p_quat.x; + array[1] = p_quat.y; + array[2] = p_quat.z; + array[3] = p_quat.w; + return array; +} + +static Quat _arr_to_quat(const Array &p_array) { + ERR_FAIL_COND_V(p_array.size() != 4, Quat()); + return Quat(p_array[0], p_array[1], p_array[2], p_array[3]); +} + +static Transform _arr_to_xform(const Array &p_array) { + ERR_FAIL_COND_V(p_array.size() != 16, Transform()); + + Transform xform; + xform.basis.set_axis(Vector3::AXIS_X, Vector3(p_array[0], p_array[1], p_array[2])); + xform.basis.set_axis(Vector3::AXIS_Y, Vector3(p_array[4], p_array[5], p_array[6])); + xform.basis.set_axis(Vector3::AXIS_Z, Vector3(p_array[8], p_array[9], p_array[10])); + xform.set_origin(Vector3(p_array[12], p_array[13], p_array[14])); + + return xform; +} + +static Vector<real_t> _xform_to_array(const Transform p_transform) { + Vector<real_t> array; + array.resize(16); + Vector3 axis_x = p_transform.get_basis().get_axis(Vector3::AXIS_X); + array.write[0] = axis_x.x; + array.write[1] = axis_x.y; + array.write[2] = axis_x.z; + array.write[3] = 0.0f; + Vector3 axis_y = p_transform.get_basis().get_axis(Vector3::AXIS_Y); + array.write[4] = axis_y.x; + array.write[5] = axis_y.y; + array.write[6] = axis_y.z; + array.write[7] = 0.0f; + Vector3 axis_z = p_transform.get_basis().get_axis(Vector3::AXIS_Z); + array.write[8] = axis_z.x; + array.write[9] = axis_z.y; + array.write[10] = axis_z.z; + array.write[11] = 0.0f; + Vector3 origin = p_transform.get_origin(); + array.write[12] = origin.x; + array.write[13] = origin.y; + array.write[14] = origin.z; + array.write[15] = 1.0f; + return array; +} + +Error GLTFDocument::_serialize_nodes(Ref<GLTFState> state) { + Array nodes; + for (int i = 0; i < state->nodes.size(); i++) { + Dictionary node; + Ref<GLTFNode> n = state->nodes[i]; + Dictionary extensions; + node["extensions"] = extensions; + if (!n->get_name().empty()) { + node["name"] = n->get_name(); + } + if (n->camera != -1) { + node["camera"] = n->camera; + } + if (n->light != -1) { + Dictionary lights_punctual; + extensions["KHR_lights_punctual"] = lights_punctual; + lights_punctual["light"] = n->light; + } + if (n->mesh != -1) { + node["mesh"] = n->mesh; + } + if (n->skin != -1) { + 
node["skin"] = n->skin; + } + if (n->skeleton != -1 && n->skin < 0) { + } + if (n->xform != Transform()) { + node["matrix"] = _xform_to_array(n->xform); + } + + if (!n->rotation.is_equal_approx(Quat())) { + node["rotation"] = _quat_to_array(n->rotation); + } + + if (!n->scale.is_equal_approx(Vector3(1.0f, 1.0f, 1.0f))) { + node["scale"] = _vec3_to_arr(n->scale); + } + + if (!n->translation.is_equal_approx(Vector3())) { + node["translation"] = _vec3_to_arr(n->translation); + } + if (n->children.size()) { + Array children; + for (int j = 0; j < n->children.size(); j++) { + children.push_back(n->children[j]); + } + node["children"] = children; + } + nodes.push_back(node); + } + state->json["nodes"] = nodes; + return OK; +} + +String GLTFDocument::_sanitize_scene_name(const String &name) { + RegEx regex("([^a-zA-Z0-9_ -]+)"); + String p_name = regex.sub(name, "", true); + return p_name; +} + +String GLTFDocument::_gen_unique_name(Ref<GLTFState> state, const String &p_name) { + const String s_name = _sanitize_scene_name(p_name); + + String name; + int index = 1; + while (true) { + name = s_name; + + if (index > 1) { + name += " " + itos(index); + } + if (!state->unique_names.has(name)) { + break; + } + index++; + } + + state->unique_names.insert(name); + + return name; +} + +String GLTFDocument::_sanitize_bone_name(const String &name) { + String p_name = name.camelcase_to_underscore(true); + + RegEx pattern_nocolon(":"); + p_name = pattern_nocolon.sub(p_name, "_", true); + + RegEx pattern_noslash("/"); + p_name = pattern_noslash.sub(p_name, "_", true); + + RegEx pattern_nospace(" +"); + p_name = pattern_nospace.sub(p_name, "_", true); + + RegEx pattern_multiple("_+"); + p_name = pattern_multiple.sub(p_name, "_", true); + + RegEx pattern_padded("0+(\\d+)"); + p_name = pattern_padded.sub(p_name, "$1", true); + + return p_name; +} + +String GLTFDocument::_gen_unique_bone_name(Ref<GLTFState> state, const GLTFSkeletonIndex skel_i, const String &p_name) { + String s_name = _sanitize_bone_name(p_name); + if (s_name.empty()) { + s_name = "bone"; + } + String name; + int index = 1; + while (true) { + name = s_name; + + if (index > 1) { + name += "_" + itos(index); + } + if (!state->skeletons[skel_i]->unique_names.has(name)) { + break; + } + index++; + } + + state->skeletons.write[skel_i]->unique_names.insert(name); + + return name; +} + +Error GLTFDocument::_parse_scenes(Ref<GLTFState> state) { + ERR_FAIL_COND_V(!state->json.has("scenes"), ERR_FILE_CORRUPT); + const Array &scenes = state->json["scenes"]; + int loaded_scene = 0; + if (state->json.has("scene")) { + loaded_scene = state->json["scene"]; + } else { + WARN_PRINT("The load-time scene is not defined in the glTF2 file. 
Picking the first scene."); + } + + if (scenes.size()) { + ERR_FAIL_COND_V(loaded_scene >= scenes.size(), ERR_FILE_CORRUPT); + const Dictionary &s = scenes[loaded_scene]; + ERR_FAIL_COND_V(!s.has("nodes"), ERR_UNAVAILABLE); + const Array &nodes = s["nodes"]; + for (int j = 0; j < nodes.size(); j++) { + state->root_nodes.push_back(nodes[j]); + } + + if (s.has("name") && s["name"] != "") { + state->scene_name = _gen_unique_name(state, s["name"]); + } else { + state->scene_name = _gen_unique_name(state, "Scene"); + } + } + + return OK; +} + +Error GLTFDocument::_parse_nodes(Ref<GLTFState> state) { + ERR_FAIL_COND_V(!state->json.has("nodes"), ERR_FILE_CORRUPT); + const Array &nodes = state->json["nodes"]; + for (int i = 0; i < nodes.size(); i++) { + Ref<GLTFNode> node; + node.instance(); + const Dictionary &n = nodes[i]; + + if (n.has("name")) { + node->set_name(n["name"]); + } + if (n.has("camera")) { + node->camera = n["camera"]; + } + if (n.has("mesh")) { + node->mesh = n["mesh"]; + } + if (n.has("skin")) { + node->skin = n["skin"]; + } + if (n.has("matrix")) { + node->xform = _arr_to_xform(n["matrix"]); + } else { + if (n.has("translation")) { + node->translation = _arr_to_vec3(n["translation"]); + } + if (n.has("rotation")) { + node->rotation = _arr_to_quat(n["rotation"]); + } + if (n.has("scale")) { + node->scale = _arr_to_vec3(n["scale"]); + } + + node->xform.basis.set_quat_scale(node->rotation, node->scale); + node->xform.origin = node->translation; + } + + if (n.has("extensions")) { + Dictionary extensions = n["extensions"]; + if (extensions.has("KHR_lights_punctual")) { + Dictionary lights_punctual = extensions["KHR_lights_punctual"]; + if (lights_punctual.has("light")) { + GLTFLightIndex light = lights_punctual["light"]; + node->light = light; + } + } + } + + if (n.has("children")) { + const Array &children = n["children"]; + for (int j = 0; j < children.size(); j++) { + node->children.push_back(children[j]); + } + } + + state->nodes.push_back(node); + } + + // build the hierarchy + for (GLTFNodeIndex node_i = 0; node_i < state->nodes.size(); node_i++) { + for (int j = 0; j < state->nodes[node_i]->children.size(); j++) { + GLTFNodeIndex child_i = state->nodes[node_i]->children[j]; + + ERR_FAIL_INDEX_V(child_i, state->nodes.size(), ERR_FILE_CORRUPT); + ERR_CONTINUE(state->nodes[child_i]->parent != -1); //node already has a parent, wtf. 
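+ // glTF nodes only list their children, so parent links are rebuilt here;
+ // a node that still has parent == -1 afterwards is treated as a root by
+ // _compute_node_heights() below.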
+ + state->nodes.write[child_i]->parent = node_i; + } + } + + _compute_node_heights(state); + + return OK; +} + +void GLTFDocument::_compute_node_heights(Ref<GLTFState> state) { + state->root_nodes.clear(); + for (GLTFNodeIndex node_i = 0; node_i < state->nodes.size(); ++node_i) { + Ref<GLTFNode> node = state->nodes[node_i]; + node->height = 0; + + GLTFNodeIndex current_i = node_i; + while (current_i >= 0) { + const GLTFNodeIndex parent_i = state->nodes[current_i]->parent; + if (parent_i >= 0) { + ++node->height; + } + current_i = parent_i; + } + + if (node->height == 0) { + state->root_nodes.push_back(node_i); + } + } +} + +static Vector<uint8_t> _parse_base64_uri(const String &uri) { + int start = uri.find(","); + ERR_FAIL_COND_V(start == -1, Vector<uint8_t>()); + + CharString substr = uri.right(start + 1).ascii(); + + int strlen = substr.length(); + + Vector<uint8_t> buf; + buf.resize(strlen / 4 * 3 + 1 + 1); + + size_t len = 0; + ERR_FAIL_COND_V(CryptoCore::b64_decode(buf.ptrw(), buf.size(), &len, (unsigned char *)substr.get_data(), strlen) != OK, Vector<uint8_t>()); + + buf.resize(len); + + return buf; +} +Error GLTFDocument::_encode_buffer_glb(Ref<GLTFState> state, const String &p_path) { + print_verbose("glTF: Total buffers: " + itos(state->buffers.size())); + + if (!state->buffers.size()) { + return OK; + } + Array buffers; + if (state->buffers.size()) { + Vector<uint8_t> buffer_data = state->buffers[0]; + Dictionary gltf_buffer; + + gltf_buffer["byteLength"] = buffer_data.size(); + buffers.push_back(gltf_buffer); + } + + for (GLTFBufferIndex i = 1; i < state->buffers.size() - 1; i++) { + Vector<uint8_t> buffer_data = state->buffers[i]; + Dictionary gltf_buffer; + String filename = p_path.get_basename().get_file() + itos(i) + ".bin"; + String path = p_path.get_base_dir() + "/" + filename; + Error err; + FileAccessRef f = FileAccess::open(path, FileAccess::WRITE, &err); + if (!f) { + return err; + } + if (buffer_data.size() == 0) { + return OK; + } + f->create(FileAccess::ACCESS_RESOURCES); + f->store_buffer(buffer_data.ptr(), buffer_data.size()); + f->close(); + gltf_buffer["uri"] = filename; + gltf_buffer["byteLength"] = buffer_data.size(); + buffers.push_back(gltf_buffer); + } + state->json["buffers"] = buffers; + + return OK; +} + +Error GLTFDocument::_encode_buffer_bins(Ref<GLTFState> state, const String &p_path) { + print_verbose("glTF: Total buffers: " + itos(state->buffers.size())); + + if (!state->buffers.size()) { + return OK; + } + Array buffers; + + for (GLTFBufferIndex i = 0; i < state->buffers.size(); i++) { + Vector<uint8_t> buffer_data = state->buffers[i]; + Dictionary gltf_buffer; + String filename = p_path.get_basename().get_file() + itos(i) + ".bin"; + String path = p_path.get_base_dir() + "/" + filename; + Error err; + FileAccessRef f = FileAccess::open(path, FileAccess::WRITE, &err); + if (!f) { + return err; + } + if (buffer_data.size() == 0) { + return OK; + } + f->create(FileAccess::ACCESS_RESOURCES); + f->store_buffer(buffer_data.ptr(), buffer_data.size()); + f->close(); + gltf_buffer["uri"] = filename; + gltf_buffer["byteLength"] = buffer_data.size(); + buffers.push_back(gltf_buffer); + } + state->json["buffers"] = buffers; + + return OK; +} + +Error GLTFDocument::_parse_buffers(Ref<GLTFState> state, const String &p_base_path) { + if (!state->json.has("buffers")) { + return OK; + } + + const Array &buffers = state->json["buffers"]; + for (GLTFBufferIndex i = 0; i < buffers.size(); i++) { + if (i == 0 && state->glb_data.size()) { + 
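+ // Buffer 0 of a binary .glb has no "uri"; its payload is the BIN chunk
+ // that _parse_glb() stored in state->glb_data.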
state->buffers.push_back(state->glb_data);
+
+ } else {
+ const Dictionary &buffer = buffers[i];
+ if (buffer.has("uri")) {
+ Vector<uint8_t> buffer_data;
+ String uri = buffer["uri"];
+
+ if (uri.begins_with("data:")) { // Embedded data using base64.
+ // Validate data MIME types and throw an error if it's one we don't know/support.
+ if (!uri.begins_with("data:application/octet-stream;base64") &&
+ !uri.begins_with("data:application/gltf-buffer;base64")) {
+ ERR_PRINT("glTF: Got buffer with an unknown URI data type: " + uri);
+ }
+ buffer_data = _parse_base64_uri(uri);
+ } else { // Relative path to an external binary file.
+ uri = p_base_path.plus_file(uri).replace("\\", "/"); // Fix for Windows.
+ buffer_data = FileAccess::get_file_as_array(uri);
+ ERR_FAIL_COND_V_MSG(buffer_data.size() == 0, ERR_PARSE_ERROR, "glTF: Couldn't load binary file as an array: " + uri);
+ }
+
+ ERR_FAIL_COND_V(!buffer.has("byteLength"), ERR_PARSE_ERROR);
+ int byteLength = buffer["byteLength"];
+ ERR_FAIL_COND_V(byteLength < buffer_data.size(), ERR_PARSE_ERROR);
+ state->buffers.push_back(buffer_data);
+ }
+ }
+ }
+
+ print_verbose("glTF: Total buffers: " + itos(state->buffers.size()));
+
+ return OK;
+}
+
+Error GLTFDocument::_encode_buffer_views(Ref<GLTFState> state) {
+ Array buffers;
+ for (GLTFBufferViewIndex i = 0; i < state->buffer_views.size(); i++) {
+ Dictionary d;
+
+ Ref<GLTFBufferView> buffer_view = state->buffer_views[i];
+
+ d["buffer"] = buffer_view->buffer;
+ d["byteLength"] = buffer_view->byte_length;
+
+ d["byteOffset"] = buffer_view->byte_offset;
+
+ if (buffer_view->byte_stride != -1) {
+ d["byteStride"] = buffer_view->byte_stride;
+ }
+
+ // TODO Sparse
+ // d["target"] = buffer_view->indices;
+
+ ERR_FAIL_COND_V(!d.has("buffer"), ERR_INVALID_DATA);
+ ERR_FAIL_COND_V(!d.has("byteLength"), ERR_INVALID_DATA);
+ buffers.push_back(d);
+ }
+ print_verbose("glTF: Total buffer views: " + itos(state->buffer_views.size()));
+ state->json["bufferViews"] = buffers;
+ return OK;
+}
+
+Error GLTFDocument::_parse_buffer_views(Ref<GLTFState> state) {
+ ERR_FAIL_COND_V(!state->json.has("bufferViews"), ERR_FILE_CORRUPT);
+ const Array &buffers = state->json["bufferViews"];
+ for (GLTFBufferViewIndex i = 0; i < buffers.size(); i++) {
+ const Dictionary &d = buffers[i];
+
+ Ref<GLTFBufferView> buffer_view;
+ buffer_view.instance();
+
+ ERR_FAIL_COND_V(!d.has("buffer"), ERR_PARSE_ERROR);
+ buffer_view->buffer = d["buffer"];
+ ERR_FAIL_COND_V(!d.has("byteLength"), ERR_PARSE_ERROR);
+ buffer_view->byte_length = d["byteLength"];
+
+ if (d.has("byteOffset")) {
+ buffer_view->byte_offset = d["byteOffset"];
+ }
+
+ if (d.has("byteStride")) {
+ buffer_view->byte_stride = d["byteStride"];
+ }
+
+ if (d.has("target")) {
+ const int target = d["target"];
+ buffer_view->indices = target == GLTFDocument::ELEMENT_ARRAY_BUFFER;
+ }
+
+ state->buffer_views.push_back(buffer_view);
+ }
+
+ print_verbose("glTF: Total buffer views: " + itos(state->buffer_views.size()));
+
+ return OK;
+}
+
+Error GLTFDocument::_encode_accessors(Ref<GLTFState> state) {
+ Array accessors;
+ for (GLTFAccessorIndex i = 0; i < state->accessors.size(); i++) {
+ Dictionary d;
+
+ Ref<GLTFAccessor> accessor = state->accessors[i];
+ d["componentType"] = accessor->component_type;
+ d["count"] = accessor->count;
+ d["type"] = _get_accessor_type_name(accessor->type);
+ d["byteOffset"] = accessor->byte_offset;
+ d["max"] = accessor->max;
+ d["min"] = accessor->min;
+ d["bufferView"] = accessor->buffer_view; //optional because it may be sparse...
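+ // Example of an accessor entry produced above (illustrative values only):
+ //   { "bufferView": 0, "byteOffset": 0, "componentType": 5126 (COMPONENT_TYPE_FLOAT),
+ //     "count": 24, "type": "VEC3", "min": [...], "max": [...] }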
+ + // Dictionary s; + // s["count"] = accessor->sparse_count; + // ERR_FAIL_COND_V(!s.has("count"), ERR_PARSE_ERROR); + + // s["indices"] = accessor->sparse_accessors; + // ERR_FAIL_COND_V(!s.has("indices"), ERR_PARSE_ERROR); + + // Dictionary si; + + // si["bufferView"] = accessor->sparse_indices_buffer_view; + + // ERR_FAIL_COND_V(!si.has("bufferView"), ERR_PARSE_ERROR); + // si["componentType"] = accessor->sparse_indices_component_type; + + // if (si.has("byteOffset")) { + // si["byteOffset"] = accessor->sparse_indices_byte_offset; + // } + + // ERR_FAIL_COND_V(!si.has("componentType"), ERR_PARSE_ERROR); + // s["indices"] = si; + // Dictionary sv; + + // sv["bufferView"] = accessor->sparse_values_buffer_view; + // if (sv.has("byteOffset")) { + // sv["byteOffset"] = accessor->sparse_values_byte_offset; + // } + // ERR_FAIL_COND_V(!sv.has("bufferView"), ERR_PARSE_ERROR); + // s["values"] = sv; + // ERR_FAIL_COND_V(!s.has("values"), ERR_PARSE_ERROR); + // d["sparse"] = s; + accessors.push_back(d); + } + + state->json["accessors"] = accessors; + ERR_FAIL_COND_V(!state->json.has("accessors"), ERR_FILE_CORRUPT); + print_verbose("glTF: Total accessors: " + itos(state->accessors.size())); + + return OK; +} + +String GLTFDocument::_get_accessor_type_name(const GLTFDocument::GLTFType p_type) { + if (p_type == GLTFDocument::TYPE_SCALAR) { + return "SCALAR"; + } + if (p_type == GLTFDocument::TYPE_VEC2) { + return "VEC2"; + } + if (p_type == GLTFDocument::TYPE_VEC3) { + return "VEC3"; + } + if (p_type == GLTFDocument::TYPE_VEC4) { + return "VEC4"; + } + + if (p_type == GLTFDocument::TYPE_MAT2) { + return "MAT2"; + } + if (p_type == GLTFDocument::TYPE_MAT3) { + return "MAT3"; + } + if (p_type == GLTFDocument::TYPE_MAT4) { + return "MAT4"; + } + ERR_FAIL_V("SCALAR"); +} + +GLTFDocument::GLTFType GLTFDocument::_get_type_from_str(const String &p_string) { + if (p_string == "SCALAR") + return GLTFDocument::TYPE_SCALAR; + + if (p_string == "VEC2") + return GLTFDocument::TYPE_VEC2; + if (p_string == "VEC3") + return GLTFDocument::TYPE_VEC3; + if (p_string == "VEC4") + return GLTFDocument::TYPE_VEC4; + + if (p_string == "MAT2") + return GLTFDocument::TYPE_MAT2; + if (p_string == "MAT3") + return GLTFDocument::TYPE_MAT3; + if (p_string == "MAT4") + return GLTFDocument::TYPE_MAT4; + + ERR_FAIL_V(GLTFDocument::TYPE_SCALAR); +} + +Error GLTFDocument::_parse_accessors(Ref<GLTFState> state) { + ERR_FAIL_COND_V(!state->json.has("accessors"), ERR_FILE_CORRUPT); + const Array &accessors = state->json["accessors"]; + for (GLTFAccessorIndex i = 0; i < accessors.size(); i++) { + const Dictionary &d = accessors[i]; + + Ref<GLTFAccessor> accessor; + accessor.instance(); + + ERR_FAIL_COND_V(!d.has("componentType"), ERR_PARSE_ERROR); + accessor->component_type = d["componentType"]; + ERR_FAIL_COND_V(!d.has("count"), ERR_PARSE_ERROR); + accessor->count = d["count"]; + ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); + accessor->type = _get_type_from_str(d["type"]); + + if (d.has("bufferView")) { + accessor->buffer_view = d["bufferView"]; //optional because it may be sparse... + } + + if (d.has("byteOffset")) { + accessor->byte_offset = d["byteOffset"]; + } + + if (d.has("max")) { + accessor->max = d["max"]; + } + + if (d.has("min")) { + accessor->min = d["min"]; + } + + if (d.has("sparse")) { + //eeh.. 
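+ // Per the glTF 2.0 spec a sparse accessor patches a base accessor, e.g.:
+ //   "sparse": { "count": N,
+ //               "indices": { "bufferView": i, "componentType": c, "byteOffset": o },
+ //               "values":  { "bufferView": j, "byteOffset": o } }
+ // which is exactly the shape parsed below.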
+ + const Dictionary &s = d["sparse"]; + + ERR_FAIL_COND_V(!s.has("count"), ERR_PARSE_ERROR); + accessor->sparse_count = s["count"]; + ERR_FAIL_COND_V(!s.has("indices"), ERR_PARSE_ERROR); + const Dictionary &si = s["indices"]; + + ERR_FAIL_COND_V(!si.has("bufferView"), ERR_PARSE_ERROR); + accessor->sparse_indices_buffer_view = si["bufferView"]; + ERR_FAIL_COND_V(!si.has("componentType"), ERR_PARSE_ERROR); + accessor->sparse_indices_component_type = si["componentType"]; + + if (si.has("byteOffset")) { + accessor->sparse_indices_byte_offset = si["byteOffset"]; + } + + ERR_FAIL_COND_V(!s.has("values"), ERR_PARSE_ERROR); + const Dictionary &sv = s["values"]; + + ERR_FAIL_COND_V(!sv.has("bufferView"), ERR_PARSE_ERROR); + accessor->sparse_values_buffer_view = sv["bufferView"]; + if (sv.has("byteOffset")) { + accessor->sparse_values_byte_offset = sv["byteOffset"]; + } + } + + state->accessors.push_back(accessor); + } + + print_verbose("glTF: Total accessors: " + itos(state->accessors.size())); + + return OK; +} + +double GLTFDocument::_filter_number(double p_float) { + if (Math::is_nan(p_float)) { + return 0.0f; + } + return p_float; +} + +String GLTFDocument::_get_component_type_name(const uint32_t p_component) { + switch (p_component) { + case GLTFDocument::COMPONENT_TYPE_BYTE: + return "Byte"; + case GLTFDocument::COMPONENT_TYPE_UNSIGNED_BYTE: + return "UByte"; + case GLTFDocument::COMPONENT_TYPE_SHORT: + return "Short"; + case GLTFDocument::COMPONENT_TYPE_UNSIGNED_SHORT: + return "UShort"; + case GLTFDocument::COMPONENT_TYPE_INT: + return "Int"; + case GLTFDocument::COMPONENT_TYPE_FLOAT: + return "Float"; + } + + return "<Error>"; +} + +String GLTFDocument::_get_type_name(const GLTFType p_component) { + static const char *names[] = { + "float", + "vec2", + "vec3", + "vec4", + "mat2", + "mat3", + "mat4" + }; + + return names[p_component]; +} + +Error GLTFDocument::_encode_buffer_view(Ref<GLTFState> state, const double *src, const int count, const GLTFType type, const int component_type, const bool normalized, const int byte_offset, const bool for_vertex, GLTFBufferViewIndex &r_accessor) { + const int component_count_for_type[7] = { + 1, 2, 3, 4, 4, 9, 16 + }; + + const int component_count = component_count_for_type[type]; + const int component_size = _get_component_type_size(component_type); + ERR_FAIL_COND_V(component_size == 0, FAILED); + + int skip_every = 0; + int skip_bytes = 0; + //special case of alignments, as described in spec + switch (component_type) { + case COMPONENT_TYPE_BYTE: + case COMPONENT_TYPE_UNSIGNED_BYTE: { + if (type == TYPE_MAT2) { + skip_every = 2; + skip_bytes = 2; + } + if (type == TYPE_MAT3) { + skip_every = 3; + skip_bytes = 1; + } + } break; + case COMPONENT_TYPE_SHORT: + case COMPONENT_TYPE_UNSIGNED_SHORT: { + if (type == TYPE_MAT3) { + skip_every = 6; + skip_bytes = 4; + } + } break; + default: { + } + } + + Ref<GLTFBufferView> bv; + bv.instance(); + const uint32_t offset = bv->byte_offset = byte_offset; + Vector<uint8_t> &gltf_buffer = state->buffers.write[0]; + + int stride = _get_component_type_size(component_type); + if (for_vertex && stride % 4) { + stride += 4 - (stride % 4); //according to spec must be multiple of 4 + } + //use to debug + print_verbose("glTF: encoding type " + _get_type_name(type) + " component type: " + _get_component_type_name(component_type) + " stride: " + itos(stride) + " amount " + itos(count)); + + print_verbose("glTF: encoding accessor offset " + itos(byte_offset) + " view offset: " + itos(bv->byte_offset) + " total buffer len: " 
+ itos(gltf_buffer.size()) + " view len " + itos(bv->byte_length)); + + const int buffer_end = (stride * (count - 1)) + _get_component_type_size(component_type); + // TODO define bv->byte_stride + bv->byte_offset = gltf_buffer.size(); + + switch (component_type) { + case COMPONENT_TYPE_BYTE: { + Vector<int8_t> buffer; + buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + if (normalized) { + buffer.write[dst_i] = d * 128.0; + } else { + buffer.write[dst_i] = d; + } + src++; + dst_i++; + } + } + int64_t old_size = gltf_buffer.size(); + gltf_buffer.resize(old_size + (buffer.size() * sizeof(int8_t))); + copymem(gltf_buffer.ptrw() + old_size, buffer.ptrw(), buffer.size() * sizeof(int8_t)); + bv->byte_length = buffer.size() * sizeof(int8_t); + } break; + case COMPONENT_TYPE_UNSIGNED_BYTE: { + Vector<uint8_t> buffer; + buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + if (normalized) { + buffer.write[dst_i] = d * 255.0; + } else { + buffer.write[dst_i] = d; + } + src++; + dst_i++; + } + } + gltf_buffer.append_array(buffer); + bv->byte_length = buffer.size() * sizeof(uint8_t); + } break; + case COMPONENT_TYPE_SHORT: { + Vector<int16_t> buffer; + buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + if (normalized) { + buffer.write[dst_i] = d * 32768.0; + } else { + buffer.write[dst_i] = d; + } + src++; + dst_i++; + } + } + int64_t old_size = gltf_buffer.size(); + gltf_buffer.resize(old_size + (buffer.size() * sizeof(int16_t))); + copymem(gltf_buffer.ptrw() + old_size, buffer.ptrw(), buffer.size() * sizeof(int16_t)); + bv->byte_length = buffer.size() * sizeof(int16_t); + } break; + case COMPONENT_TYPE_UNSIGNED_SHORT: { + Vector<uint16_t> buffer; + buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + if (normalized) { + buffer.write[dst_i] = d * 65535.0; + } else { + buffer.write[dst_i] = d; + } + src++; + dst_i++; + } + } + int64_t old_size = gltf_buffer.size(); + gltf_buffer.resize(old_size + (buffer.size() * sizeof(uint16_t))); + copymem(gltf_buffer.ptrw() + old_size, buffer.ptrw(), buffer.size() * sizeof(uint16_t)); + bv->byte_length = buffer.size() * sizeof(uint16_t); + } break; + case COMPONENT_TYPE_INT: { + Vector<int> buffer; + buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + buffer.write[dst_i] = d; + src++; + dst_i++; + } + } + int64_t old_size = gltf_buffer.size(); + gltf_buffer.resize(old_size + (buffer.size() * sizeof(int32_t))); + copymem(gltf_buffer.ptrw() + old_size, buffer.ptrw(), buffer.size() * sizeof(int32_t)); + bv->byte_length = buffer.size() * sizeof(int32_t); + } break; + case COMPONENT_TYPE_FLOAT: { + Vector<float> buffer; + 
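+ // Floats are written through unchanged; the "normalized" flag only affects
+ // the integer component types handled in the cases above.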
buffer.resize(count * component_count); + int32_t dst_i = 0; + for (int i = 0; i < count; i++) { + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + dst_i += skip_bytes; + } + double d = *src; + buffer.write[dst_i] = d; + src++; + dst_i++; + } + } + int64_t old_size = gltf_buffer.size(); + gltf_buffer.resize(old_size + (buffer.size() * sizeof(float))); + copymem(gltf_buffer.ptrw() + old_size, buffer.ptrw(), buffer.size() * sizeof(float)); + bv->byte_length = buffer.size() * sizeof(float); + } break; + } + ERR_FAIL_COND_V(buffer_end > bv->byte_length, ERR_INVALID_DATA); + + ERR_FAIL_COND_V((int)(offset + buffer_end) > gltf_buffer.size(), ERR_INVALID_DATA); + r_accessor = bv->buffer = state->buffer_views.size(); + state->buffer_views.push_back(bv); + return OK; +} + +Error GLTFDocument::_decode_buffer_view(Ref<GLTFState> state, double *dst, const GLTFBufferViewIndex p_buffer_view, const int skip_every, const int skip_bytes, const int element_size, const int count, const GLTFType type, const int component_count, const int component_type, const int component_size, const bool normalized, const int byte_offset, const bool for_vertex) { + const Ref<GLTFBufferView> bv = state->buffer_views[p_buffer_view]; + + int stride = element_size; + if (bv->byte_stride != -1) { + stride = bv->byte_stride; + } + if (for_vertex && stride % 4) { + stride += 4 - (stride % 4); //according to spec must be multiple of 4 + } + + ERR_FAIL_INDEX_V(bv->buffer, state->buffers.size(), ERR_PARSE_ERROR); + + const uint32_t offset = bv->byte_offset + byte_offset; + Vector<uint8_t> buffer = state->buffers[bv->buffer]; //copy on write, so no performance hit + const uint8_t *bufptr = buffer.ptr(); + + //use to debug + print_verbose("glTF: type " + _get_type_name(type) + " component type: " + _get_component_type_name(component_type) + " stride: " + itos(stride) + " amount " + itos(count)); + print_verbose("glTF: accessor offset " + itos(byte_offset) + " view offset: " + itos(bv->byte_offset) + " total buffer len: " + itos(buffer.size()) + " view len " + itos(bv->byte_length)); + + const int buffer_end = (stride * (count - 1)) + element_size; + ERR_FAIL_COND_V(buffer_end > bv->byte_length, ERR_PARSE_ERROR); + + ERR_FAIL_COND_V((int)(offset + buffer_end) > buffer.size(), ERR_PARSE_ERROR); + + //fill everything as doubles + + for (int i = 0; i < count; i++) { + const uint8_t *src = &bufptr[offset + i * stride]; + + for (int j = 0; j < component_count; j++) { + if (skip_every && j > 0 && (j % skip_every) == 0) { + src += skip_bytes; + } + + double d = 0; + + switch (component_type) { + case COMPONENT_TYPE_BYTE: { + int8_t b = int8_t(*src); + if (normalized) { + d = (double(b) / 128.0); + } else { + d = double(b); + } + } break; + case COMPONENT_TYPE_UNSIGNED_BYTE: { + uint8_t b = *src; + if (normalized) { + d = (double(b) / 255.0); + } else { + d = double(b); + } + } break; + case COMPONENT_TYPE_SHORT: { + int16_t s = *(int16_t *)src; + if (normalized) { + d = (double(s) / 32768.0); + } else { + d = double(s); + } + } break; + case COMPONENT_TYPE_UNSIGNED_SHORT: { + uint16_t s = *(uint16_t *)src; + if (normalized) { + d = (double(s) / 65535.0); + } else { + d = double(s); + } + } break; + case COMPONENT_TYPE_INT: { + d = *(int *)src; + } break; + case COMPONENT_TYPE_FLOAT: { + d = *(float *)src; + } break; + } + + *dst++ = d; + src += component_size; + } + } + + return OK; +} + +int GLTFDocument::_get_component_type_size(const int component_type) { + switch (component_type) { + case 
COMPONENT_TYPE_BYTE: + case COMPONENT_TYPE_UNSIGNED_BYTE: + return 1; + break; + case COMPONENT_TYPE_SHORT: + case COMPONENT_TYPE_UNSIGNED_SHORT: + return 2; + break; + case COMPONENT_TYPE_INT: + case COMPONENT_TYPE_FLOAT: + return 4; + break; + default: { + ERR_FAIL_V(0); + } + } + return 0; +} + +Vector<double> GLTFDocument::_decode_accessor(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + //spec, for reference: + //https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#data-alignment + + ERR_FAIL_INDEX_V(p_accessor, state->accessors.size(), Vector<double>()); + + const Ref<GLTFAccessor> a = state->accessors[p_accessor]; + + const int component_count_for_type[7] = { + 1, 2, 3, 4, 4, 9, 16 + }; + + const int component_count = component_count_for_type[a->type]; + const int component_size = _get_component_type_size(a->component_type); + ERR_FAIL_COND_V(component_size == 0, Vector<double>()); + int element_size = component_count * component_size; + + int skip_every = 0; + int skip_bytes = 0; + //special case of alignments, as described in spec + switch (a->component_type) { + case COMPONENT_TYPE_BYTE: + case COMPONENT_TYPE_UNSIGNED_BYTE: { + if (a->type == TYPE_MAT2) { + skip_every = 2; + skip_bytes = 2; + element_size = 8; //override for this case + } + if (a->type == TYPE_MAT3) { + skip_every = 3; + skip_bytes = 1; + element_size = 12; //override for this case + } + } break; + case COMPONENT_TYPE_SHORT: + case COMPONENT_TYPE_UNSIGNED_SHORT: { + if (a->type == TYPE_MAT3) { + skip_every = 6; + skip_bytes = 4; + element_size = 16; //override for this case + } + } break; + default: { + } + } + + Vector<double> dst_buffer; + dst_buffer.resize(component_count * a->count); + double *dst = dst_buffer.ptrw(); + + if (a->buffer_view >= 0) { + ERR_FAIL_INDEX_V(a->buffer_view, state->buffer_views.size(), Vector<double>()); + + const Error err = _decode_buffer_view(state, dst, a->buffer_view, skip_every, skip_bytes, element_size, a->count, a->type, component_count, a->component_type, component_size, a->normalized, a->byte_offset, p_for_vertex); + if (err != OK) + return Vector<double>(); + } else { + //fill with zeros, as bufferview is not defined. 
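For reference, a hedged reading of the decode setup above (assuming the GLTFType enum is ordered SCALAR, VEC2, VEC3, VEC4, MAT2, MAT3, MAT4, which is what the lookup table implies):

    // component_count_for_type[a->type]:
    //   SCALAR -> 1, VEC2 -> 2, VEC3 -> 3, VEC4 -> 4,
    //   MAT2   -> 4, MAT3 -> 9, MAT4 -> 16
    // The MAT2/MAT3 overrides for byte-sized components follow the glTF 2.0
    // alignment rule that every matrix column must start on a 4-byte boundary,
    // e.g. a MAT3 of BYTE components stores three 3-byte columns each padded to
    // 4 bytes: element_size = 3 * 4 = 12, with 1 pad byte skipped after every
    // 3 components (skip_every = 3, skip_bytes = 1).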
+ for (int i = 0; i < (a->count * component_count); i++) { + dst_buffer.write[i] = 0; + } + } + + if (a->sparse_count > 0) { + // I could not find any file using this, so this code is so far untested + Vector<double> indices; + indices.resize(a->sparse_count); + const int indices_component_size = _get_component_type_size(a->sparse_indices_component_type); + + Error err = _decode_buffer_view(state, indices.ptrw(), a->sparse_indices_buffer_view, 0, 0, indices_component_size, a->sparse_count, TYPE_SCALAR, 1, a->sparse_indices_component_type, indices_component_size, false, a->sparse_indices_byte_offset, false); + if (err != OK) + return Vector<double>(); + + Vector<double> data; + data.resize(component_count * a->sparse_count); + err = _decode_buffer_view(state, data.ptrw(), a->sparse_values_buffer_view, skip_every, skip_bytes, element_size, a->sparse_count, a->type, component_count, a->component_type, component_size, a->normalized, a->sparse_values_byte_offset, p_for_vertex); + if (err != OK) + return Vector<double>(); + + for (int i = 0; i < indices.size(); i++) { + const int write_offset = int(indices[i]) * component_count; + + for (int j = 0; j < component_count; j++) { + dst[write_offset + j] = data[i * component_count + j]; + } + } + } + + return dst_buffer; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_ints(Ref<GLTFState> state, const Vector<int32_t> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 1; + const int ret_size = p_attribs.size(); + Vector<double> attribs; + attribs.resize(ret_size); + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + attribs.write[i] = Math::stepify(p_attribs[i], 1.0); + if (i == 0) { + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = attribs[(i * element_count) + type_i]; + type_min.write[type_i] = attribs[(i * element_count) + type_i]; + } + } + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = MAX(attribs[(i * element_count) + type_i], type_max[type_i]); + type_min.write[type_i] = MIN(attribs[(i * element_count) + type_i], type_min[type_i]); + type_max.write[type_i] = _filter_number(type_max.write[type_i]); + type_min.write[type_i] = _filter_number(type_min.write[type_i]); + } + } + + ERR_FAIL_COND_V(attribs.size() == 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_SCALAR; + const int component_type = GLTFDocument::COMPONENT_TYPE_INT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = ret_size; + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +Vector<int> GLTFDocument::_decode_accessor_as_ints(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<int> ret; + + if (attribs.size() == 0) + return ret; + + const 
double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size(); + ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = int(attribs_ptr[i]); + } + } + return ret; +} + +Vector<float> GLTFDocument::_decode_accessor_as_floats(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<float> ret; + + if (attribs.size() == 0) + return ret; + + const double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size(); + ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = float(attribs_ptr[i]); + } + } + return ret; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_vec2(Ref<GLTFState> state, const Vector<Vector2> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 2; + + const int ret_size = p_attribs.size() * element_count; + Vector<double> attribs; + attribs.resize(ret_size); + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + + for (int i = 0; i < p_attribs.size(); i++) { + Vector2 attrib = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(attrib.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(attrib.y, CMP_NORMALIZE_TOLERANCE); + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC2; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_color(Ref<GLTFState> state, const Vector<Color> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + + const int ret_size = p_attribs.size() * 4; + Vector<double> attribs; + attribs.resize(ret_size); + + const int element_count = 4; + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Color attrib = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(attrib.r, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(attrib.g, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 2] = Math::stepify(attrib.b, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 3] = Math::stepify(attrib.a, CMP_NORMALIZE_TOLERANCE); + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const 
GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC4; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +void GLTFDocument::_calc_accessor_min_max(int i, const int element_count, Vector<double> &type_max, Vector<double> attribs, Vector<double> &type_min) { + if (i == 0) { + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = attribs[(i * element_count) + type_i]; + type_min.write[type_i] = attribs[(i * element_count) + type_i]; + } + } + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = MAX(attribs[(i * element_count) + type_i], type_max[type_i]); + type_min.write[type_i] = MIN(attribs[(i * element_count) + type_i], type_min[type_i]); + type_max.write[type_i] = _filter_number(type_max.write[type_i]); + type_min.write[type_i] = _filter_number(type_min.write[type_i]); + } +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_weights(Ref<GLTFState> state, const Vector<Color> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + + const int ret_size = p_attribs.size() * 4; + Vector<double> attribs; + attribs.resize(ret_size); + + const int element_count = 4; + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Color attrib = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(attrib.r, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(attrib.g, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 2] = Math::stepify(attrib.b, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 3] = Math::stepify(attrib.a, CMP_NORMALIZE_TOLERANCE); + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC4; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_joints(Ref<GLTFState> state, const Vector<Color> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + + const int element_count = 4; + const int ret_size = p_attribs.size() * element_count; + Vector<double> 
attribs; + attribs.resize(ret_size); + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Color attrib = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(attrib.r, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(attrib.g, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 2] = Math::stepify(attrib.b, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 3] = Math::stepify(attrib.a, CMP_NORMALIZE_TOLERANCE); + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC4; + const int component_type = GLTFDocument::COMPONENT_TYPE_UNSIGNED_SHORT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_quats(Ref<GLTFState> state, const Vector<Quat> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 4; + + const int ret_size = p_attribs.size() * element_count; + Vector<double> attribs; + attribs.resize(ret_size); + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Quat quat = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(quat.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(quat.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 2] = Math::stepify(quat.z, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 3] = Math::stepify(quat.w, CMP_NORMALIZE_TOLERANCE); + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC4; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +Vector<Vector2> GLTFDocument::_decode_accessor_as_vec2(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> 
attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Vector2> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 2 != 0, ret); + const double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size() / 2; + ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = Vector2(attribs_ptr[i * 2 + 0], attribs_ptr[i * 2 + 1]); + } + } + return ret; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_floats(Ref<GLTFState> state, const Vector<real_t> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 1; + const int ret_size = p_attribs.size(); + Vector<double> attribs; + attribs.resize(ret_size); + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + + for (int i = 0; i < p_attribs.size(); i++) { + attribs.write[i] = Math::stepify(p_attribs[i], CMP_NORMALIZE_TOLERANCE); + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + + ERR_FAIL_COND_V(!attribs.size(), -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_SCALAR; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = ret_size; + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_vec3(Ref<GLTFState> state, const Vector<Vector3> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 3; + const int ret_size = p_attribs.size() * element_count; + Vector<double> attribs; + attribs.resize(ret_size); + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Vector3 attrib = p_attribs[i]; + attribs.write[(i * element_count) + 0] = Math::stepify(attrib.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 1] = Math::stepify(attrib.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[(i * element_count) + 2] = Math::stepify(attrib.z, CMP_NORMALIZE_TOLERANCE); + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_VEC3; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + 
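All of the _encode_accessor_as_* helpers in this family follow the same recipe: quantize with Math::stepify, track per-component min/max via _calc_accessor_min_max, then append a buffer view and a typed accessor to the state. A minimal usage sketch, with illustrative values only (state is assumed to already hold buffer 0):

    Vector<Vector3> positions;
    positions.push_back(Vector3(0, 0, 0));
    positions.push_back(Vector3(1, 0, 0));
    positions.push_back(Vector3(0, 1, 0));
    // Returns the index of a new TYPE_VEC3 / COMPONENT_TYPE_FLOAT accessor,
    // or -1 on failure; the raw floats land in buffer 0 via _encode_buffer_view().
    GLTFAccessorIndex position_accessor = _encode_accessor_as_vec3(state, positions, /*p_for_vertex=*/true);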
accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +GLTFAccessorIndex GLTFDocument::_encode_accessor_as_xform(Ref<GLTFState> state, const Vector<Transform> p_attribs, const bool p_for_vertex) { + if (p_attribs.size() == 0) { + return -1; + } + const int element_count = 16; + const int ret_size = p_attribs.size() * element_count; + Vector<double> attribs; + attribs.resize(ret_size); + + Vector<double> type_max; + type_max.resize(element_count); + Vector<double> type_min; + type_min.resize(element_count); + for (int i = 0; i < p_attribs.size(); i++) { + Transform attrib = p_attribs[i]; + Basis basis = attrib.get_basis(); + Vector3 axis_0 = basis.get_axis(Vector3::AXIS_X); + + attribs.write[i * element_count + 0] = Math::stepify(axis_0.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 1] = Math::stepify(axis_0.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 2] = Math::stepify(axis_0.z, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 3] = 0.0; + + Vector3 axis_1 = basis.get_axis(Vector3::AXIS_Y); + attribs.write[i * element_count + 4] = Math::stepify(axis_1.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 5] = Math::stepify(axis_1.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 6] = Math::stepify(axis_1.z, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 7] = 0.0; + + Vector3 axis_2 = basis.get_axis(Vector3::AXIS_Z); + attribs.write[i * element_count + 8] = Math::stepify(axis_2.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 9] = Math::stepify(axis_2.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 10] = Math::stepify(axis_2.z, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 11] = 0.0; + + Vector3 origin = attrib.get_origin(); + attribs.write[i * element_count + 12] = Math::stepify(origin.x, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 13] = Math::stepify(origin.y, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 14] = Math::stepify(origin.z, CMP_NORMALIZE_TOLERANCE); + attribs.write[i * element_count + 15] = 1.0; + + _calc_accessor_min_max(i, element_count, type_max, attribs, type_min); + } + ERR_FAIL_COND_V(attribs.size() % element_count != 0, -1); + + Ref<GLTFAccessor> accessor; + accessor.instance(); + GLTFBufferIndex buffer_view_i; + int64_t size = state->buffers[0].size(); + const GLTFDocument::GLTFType type = GLTFDocument::TYPE_MAT4; + const int component_type = GLTFDocument::COMPONENT_TYPE_FLOAT; + + accessor->max = type_max; + accessor->min = type_min; + accessor->normalized = false; + accessor->count = p_attribs.size(); + accessor->type = type; + accessor->component_type = component_type; + accessor->byte_offset = 0; + Error err = _encode_buffer_view(state, attribs.ptr(), p_attribs.size(), type, component_type, accessor->normalized, size, p_for_vertex, buffer_view_i); + if (err != OK) { + return -1; + } + accessor->buffer_view = buffer_view_i; + state->accessors.push_back(accessor); + return state->accessors.size() - 1; +} + +Vector<Vector3> GLTFDocument::_decode_accessor_as_vec3(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Vector3> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 3 != 0, ret); + const double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size() / 3; + 
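A hedged note on the TYPE_MAT4 packing in _encode_accessor_as_xform above: glTF stores matrices column-major, so the sixteen doubles per transform are laid out as

    // doubles  0..3  : basis X axis, then 0.0
    // doubles  4..7  : basis Y axis, then 0.0
    // doubles  8..11 : basis Z axis, then 0.0
    // doubles 12..15 : origin,       then 1.0

which is exactly the layout _decode_accessor_as_xform reads back further down (indices 0-2, 4-6 and 8-10 for the basis axes, 12-14 for the origin).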
ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = Vector3(attribs_ptr[i * 3 + 0], attribs_ptr[i * 3 + 1], attribs_ptr[i * 3 + 2]); + } + } + return ret; +} + +Vector<Color> GLTFDocument::_decode_accessor_as_color(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Color> ret; + + if (attribs.size() == 0) + return ret; + + const int type = state->accessors[p_accessor]->type; + ERR_FAIL_COND_V(!(type == TYPE_VEC3 || type == TYPE_VEC4), ret); + int vec_len = 3; + if (type == TYPE_VEC4) { + vec_len = 4; + } + + ERR_FAIL_COND_V(attribs.size() % vec_len != 0, ret); + const double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size() / vec_len; + ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = Color(attribs_ptr[i * vec_len + 0], attribs_ptr[i * vec_len + 1], attribs_ptr[i * vec_len + 2], vec_len == 4 ? attribs_ptr[i * 4 + 3] : 1.0); + } + } + return ret; +} +Vector<Quat> GLTFDocument::_decode_accessor_as_quat(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Quat> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 4 != 0, ret); + const double *attribs_ptr = attribs.ptr(); + const int ret_size = attribs.size() / 4; + ret.resize(ret_size); + { + for (int i = 0; i < ret_size; i++) { + ret.write[i] = Quat(attribs_ptr[i * 4 + 0], attribs_ptr[i * 4 + 1], attribs_ptr[i * 4 + 2], attribs_ptr[i * 4 + 3]).normalized(); + } + } + return ret; +} +Vector<Transform2D> GLTFDocument::_decode_accessor_as_xform2d(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Transform2D> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 4 != 0, ret); + ret.resize(attribs.size() / 4); + for (int i = 0; i < ret.size(); i++) { + ret.write[i][0] = Vector2(attribs[i * 4 + 0], attribs[i * 4 + 1]); + ret.write[i][1] = Vector2(attribs[i * 4 + 2], attribs[i * 4 + 3]); + } + return ret; +} + +Vector<Basis> GLTFDocument::_decode_accessor_as_basis(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Basis> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 9 != 0, ret); + ret.resize(attribs.size() / 9); + for (int i = 0; i < ret.size(); i++) { + ret.write[i].set_axis(0, Vector3(attribs[i * 9 + 0], attribs[i * 9 + 1], attribs[i * 9 + 2])); + ret.write[i].set_axis(1, Vector3(attribs[i * 9 + 3], attribs[i * 9 + 4], attribs[i * 9 + 5])); + ret.write[i].set_axis(2, Vector3(attribs[i * 9 + 6], attribs[i * 9 + 7], attribs[i * 9 + 8])); + } + return ret; +} + +Vector<Transform> GLTFDocument::_decode_accessor_as_xform(Ref<GLTFState> state, const GLTFAccessorIndex p_accessor, const bool p_for_vertex) { + const Vector<double> attribs = _decode_accessor(state, p_accessor, p_for_vertex); + Vector<Transform> ret; + + if (attribs.size() == 0) + return ret; + + ERR_FAIL_COND_V(attribs.size() % 16 != 0, ret); + ret.resize(attribs.size() / 16); + for (int i = 0; i < ret.size(); i++) { + ret.write[i].basis.set_axis(0, Vector3(attribs[i * 16 + 0], attribs[i * 16 + 1], attribs[i * 
16 + 2])); + ret.write[i].basis.set_axis(1, Vector3(attribs[i * 16 + 4], attribs[i * 16 + 5], attribs[i * 16 + 6])); + ret.write[i].basis.set_axis(2, Vector3(attribs[i * 16 + 8], attribs[i * 16 + 9], attribs[i * 16 + 10])); + ret.write[i].set_origin(Vector3(attribs[i * 16 + 12], attribs[i * 16 + 13], attribs[i * 16 + 14])); + } + return ret; +} + +Error GLTFDocument::_serialize_meshes(Ref<GLTFState> state) { + Array meshes; + for (GLTFMeshIndex gltf_mesh_i = 0; gltf_mesh_i < state->meshes.size(); gltf_mesh_i++) { + print_verbose("glTF: Serializing mesh: " + itos(gltf_mesh_i)); + Ref<EditorSceneImporterMesh> import_mesh = state->meshes.write[gltf_mesh_i]->get_mesh(); + if (import_mesh.is_null()) { + continue; + } + Array primitives; + Array targets; + Dictionary gltf_mesh; + Array target_names; + Array weights; + for (int surface_i = 0; surface_i < import_mesh->get_surface_count(); surface_i++) { + Dictionary primitive; + Mesh::PrimitiveType primitive_type = import_mesh->get_surface_primitive_type(surface_i); + switch (primitive_type) { + case Mesh::PRIMITIVE_POINTS: { + primitive["mode"] = 0; + break; + } + case Mesh::PRIMITIVE_LINES: { + primitive["mode"] = 1; + break; + } + // case Mesh::PRIMITIVE_LINE_LOOP: { + // primitive["mode"] = 2; + // break; + // } + case Mesh::PRIMITIVE_LINE_STRIP: { + primitive["mode"] = 3; + break; + } + case Mesh::PRIMITIVE_TRIANGLES: { + primitive["mode"] = 4; + break; + } + case Mesh::PRIMITIVE_TRIANGLE_STRIP: { + primitive["mode"] = 5; + break; + } + // case Mesh::PRIMITIVE_TRIANGLE_FAN: { + // primitive["mode"] = 6; + // break; + // } + default: { + ERR_FAIL_V(FAILED); + } + } + + Array array = import_mesh->get_surface_arrays(surface_i); + Dictionary attributes; + { + Vector<Vector3> a = array[Mesh::ARRAY_VERTEX]; + ERR_FAIL_COND_V(!a.size(), ERR_INVALID_DATA); + attributes["POSITION"] = _encode_accessor_as_vec3(state, a, true); + } + { + Vector<real_t> a = array[Mesh::ARRAY_TANGENT]; + if (a.size()) { + const int ret_size = a.size() / 4; + Vector<Color> attribs; + attribs.resize(ret_size); + for (int i = 0; i < ret_size; i++) { + Color out; + out.r = a[(i * 4) + 0]; + out.g = a[(i * 4) + 1]; + out.b = a[(i * 4) + 2]; + out.a = a[(i * 4) + 3]; + attribs.write[i] = out; + } + attributes["TANGENT"] = _encode_accessor_as_color(state, attribs, true); + } + } + { + Vector<Vector3> a = array[Mesh::ARRAY_NORMAL]; + if (a.size()) { + const int ret_size = a.size(); + Vector<Vector3> attribs; + attribs.resize(ret_size); + for (int i = 0; i < ret_size; i++) { + attribs.write[i] = Vector3(a[i]).normalized(); + } + attributes["NORMAL"] = _encode_accessor_as_vec3(state, attribs, true); + } + } + { + Vector<Vector2> a = array[Mesh::ARRAY_TEX_UV]; + if (a.size()) { + attributes["TEXCOORD_0"] = _encode_accessor_as_vec2(state, a, true); + } + } + { + Vector<Vector2> a = array[Mesh::ARRAY_TEX_UV2]; + if (a.size()) { + attributes["TEXCOORD_1"] = _encode_accessor_as_vec2(state, a, true); + } + } + { + Vector<Color> a = array[Mesh::ARRAY_COLOR]; + if (a.size()) { + attributes["COLOR_0"] = _encode_accessor_as_color(state, a, true); + } + } + Map<int, int> joint_i_to_bone_i; + for (GLTFNodeIndex node_i = 0; node_i < state->nodes.size(); node_i++) { + GLTFSkinIndex skin_i = -1; + if (state->nodes[node_i]->mesh == gltf_mesh_i) { + skin_i = state->nodes[node_i]->skin; + } + if (skin_i != -1) { + joint_i_to_bone_i = state->skins[skin_i]->joint_i_to_bone_i; + break; + } + } + { + Array a = array[Mesh::ARRAY_BONES]; + if (a.size()) { + const int ret_size = a.size() / 4; + 
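In the skinning attributes that follow, Color is used purely as a carrier for four floats per vertex: ARRAY_BONES quadruplets become JOINTS_0 (written out as UNSIGNED_SHORT by _encode_accessor_as_joints) and ARRAY_WEIGHTS quadruplets become WEIGHTS_0, matching glTF's four-influences-per-vertex layout. A hedged sketch of what a single vertex contributes (illustrative values):

    // vertex influenced by bones 3 and 7, weighted 75% / 25%
    Color joints(3, 7, 0, 0);        // -> JOINTS_0
    Color weights(0.75, 0.25, 0, 0); // -> WEIGHTS_0; components should sum to 1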
Vector<Color> attribs; + attribs.resize(ret_size); + { + for (int array_i = 0; array_i < attribs.size(); array_i++) { + int32_t joint_0 = a[(array_i * 4) + 0]; + int32_t joint_1 = a[(array_i * 4) + 1]; + int32_t joint_2 = a[(array_i * 4) + 2]; + int32_t joint_3 = a[(array_i * 4) + 3]; + attribs.write[array_i] = Color(joint_0, joint_1, joint_2, joint_3); + } + } + attributes["JOINTS_0"] = _encode_accessor_as_joints(state, attribs, true); + } + } + { + Array a = array[Mesh::ARRAY_WEIGHTS]; + if (a.size()) { + const int ret_size = a.size() / 4; + Vector<Color> attribs; + attribs.resize(ret_size); + for (int i = 0; i < ret_size; i++) { + attribs.write[i] = Color(a[(i * 4) + 0], a[(i * 4) + 1], a[(i * 4) + 2], a[(i * 4) + 3]); + } + attributes["WEIGHTS_0"] = _encode_accessor_as_weights(state, attribs, true); + } + } + { + Vector<int32_t> mesh_indices = array[Mesh::ARRAY_INDEX]; + if (mesh_indices.size()) { + if (primitive_type == Mesh::PRIMITIVE_TRIANGLES) { + //swap around indices, convert ccw to cw for front face + const int is = mesh_indices.size(); + for (int k = 0; k < is; k += 3) { + SWAP(mesh_indices.write[k + 0], mesh_indices.write[k + 2]); + } + } + primitive["indices"] = _encode_accessor_as_ints(state, mesh_indices, true); + } else { + if (primitive_type == Mesh::PRIMITIVE_TRIANGLES) { + //generate indices because they need to be swapped for CW/CCW + const Vector<Vector3> &vertices = array[Mesh::ARRAY_VERTEX]; + Ref<SurfaceTool> st; + st.instance(); + st->create_from_triangle_arrays(array); + st->index(); + Vector<int32_t> generated_indices = st->commit_to_arrays()[Mesh::ARRAY_INDEX]; + const int vs = vertices.size(); + generated_indices.resize(vs); + { + for (int k = 0; k < vs; k += 3) { + generated_indices.write[k] = k; + generated_indices.write[k + 1] = k + 2; + generated_indices.write[k + 2] = k + 1; + } + } + primitive["indices"] = _encode_accessor_as_ints(state, generated_indices, true); + } + } + } + + primitive["attributes"] = attributes; + + //blend shapes + print_verbose("glTF: Mesh has targets"); + if (import_mesh->get_blend_shape_count()) { + ArrayMesh::BlendShapeMode shape_mode = import_mesh->get_blend_shape_mode(); + for (int morph_i = 0; morph_i < import_mesh->get_blend_shape_count(); morph_i++) { + Array array_morph = import_mesh->get_surface_blend_shape_arrays(surface_i, morph_i); + target_names.push_back(import_mesh->get_blend_shape_name(morph_i)); + Dictionary t; + Vector<Vector3> varr = array_morph[Mesh::ARRAY_VERTEX]; + Array mesh_arrays = import_mesh->get_surface_arrays(surface_i); + if (varr.size()) { + Vector<Vector3> src_varr = array[Mesh::ARRAY_VERTEX]; + if (shape_mode == ArrayMesh::BlendShapeMode::BLEND_SHAPE_MODE_NORMALIZED) { + const int max_idx = src_varr.size(); + for (int blend_i = 0; blend_i < max_idx; blend_i++) { + varr.write[blend_i] = Vector3(varr[blend_i]) - src_varr[blend_i]; + } + } + + t["POSITION"] = _encode_accessor_as_vec3(state, varr, true); + } + + Vector<Vector3> narr = array_morph[Mesh::ARRAY_NORMAL]; + if (varr.size()) { + t["NORMAL"] = _encode_accessor_as_vec3(state, narr, true); + } + Vector<real_t> tarr = array_morph[Mesh::ARRAY_TANGENT]; + if (tarr.size()) { + const int ret_size = tarr.size() / 4; + Vector<Color> attribs; + attribs.resize(ret_size); + for (int i = 0; i < ret_size; i++) { + Color tangent; + tangent.r = tarr[(i * 4) + 0]; + tangent.r = tarr[(i * 4) + 1]; + tangent.r = tarr[(i * 4) + 2]; + tangent.r = tarr[(i * 4) + 3]; + } + t["TANGENT"] = _encode_accessor_as_color(state, attribs, true); + } + targets.push_back(t); + 
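One hedged observation on the TANGENT morph-target block just above: the loop assigns tangent.r four times and never writes the result into attribs, so the encoded target tangents appear to stay at their default value; the NORMAL target is likewise gated on varr.size() rather than narr.size(). A corrected sketch of that inner loop, reusing the same local names, would be:

    for (int i = 0; i < ret_size; i++) {
        Color tangent;
        tangent.r = tarr[(i * 4) + 0];
        tangent.g = tarr[(i * 4) + 1];
        tangent.b = tarr[(i * 4) + 2];
        tangent.a = tarr[(i * 4) + 3];
        attribs.write[i] = tangent; // actually store the packed tangent
    }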
} + } + + Ref<BaseMaterial3D> mat = import_mesh->get_surface_material(surface_i); + if (mat.is_valid()) { + Map<Ref<BaseMaterial3D>, GLTFMaterialIndex>::Element *material_cache_i = state->material_cache.find(mat); + if (material_cache_i && material_cache_i->get() != -1) { + primitive["material"] = material_cache_i->get(); + } else { + GLTFMaterialIndex mat_i = state->materials.size(); + state->materials.push_back(mat); + primitive["material"] = mat_i; + state->material_cache.insert(mat, mat_i); + } + } + + if (targets.size()) { + primitive["targets"] = targets; + } + + primitives.push_back(primitive); + } + + Dictionary e; + e["targetNames"] = target_names; + + for (int j = 0; j < target_names.size(); j++) { + real_t weight = 0; + if (j < state->meshes.write[gltf_mesh_i]->get_blend_weights().size()) { + weight = state->meshes.write[gltf_mesh_i]->get_blend_weights()[j]; + } + weights.push_back(weight); + } + if (weights.size()) { + gltf_mesh["weights"] = weights; + } + + ERR_FAIL_COND_V(target_names.size() != weights.size(), FAILED); + + gltf_mesh["extras"] = e; + + gltf_mesh["primitives"] = primitives; + + meshes.push_back(gltf_mesh); + } + + state->json["meshes"] = meshes; + print_verbose("glTF: Total meshes: " + itos(meshes.size())); + + return OK; +} + +Error GLTFDocument::_parse_meshes(Ref<GLTFState> state) { + if (!state->json.has("meshes")) { + return OK; + } + + Array meshes = state->json["meshes"]; + for (GLTFMeshIndex i = 0; i < meshes.size(); i++) { + print_verbose("glTF: Parsing mesh: " + itos(i)); + Dictionary d = meshes[i]; + + Ref<GLTFMesh> mesh; + mesh.instance(); + bool has_vertex_color = false; + + ERR_FAIL_COND_V(!d.has("primitives"), ERR_PARSE_ERROR); + + Array primitives = d["primitives"]; + const Dictionary &extras = d.has("extras") ? 
(Dictionary)d["extras"] : Dictionary(); + Ref<EditorSceneImporterMesh> import_mesh; + import_mesh.instance(); + for (int j = 0; j < primitives.size(); j++) { + Dictionary p = primitives[j]; + + Array array; + array.resize(Mesh::ARRAY_MAX); + + ERR_FAIL_COND_V(!p.has("attributes"), ERR_PARSE_ERROR); + + Dictionary a = p["attributes"]; + + Mesh::PrimitiveType primitive = Mesh::PRIMITIVE_TRIANGLES; + if (p.has("mode")) { + const int mode = p["mode"]; + ERR_FAIL_INDEX_V(mode, 7, ERR_FILE_CORRUPT); + static const Mesh::PrimitiveType primitives2[7] = { + Mesh::PRIMITIVE_POINTS, + Mesh::PRIMITIVE_LINES, + Mesh::PRIMITIVE_LINES, //loop not supported, should ce converted + Mesh::PRIMITIVE_LINES, + Mesh::PRIMITIVE_TRIANGLES, + Mesh::PRIMITIVE_TRIANGLE_STRIP, + Mesh::PRIMITIVE_TRIANGLES, //fan not supported, should be converted +#ifndef _MSC_VER +#warning line loop and triangle fan are not supported and need to be converted to lines and triangles +#endif + + }; + + primitive = primitives2[mode]; + } + + ERR_FAIL_COND_V(!a.has("POSITION"), ERR_PARSE_ERROR); + if (a.has("POSITION")) { + array[Mesh::ARRAY_VERTEX] = _decode_accessor_as_vec3(state, a["POSITION"], true); + } + if (a.has("NORMAL")) { + array[Mesh::ARRAY_NORMAL] = _decode_accessor_as_vec3(state, a["NORMAL"], true); + } + if (a.has("TANGENT")) { + array[Mesh::ARRAY_TANGENT] = _decode_accessor_as_floats(state, a["TANGENT"], true); + } + if (a.has("TEXCOORD_0")) { + array[Mesh::ARRAY_TEX_UV] = _decode_accessor_as_vec2(state, a["TEXCOORD_0"], true); + } + if (a.has("TEXCOORD_1")) { + array[Mesh::ARRAY_TEX_UV2] = _decode_accessor_as_vec2(state, a["TEXCOORD_1"], true); + } + if (a.has("COLOR_0")) { + array[Mesh::ARRAY_COLOR] = _decode_accessor_as_color(state, a["COLOR_0"], true); + has_vertex_color = true; + } + if (a.has("JOINTS_0")) { + array[Mesh::ARRAY_BONES] = _decode_accessor_as_ints(state, a["JOINTS_0"], true); + } + if (a.has("WEIGHTS_0")) { + Vector<float> weights = _decode_accessor_as_floats(state, a["WEIGHTS_0"], true); + { //gltf does not seem to normalize the weights for some reason.. + int wc = weights.size(); + float *w = weights.ptrw(); + + for (int k = 0; k < wc; k += 4) { + float total = 0.0; + total += w[k + 0]; + total += w[k + 1]; + total += w[k + 2]; + total += w[k + 3]; + if (total > 0.0) { + w[k + 0] /= total; + w[k + 1] /= total; + w[k + 2] /= total; + w[k + 3] /= total; + } + } + } + array[Mesh::ARRAY_WEIGHTS] = weights; + } + + if (p.has("indices")) { + Vector<int> indices = _decode_accessor_as_ints(state, p["indices"], false); + + if (primitive == Mesh::PRIMITIVE_TRIANGLES) { + //swap around indices, convert ccw to cw for front face + + const int is = indices.size(); + int *w = indices.ptrw(); + for (int k = 0; k < is; k += 3) { + SWAP(w[k + 1], w[k + 2]); + } + } + array[Mesh::ARRAY_INDEX] = indices; + + } else if (primitive == Mesh::PRIMITIVE_TRIANGLES) { + //generate indices because they need to be swapped for CW/CCW + const Vector<Vector3> &vertices = array[Mesh::ARRAY_VERTEX]; + ERR_FAIL_COND_V(vertices.size() == 0, ERR_PARSE_ERROR); + Vector<int> indices; + const int vs = vertices.size(); + indices.resize(vs); + { + int *w = indices.ptrw(); + for (int k = 0; k < vs; k += 3) { + w[k] = k; + w[k + 1] = k + 2; + w[k + 2] = k + 1; + } + } + array[Mesh::ARRAY_INDEX] = indices; + } + + bool generate_tangents = (primitive == Mesh::PRIMITIVE_TRIANGLES && !a.has("TANGENT") && a.has("TEXCOORD_0") && a.has("NORMAL")); + + if (generate_tangents) { + //must generate mikktspace tangents.. ergh.. 
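Two hedged notes on the parsing above: glTF front faces are counter-clockwise while Godot expects clockwise, hence the per-triangle index swap, and imported WEIGHTS_0 values are re-normalized because exporters do not always make the four influences sum to 1. Illustrative values only:

    // index winding:  glTF (CCW) 0, 1, 2  ->  after SWAP(w[k + 1], w[k + 2])  ->  0, 2, 1
    // weight fix-up:  raw (2.0, 1.0, 1.0, 0.0), total 4.0  ->  (0.5, 0.25, 0.25, 0.0)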
+ Ref<SurfaceTool> st; + st.instance(); + st->create_from_triangle_arrays(array); + st->generate_tangents(); + array = st->commit_to_arrays(); + } + + Array morphs; + //blend shapes + if (p.has("targets")) { + print_verbose("glTF: Mesh has targets"); + const Array &targets = p["targets"]; + + //ideally BLEND_SHAPE_MODE_RELATIVE since gltf2 stores in displacement + //but it could require a larger refactor? + import_mesh->set_blend_shape_mode(Mesh::BLEND_SHAPE_MODE_NORMALIZED); + + if (j == 0) { + const Array &target_names = extras.has("targetNames") ? (Array)extras["targetNames"] : Array(); + for (int k = 0; k < targets.size(); k++) { + const String name = k < target_names.size() ? (String)target_names[k] : String("morph_") + itos(k); + import_mesh->add_blend_shape(name); + } + } + + for (int k = 0; k < targets.size(); k++) { + const Dictionary &t = targets[k]; + + Array array_copy; + array_copy.resize(Mesh::ARRAY_MAX); + + for (int l = 0; l < Mesh::ARRAY_MAX; l++) { + array_copy[l] = array[l]; + } + + array_copy[Mesh::ARRAY_INDEX] = Variant(); + + if (t.has("POSITION")) { + Vector<Vector3> varr = _decode_accessor_as_vec3(state, t["POSITION"], true); + const Vector<Vector3> src_varr = array[Mesh::ARRAY_VERTEX]; + const int size = src_varr.size(); + ERR_FAIL_COND_V(size == 0, ERR_PARSE_ERROR); + { + const int max_idx = varr.size(); + varr.resize(size); + + Vector3 *w_varr = varr.ptrw(); + const Vector3 *r_varr = varr.ptr(); + const Vector3 *r_src_varr = src_varr.ptr(); + for (int l = 0; l < size; l++) { + if (l < max_idx) { + w_varr[l] = r_varr[l] + r_src_varr[l]; + } else { + w_varr[l] = r_src_varr[l]; + } + } + } + array_copy[Mesh::ARRAY_VERTEX] = varr; + } + if (t.has("NORMAL")) { + Vector<Vector3> narr = _decode_accessor_as_vec3(state, t["NORMAL"], true); + const Vector<Vector3> src_narr = array[Mesh::ARRAY_NORMAL]; + int size = src_narr.size(); + ERR_FAIL_COND_V(size == 0, ERR_PARSE_ERROR); + { + int max_idx = narr.size(); + narr.resize(size); + + Vector3 *w_narr = narr.ptrw(); + const Vector3 *r_narr = narr.ptr(); + const Vector3 *r_src_narr = src_narr.ptr(); + for (int l = 0; l < size; l++) { + if (l < max_idx) { + w_narr[l] = r_narr[l] + r_src_narr[l]; + } else { + w_narr[l] = r_src_narr[l]; + } + } + } + array_copy[Mesh::ARRAY_NORMAL] = narr; + } + if (t.has("TANGENT")) { + const Vector<Vector3> tangents_v3 = _decode_accessor_as_vec3(state, t["TANGENT"], true); + const Vector<float> src_tangents = array[Mesh::ARRAY_TANGENT]; + ERR_FAIL_COND_V(src_tangents.size() == 0, ERR_PARSE_ERROR); + + Vector<float> tangents_v4; + + { + int max_idx = tangents_v3.size(); + + int size4 = src_tangents.size(); + tangents_v4.resize(size4); + float *w4 = tangents_v4.ptrw(); + + const Vector3 *r3 = tangents_v3.ptr(); + const float *r4 = src_tangents.ptr(); + + for (int l = 0; l < size4 / 4; l++) { + if (l < max_idx) { + w4[l * 4 + 0] = r3[l].x + r4[l * 4 + 0]; + w4[l * 4 + 1] = r3[l].y + r4[l * 4 + 1]; + w4[l * 4 + 2] = r3[l].z + r4[l * 4 + 2]; + } else { + w4[l * 4 + 0] = r4[l * 4 + 0]; + w4[l * 4 + 1] = r4[l * 4 + 1]; + w4[l * 4 + 2] = r4[l * 4 + 2]; + } + w4[l * 4 + 3] = r4[l * 4 + 3]; //copy flip value + } + } + + array_copy[Mesh::ARRAY_TANGENT] = tangents_v4; + } + + if (generate_tangents) { + Ref<SurfaceTool> st; + st.instance(); + st->create_from_triangle_arrays(array_copy); + st->deindex(); + st->generate_tangents(); + array_copy = st->commit_to_arrays(); + } + + morphs.push_back(array_copy); + } + } + + //just add it + + Ref<BaseMaterial3D> mat; + if (p.has("material")) { + const int material 
= p["material"]; + ERR_FAIL_INDEX_V(material, state->materials.size(), ERR_FILE_CORRUPT); + Ref<BaseMaterial3D> mat3d = state->materials[material]; + if (has_vertex_color) { + mat3d->set_flag(BaseMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); + } + mat = mat3d; + + } else if (has_vertex_color) { + Ref<StandardMaterial3D> mat3d; + mat3d.instance(); + mat3d->set_flag(BaseMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); + mat = mat3d; + } + + import_mesh->add_surface(primitive, array, morphs, Dictionary(), mat); + } + + Vector<float> blend_weights; + blend_weights.resize(import_mesh->get_blend_shape_count()); + for (int32_t weight_i = 0; weight_i < blend_weights.size(); weight_i++) { + blend_weights.write[weight_i] = 0.0f; + } + + if (d.has("weights")) { + const Array &weights = d["weights"]; + for (int j = 0; j < weights.size(); j++) { + blend_weights.write[j] = weights[j]; + } + mesh->set_blend_weights(blend_weights); + } + mesh->set_mesh(import_mesh); + + state->meshes.push_back(mesh); + } + + print_verbose("glTF: Total meshes: " + itos(state->meshes.size())); + + return OK; +} + +Error GLTFDocument::_serialize_images(Ref<GLTFState> state, const String &p_path) { + Array images; + for (int i = 0; i < state->images.size(); i++) { + Dictionary d; + + ERR_CONTINUE(state->images[i].is_null()); + + Ref<Image> image = state->images[i]->get_data(); + ERR_CONTINUE(image.is_null()); + + if (p_path.to_lower().ends_with("glb")) { + GLTFBufferViewIndex bvi; + + Ref<GLTFBufferView> bv; + bv.instance(); + + const GLTFBufferIndex bi = 0; + bv->buffer = bi; + bv->byte_offset = state->buffers[bi].size(); + ERR_FAIL_INDEX_V(bi, state->buffers.size(), ERR_PARAMETER_RANGE_ERROR); + + Vector<uint8_t> buffer; + Ref<ImageTexture> img_tex = image; + if (img_tex.is_valid()) { + image = img_tex->get_data(); + } + Error err = PNGDriverCommon::image_to_png(image, buffer); + ERR_FAIL_COND_V_MSG(err, err, "Can't convert image to PNG."); + + bv->byte_length = buffer.size(); + state->buffers.write[bi].resize(state->buffers[bi].size() + bv->byte_length); + copymem(&state->buffers.write[bi].write[bv->byte_offset], buffer.ptr(), buffer.size()); + ERR_FAIL_COND_V(bv->byte_offset + bv->byte_length > state->buffers[bi].size(), ERR_FILE_CORRUPT); + + state->buffer_views.push_back(bv); + bvi = state->buffer_views.size() - 1; + d["bufferView"] = bvi; + d["mimeType"] = "image/png"; + } else { + String name = state->images[i]->get_name(); + if (name.empty()) { + name = itos(i); + } + name = _gen_unique_name(state, name); + name = name.pad_zeros(3); + Ref<_Directory> dir; + dir.instance(); + String texture_dir = "textures"; + String new_texture_dir = p_path.get_base_dir() + "/" + texture_dir; + dir->open(p_path.get_base_dir()); + if (!dir->dir_exists(new_texture_dir)) { + dir->make_dir(new_texture_dir); + } + name = name + ".png"; + image->save_png(new_texture_dir.plus_file(name)); + d["uri"] = texture_dir.plus_file(name); + } + images.push_back(d); + } + + print_verbose("Total images: " + itos(state->images.size())); + + if (!images.size()) { + return OK; + } + state->json["images"] = images; + + return OK; +} + +Error GLTFDocument::_parse_images(Ref<GLTFState> state, const String &p_base_path) { + if (!state->json.has("images")) { + return OK; + } + + // Ref: https://github.com/KhronosGroup/glTF/blob/master/specification/2.0/README.md#images + + const Array &images = state->json["images"]; + for (int i = 0; i < images.size(); i++) { + const Dictionary &d = images[i]; + + // glTF 2.0 supports PNG and JPEG types, which can be 
specified as (from spec): + // "- a URI to an external file in one of the supported images formats, or + // - a URI with embedded base64-encoded data, or + // - a reference to a bufferView; in that case mimeType must be defined." + // Since mimeType is optional for external files and base64 data, we'll have to + // fall back on letting Godot parse the data to figure out if it's PNG or JPEG. + + // We'll assume that we use either URI or bufferView, so let's warn the user + // if their image somehow uses both. And fail if it has neither. + ERR_CONTINUE_MSG(!d.has("uri") && !d.has("bufferView"), "Invalid image definition in glTF file, it should specific an 'uri' or 'bufferView'."); + if (d.has("uri") && d.has("bufferView")) { + WARN_PRINT("Invalid image definition in glTF file using both 'uri' and 'bufferView'. 'bufferView' will take precedence."); + } + + String mimetype; + if (d.has("mimeType")) { // Should be "image/png" or "image/jpeg". + mimetype = d["mimeType"]; + } + + Vector<uint8_t> data; + const uint8_t *data_ptr = nullptr; + int data_size = 0; + + if (d.has("uri")) { + // Handles the first two bullet points from the spec (embedded data, or external file). + String uri = d["uri"]; + + if (uri.begins_with("data:")) { // Embedded data using base64. + // Validate data MIME types and throw a warning if it's one we don't know/support. + if (!uri.begins_with("data:application/octet-stream;base64") && + !uri.begins_with("data:application/gltf-buffer;base64") && + !uri.begins_with("data:image/png;base64") && + !uri.begins_with("data:image/jpeg;base64")) { + WARN_PRINT(vformat("glTF: Image index '%d' uses an unsupported URI data type: %s. Skipping it.", i, uri)); + state->images.push_back(Ref<Texture2D>()); // Placeholder to keep count. + continue; + } + data = _parse_base64_uri(uri); + data_ptr = data.ptr(); + data_size = data.size(); + // mimeType is optional, but if we have it defined in the URI, let's use it. + if (mimetype.empty()) { + if (uri.begins_with("data:image/png;base64")) { + mimetype = "image/png"; + } else if (uri.begins_with("data:image/jpeg;base64")) { + mimetype = "image/jpeg"; + } + } + } else { // Relative path to an external image file. + uri = p_base_path.plus_file(uri).replace("\\", "/"); // Fix for Windows. + // The spec says that if mimeType is defined, we should enforce it. + // So we should only rely on ResourceLoader::load if mimeType is not defined, + // otherwise we should use the same logic as for buffers. + if (mimetype == "image/png" || mimetype == "image/jpeg") { + // Load data buffer and rely on PNG and JPEG-specific logic below to load the image. + // This makes it possible to load a file with a wrong extension but correct MIME type, + // e.g. "foo.jpg" containing PNG data and with MIME type "image/png". ResourceLoader would fail. + data = FileAccess::get_file_as_array(uri); + ERR_FAIL_COND_V_MSG(data.size() == 0, ERR_PARSE_ERROR, "glTF: Couldn't load image file as an array: " + uri); + data_ptr = data.ptr(); + data_size = data.size(); + } else { + // Good old ResourceLoader will rely on file extension. + Ref<Texture2D> texture = ResourceLoader::load(uri); + state->images.push_back(texture); + continue; + } + } + } else if (d.has("bufferView")) { + // Handles the third bullet point from the spec (bufferView). 
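For reference, the embedded-data branch above expects URIs of the following shape (illustrative, payloads truncated); the prefix both routes the decode and, when the optional mimeType field is absent, supplies it, while application/octet-stream and application/gltf-buffer payloads fall through to the PNG-then-JPEG sniffing further below:

    // data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...
    // data:image/jpeg;base64,/9j/4AAQSkZJRgABAQ...
    // data:application/octet-stream;base64,...   (mimeType stays empty -> sniffed)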
+ ERR_FAIL_COND_V_MSG(mimetype.empty(), ERR_FILE_CORRUPT, + vformat("glTF: Image index '%d' specifies 'bufferView' but no 'mimeType', which is invalid.", i)); + + const GLTFBufferViewIndex bvi = d["bufferView"]; + + ERR_FAIL_INDEX_V(bvi, state->buffer_views.size(), ERR_PARAMETER_RANGE_ERROR); + + Ref<GLTFBufferView> bv = state->buffer_views[bvi]; + + const GLTFBufferIndex bi = bv->buffer; + ERR_FAIL_INDEX_V(bi, state->buffers.size(), ERR_PARAMETER_RANGE_ERROR); + + ERR_FAIL_COND_V(bv->byte_offset + bv->byte_length > state->buffers[bi].size(), ERR_FILE_CORRUPT); + + data_ptr = &state->buffers[bi][bv->byte_offset]; + data_size = bv->byte_length; + } + + Ref<Image> img; + + if (mimetype == "image/png") { // Load buffer as PNG. + ERR_FAIL_COND_V(Image::_png_mem_loader_func == nullptr, ERR_UNAVAILABLE); + img = Image::_png_mem_loader_func(data_ptr, data_size); + } else if (mimetype == "image/jpeg") { // Loader buffer as JPEG. + ERR_FAIL_COND_V(Image::_jpg_mem_loader_func == nullptr, ERR_UNAVAILABLE); + img = Image::_jpg_mem_loader_func(data_ptr, data_size); + } else { + // We can land here if we got an URI with base64-encoded data with application/* MIME type, + // and the optional mimeType property was not defined to tell us how to handle this data (or was invalid). + // So let's try PNG first, then JPEG. + ERR_FAIL_COND_V(Image::_png_mem_loader_func == nullptr, ERR_UNAVAILABLE); + img = Image::_png_mem_loader_func(data_ptr, data_size); + if (img.is_null()) { + ERR_FAIL_COND_V(Image::_jpg_mem_loader_func == nullptr, ERR_UNAVAILABLE); + img = Image::_jpg_mem_loader_func(data_ptr, data_size); + } + } + + ERR_FAIL_COND_V_MSG(img.is_null(), ERR_FILE_CORRUPT, + vformat("glTF: Couldn't load image index '%d' with its given mimetype: %s.", i, mimetype)); + + Ref<ImageTexture> t; + t.instance(); + t->create_from_image(img); + + state->images.push_back(t); + } + + print_verbose("glTF: Total images: " + itos(state->images.size())); + + return OK; +} + +Error GLTFDocument::_serialize_textures(Ref<GLTFState> state) { + if (!state->textures.size()) { + return OK; + } + + Array textures; + for (int32_t i = 0; i < state->textures.size(); i++) { + Dictionary d; + Ref<GLTFTexture> t = state->textures[i]; + ERR_CONTINUE(t->get_src_image() == -1); + d["source"] = t->get_src_image(); + textures.push_back(d); + } + state->json["textures"] = textures; + + return OK; +} + +Error GLTFDocument::_parse_textures(Ref<GLTFState> state) { + if (!state->json.has("textures")) + return OK; + + const Array &textures = state->json["textures"]; + for (GLTFTextureIndex i = 0; i < textures.size(); i++) { + const Dictionary &d = textures[i]; + + ERR_FAIL_COND_V(!d.has("source"), ERR_PARSE_ERROR); + + Ref<GLTFTexture> t; + t.instance(); + t->set_src_image(d["source"]); + state->textures.push_back(t); + } + + return OK; +} + +GLTFTextureIndex GLTFDocument::_set_texture(Ref<GLTFState> state, Ref<Texture2D> p_texture) { + ERR_FAIL_COND_V(p_texture.is_null(), -1); + Ref<GLTFTexture> gltf_texture; + gltf_texture.instance(); + ERR_FAIL_COND_V(p_texture->get_data().is_null(), -1); + GLTFImageIndex gltf_src_image_i = state->images.size(); + state->images.push_back(p_texture); + gltf_texture->set_src_image(gltf_src_image_i); + GLTFTextureIndex gltf_texture_i = state->textures.size(); + state->textures.push_back(gltf_texture); + return gltf_texture_i; +} + +Ref<Texture2D> GLTFDocument::_get_texture(Ref<GLTFState> state, const GLTFTextureIndex p_texture) { + ERR_FAIL_INDEX_V(p_texture, state->textures.size(), Ref<Texture2D>()); + const 
GLTFImageIndex image = state->textures[p_texture]->get_src_image(); + + ERR_FAIL_INDEX_V(image, state->images.size(), Ref<Texture2D>()); + + return state->images[image]; +} + +Error GLTFDocument::_serialize_materials(Ref<GLTFState> state) { + Array materials; + for (int32_t i = 0; i < state->materials.size(); i++) { + Dictionary d; + + Ref<BaseMaterial3D> material = state->materials[i]; + if (material.is_null()) { + materials.push_back(d); + continue; + } + if (!material->get_name().empty()) { + d["name"] = _gen_unique_name(state, material->get_name()); + } + { + Dictionary mr; + { + Array arr; + const Color c = material->get_albedo().to_linear(); + arr.push_back(c.r); + arr.push_back(c.g); + arr.push_back(c.b); + arr.push_back(c.a); + mr["baseColorFactor"] = arr; + } + { + Dictionary bct; + Ref<Texture2D> albedo_texture = material->get_texture(BaseMaterial3D::TEXTURE_ALBEDO); + GLTFTextureIndex gltf_texture_index = -1; + + if (albedo_texture.is_valid() && albedo_texture->get_data().is_valid()) { + albedo_texture->set_name(material->get_name() + "_albedo"); + gltf_texture_index = _set_texture(state, albedo_texture); + } + if (gltf_texture_index != -1) { + bct["index"] = gltf_texture_index; + bct["extensions"] = _serialize_texture_transform_uv1(material); + mr["baseColorTexture"] = bct; + } + } + + mr["metallicFactor"] = material->get_metallic(); + mr["roughnessFactor"] = material->get_roughness(); + bool has_roughness = material->get_texture(BaseMaterial3D::TEXTURE_ROUGHNESS).is_valid() && material->get_texture(BaseMaterial3D::TEXTURE_ROUGHNESS)->get_data().is_valid(); + bool has_ao = material->get_feature(BaseMaterial3D::FEATURE_AMBIENT_OCCLUSION) && material->get_texture(BaseMaterial3D::TEXTURE_AMBIENT_OCCLUSION).is_valid(); + bool has_metalness = material->get_texture(BaseMaterial3D::TEXTURE_METALLIC).is_valid() && material->get_texture(BaseMaterial3D::TEXTURE_METALLIC)->get_data().is_valid(); + if (has_ao || has_roughness || has_metalness) { + Dictionary mrt; + Ref<Texture2D> roughness_texture = material->get_texture(BaseMaterial3D::TEXTURE_ROUGHNESS); + BaseMaterial3D::TextureChannel roughness_channel = material->get_roughness_texture_channel(); + Ref<Texture2D> metallic_texture = material->get_texture(BaseMaterial3D::TEXTURE_METALLIC); + BaseMaterial3D::TextureChannel metalness_channel = material->get_metallic_texture_channel(); + Ref<Texture2D> ao_texture = material->get_texture(BaseMaterial3D::TEXTURE_AMBIENT_OCCLUSION); + BaseMaterial3D::TextureChannel ao_channel = material->get_ao_texture_channel(); + Ref<ImageTexture> orm_texture; + orm_texture.instance(); + Ref<Image> orm_image; + orm_image.instance(); + int32_t height = 0; + int32_t width = 0; + Ref<Image> ao_image; + if (has_ao) { + height = ao_texture->get_height(); + width = ao_texture->get_width(); + ao_image = ao_texture->get_data(); + Ref<ImageTexture> img_tex = ao_image; + if (img_tex.is_valid()) { + ao_image = img_tex->get_data(); + } + if (ao_image->is_compressed()) { + ao_image->decompress(); + } + } + Ref<Image> roughness_image; + if (has_roughness) { + height = roughness_texture->get_height(); + width = roughness_texture->get_width(); + roughness_image = roughness_texture->get_data(); + Ref<ImageTexture> img_tex = roughness_image; + if (img_tex.is_valid()) { + roughness_image = img_tex->get_data(); + } + if (roughness_image->is_compressed()) { + roughness_image->decompress(); + } + } + Ref<Image> metallness_image; + if (has_metalness) { + height = metallic_texture->get_height(); + width = 
metallic_texture->get_width(); + metallness_image = metallic_texture->get_data(); + Ref<ImageTexture> img_tex = metallness_image; + if (img_tex.is_valid()) { + metallness_image = img_tex->get_data(); + } + if (metallness_image->is_compressed()) { + metallness_image->decompress(); + } + } + Ref<Texture2D> albedo_texture = material->get_texture(BaseMaterial3D::TEXTURE_ALBEDO); + if (albedo_texture.is_valid() && albedo_texture->get_data().is_valid()) { + height = albedo_texture->get_height(); + width = albedo_texture->get_width(); + } + orm_image->create(width, height, false, Image::FORMAT_RGBA8); + if (ao_image.is_valid() && ao_image->get_size() != Vector2(width, height)) { + ao_image->resize(width, height, Image::INTERPOLATE_LANCZOS); + } + if (roughness_image.is_valid() && roughness_image->get_size() != Vector2(width, height)) { + roughness_image->resize(width, height, Image::INTERPOLATE_LANCZOS); + } + if (metallness_image.is_valid() && metallness_image->get_size() != Vector2(width, height)) { + metallness_image->resize(width, height, Image::INTERPOLATE_LANCZOS); + } + for (int32_t h = 0; h < height; h++) { + for (int32_t w = 0; w < width; w++) { + Color c = Color(1.0f, 1.0f, 1.0f); + if (has_ao) { + if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_RED == ao_channel) { + c.r = ao_image->get_pixel(w, h).r; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_GREEN == ao_channel) { + c.r = ao_image->get_pixel(w, h).g; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_BLUE == ao_channel) { + c.r = ao_image->get_pixel(w, h).b; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_ALPHA == ao_channel) { + c.r = ao_image->get_pixel(w, h).a; + } + } + if (has_roughness) { + if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_RED == roughness_channel) { + c.g = roughness_image->get_pixel(w, h).r; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_GREEN == roughness_channel) { + c.g = roughness_image->get_pixel(w, h).g; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_BLUE == roughness_channel) { + c.g = roughness_image->get_pixel(w, h).b; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_ALPHA == roughness_channel) { + c.g = roughness_image->get_pixel(w, h).a; + } + } + if (has_metalness) { + if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_RED == metalness_channel) { + c.b = metallness_image->get_pixel(w, h).r; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_GREEN == metalness_channel) { + c.b = metallness_image->get_pixel(w, h).g; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_BLUE == metalness_channel) { + c.b = metallness_image->get_pixel(w, h).b; + } else if (BaseMaterial3D::TextureChannel::TEXTURE_CHANNEL_ALPHA == metalness_channel) { + c.b = metallness_image->get_pixel(w, h).a; + } + } + orm_image->set_pixel(w, h, c); + } + } + orm_image->generate_mipmaps(); + orm_texture->create_from_image(orm_image); + GLTFTextureIndex orm_texture_index = -1; + if (has_ao || has_roughness || has_metalness) { + orm_texture->set_name(material->get_name() + "_orm"); + orm_texture_index = _set_texture(state, orm_texture); + } + if (has_ao) { + Dictionary ot; + ot["index"] = orm_texture_index; + d["occlusionTexture"] = ot; + } + if (has_roughness || has_metalness) { + mrt["index"] = orm_texture_index; + mrt["extensions"] = _serialize_texture_transform_uv1(material); + mr["metallicRoughnessTexture"] = mrt; + } + } + d["pbrMetallicRoughness"] = mr; + } + + if 
(material->get_feature(BaseMaterial3D::FEATURE_NORMAL_MAPPING)) { + Dictionary nt; + Ref<ImageTexture> tex; + tex.instance(); + { + Ref<Texture2D> normal_texture = material->get_texture(BaseMaterial3D::TEXTURE_NORMAL); + // Code for uncompressing RG normal maps + Ref<Image> img = normal_texture->get_data(); + Ref<ImageTexture> img_tex = img; + if (img_tex.is_valid()) { + img = img_tex->get_data(); + } + img->decompress(); + img->convert(Image::FORMAT_RGBA8); + for (int32_t y = 0; y < img->get_height(); y++) { + for (int32_t x = 0; x < img->get_width(); x++) { + Color c = img->get_pixel(x, y); + Vector2 red_green = Vector2(c.r, c.g); + red_green = red_green * Vector2(2.0f, 2.0f) - Vector2(1.0f, 1.0f); + float blue = 1.0f - red_green.dot(red_green); + blue = MAX(0.0f, blue); + c.b = Math::sqrt(blue); + img->set_pixel(x, y, c); + } + } + tex->create_from_image(img); + } + Ref<Texture2D> normal_texture = material->get_texture(BaseMaterial3D::TEXTURE_NORMAL); + GLTFTextureIndex gltf_texture_index = -1; + if (tex.is_valid() && tex->get_data().is_valid()) { + tex->set_name(material->get_name() + "_normal"); + gltf_texture_index = _set_texture(state, tex); + } + nt["scale"] = material->get_normal_scale(); + if (gltf_texture_index != -1) { + nt["index"] = gltf_texture_index; + d["normalTexture"] = nt; + } + } + + if (material->get_feature(BaseMaterial3D::FEATURE_EMISSION)) { + const Color c = material->get_emission().to_srgb(); + Array arr; + arr.push_back(c.r); + arr.push_back(c.g); + arr.push_back(c.b); + d["emissiveFactor"] = arr; + } + if (material->get_feature(BaseMaterial3D::FEATURE_EMISSION)) { + Dictionary et; + Ref<Texture2D> emission_texture = material->get_texture(BaseMaterial3D::TEXTURE_EMISSION); + GLTFTextureIndex gltf_texture_index = -1; + if (emission_texture.is_valid() && emission_texture->get_data().is_valid()) { + emission_texture->set_name(material->get_name() + "_emission"); + gltf_texture_index = _set_texture(state, emission_texture); + } + + if (gltf_texture_index != -1) { + et["index"] = gltf_texture_index; + d["emissiveTexture"] = et; + } + } + const bool ds = material->get_cull_mode() == BaseMaterial3D::CULL_DISABLED; + if (ds) { + d["doubleSided"] = ds; + } + if (material->get_transparency() == BaseMaterial3D::TRANSPARENCY_ALPHA_SCISSOR) { + d["alphaMode"] = "MASK"; + d["alphaCutoff"] = material->get_alpha_scissor_threshold(); + } else if (material->get_transparency() != BaseMaterial3D::TRANSPARENCY_DISABLED) { + d["alphaMode"] = "BLEND"; + } + materials.push_back(d); + } + state->json["materials"] = materials; + print_verbose("Total materials: " + itos(state->materials.size())); + + return OK; +} + +Error GLTFDocument::_parse_materials(Ref<GLTFState> state) { + if (!state->json.has("materials")) + return OK; + + const Array &materials = state->json["materials"]; + for (GLTFMaterialIndex i = 0; i < materials.size(); i++) { + const Dictionary &d = materials[i]; + + Ref<StandardMaterial3D> material; + material.instance(); + if (d.has("name")) { + material->set_name(d["name"]); + } + material->set_flag(BaseMaterial3D::FLAG_ALBEDO_FROM_VERTEX_COLOR, true); + Dictionary pbr_spec_gloss_extensions; + if (d.has("extensions")) { + pbr_spec_gloss_extensions = d["extensions"]; + } + if (pbr_spec_gloss_extensions.has("KHR_materials_pbrSpecularGlossiness")) { + WARN_PRINT("Material uses a specular and glossiness workflow. 
Textures will be converted to roughness and metallic workflow, which may not be 100% accurate."); + Dictionary sgm = pbr_spec_gloss_extensions["KHR_materials_pbrSpecularGlossiness"]; + + Ref<GLTFSpecGloss> spec_gloss; + spec_gloss.instance(); + if (sgm.has("diffuseTexture")) { + const Dictionary &diffuse_texture_dict = sgm["diffuseTexture"]; + if (diffuse_texture_dict.has("index")) { + Ref<Texture2D> diffuse_texture = _get_texture(state, diffuse_texture_dict["index"]); + if (diffuse_texture.is_valid()) { + spec_gloss->diffuse_img = diffuse_texture->get_data(); + material->set_texture(BaseMaterial3D::TEXTURE_ALBEDO, diffuse_texture); + } + } + } + if (sgm.has("diffuseFactor")) { + const Array &arr = sgm["diffuseFactor"]; + ERR_FAIL_COND_V(arr.size() != 4, ERR_PARSE_ERROR); + const Color c = Color(arr[0], arr[1], arr[2], arr[3]).to_srgb(); + spec_gloss->diffuse_factor = c; + material->set_albedo(spec_gloss->diffuse_factor); + } + + if (sgm.has("specularFactor")) { + const Array &arr = sgm["specularFactor"]; + ERR_FAIL_COND_V(arr.size() != 3, ERR_PARSE_ERROR); + spec_gloss->specular_factor = Color(arr[0], arr[1], arr[2]); + } + + if (sgm.has("glossinessFactor")) { + spec_gloss->gloss_factor = sgm["glossinessFactor"]; + material->set_roughness(1.0f - CLAMP(spec_gloss->gloss_factor, 0.0f, 1.0f)); + } + if (sgm.has("specularGlossinessTexture")) { + const Dictionary &spec_gloss_texture = sgm["specularGlossinessTexture"]; + if (spec_gloss_texture.has("index")) { + const Ref<Texture2D> orig_texture = _get_texture(state, spec_gloss_texture["index"]); + if (orig_texture.is_valid()) { + spec_gloss->spec_gloss_img = orig_texture->get_data(); + } + } + } + spec_gloss_to_rough_metal(spec_gloss, material); + + } else if (d.has("pbrMetallicRoughness")) { + const Dictionary &mr = d["pbrMetallicRoughness"]; + if (mr.has("baseColorFactor")) { + const Array &arr = mr["baseColorFactor"]; + ERR_FAIL_COND_V(arr.size() != 4, ERR_PARSE_ERROR); + const Color c = Color(arr[0], arr[1], arr[2], arr[3]).to_srgb(); + material->set_albedo(c); + } + + if (mr.has("baseColorTexture")) { + const Dictionary &bct = mr["baseColorTexture"]; + if (bct.has("index")) { + material->set_texture(BaseMaterial3D::TEXTURE_ALBEDO, _get_texture(state, bct["index"])); + } + if (!mr.has("baseColorFactor")) { + material->set_albedo(Color(1, 1, 1)); + } + _set_texture_transform_uv1(bct, material); + } + + if (mr.has("metallicFactor")) { + material->set_metallic(mr["metallicFactor"]); + } else { + material->set_metallic(1.0); + } + + if (mr.has("roughnessFactor")) { + material->set_roughness(mr["roughnessFactor"]); + } else { + material->set_roughness(1.0); + } + + if (mr.has("metallicRoughnessTexture")) { + const Dictionary &bct = mr["metallicRoughnessTexture"]; + if (bct.has("index")) { + const Ref<Texture2D> t = _get_texture(state, bct["index"]); + material->set_texture(BaseMaterial3D::TEXTURE_METALLIC, t); + material->set_metallic_texture_channel(BaseMaterial3D::TEXTURE_CHANNEL_BLUE); + material->set_texture(BaseMaterial3D::TEXTURE_ROUGHNESS, t); + material->set_roughness_texture_channel(BaseMaterial3D::TEXTURE_CHANNEL_GREEN); + if (!mr.has("metallicFactor")) { + material->set_metallic(1); + } + if (!mr.has("roughnessFactor")) { + material->set_roughness(1); + } + } + } + } + + if (d.has("normalTexture")) { + const Dictionary &bct = d["normalTexture"]; + if (bct.has("index")) { + material->set_texture(BaseMaterial3D::TEXTURE_NORMAL, _get_texture(state, bct["index"])); + material->set_feature(BaseMaterial3D::FEATURE_NORMAL_MAPPING, true); + } 
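// Illustrative note (not part of the patch): glTF's "normalTexture" is an ordinary tangent-space
// RGB normal map plus an optional scalar "scale", which set_normal_scale() picks up just below.
// The export path earlier in this file goes the other way: Godot normal maps may only carry valid
// data in R/G, so before writing, the serializer rebuilds the blue channel per pixel, roughly:
//
//     Vector2 rg = Vector2(c.r, c.g) * Vector2(2.0f, 2.0f) - Vector2(1.0f, 1.0f); // unpack to [-1, 1]
//     c.b = Math::sqrt(MAX(0.0f, 1.0f - rg.dot(rg)));                             // z = sqrt(1 - x^2 - y^2)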
+ if (bct.has("scale")) { + material->set_normal_scale(bct["scale"]); + } + } + if (d.has("occlusionTexture")) { + const Dictionary &bct = d["occlusionTexture"]; + if (bct.has("index")) { + material->set_texture(BaseMaterial3D::TEXTURE_AMBIENT_OCCLUSION, _get_texture(state, bct["index"])); + material->set_ao_texture_channel(BaseMaterial3D::TEXTURE_CHANNEL_RED); + material->set_feature(BaseMaterial3D::FEATURE_AMBIENT_OCCLUSION, true); + } + } + + if (d.has("emissiveFactor")) { + const Array &arr = d["emissiveFactor"]; + ERR_FAIL_COND_V(arr.size() != 3, ERR_PARSE_ERROR); + const Color c = Color(arr[0], arr[1], arr[2]).to_srgb(); + material->set_feature(BaseMaterial3D::FEATURE_EMISSION, true); + + material->set_emission(c); + } + + if (d.has("emissiveTexture")) { + const Dictionary &bct = d["emissiveTexture"]; + if (bct.has("index")) { + material->set_texture(BaseMaterial3D::TEXTURE_EMISSION, _get_texture(state, bct["index"])); + material->set_feature(BaseMaterial3D::FEATURE_EMISSION, true); + material->set_emission(Color(0, 0, 0)); + } + } + + if (d.has("doubleSided")) { + const bool ds = d["doubleSided"]; + if (ds) { + material->set_cull_mode(BaseMaterial3D::CULL_DISABLED); + } + } + + if (d.has("alphaMode")) { + const String &am = d["alphaMode"]; + if (am == "BLEND") { + material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA_DEPTH_PRE_PASS); + } else if (am == "MASK") { + material->set_transparency(BaseMaterial3D::TRANSPARENCY_ALPHA_SCISSOR); + if (d.has("alphaCutoff")) { + material->set_alpha_scissor_threshold(d["alphaCutoff"]); + } else { + material->set_alpha_scissor_threshold(0.5f); + } + } + } + state->materials.push_back(material); + } + + print_verbose("Total materials: " + itos(state->materials.size())); + + return OK; +} + +void GLTFDocument::_set_texture_transform_uv1(const Dictionary &d, Ref<BaseMaterial3D> material) { + if (d.has("extensions")) { + const Dictionary &extensions = d["extensions"]; + if (extensions.has("KHR_texture_transform")) { + const Dictionary &texture_transform = extensions["KHR_texture_transform"]; + const Array &offset_arr = texture_transform["offset"]; + if (offset_arr.size() == 2) { + const Vector3 offset_vector3 = Vector3(offset_arr[0], offset_arr[1], 0.0f); + material->set_uv1_offset(offset_vector3); + } + + const Array &scale_arr = texture_transform["scale"]; + if (scale_arr.size() == 2) { + const Vector3 scale_vector3 = Vector3(scale_arr[0], scale_arr[1], 1.0f); + material->set_uv1_scale(scale_vector3); + } + } + } +} + +void GLTFDocument::spec_gloss_to_rough_metal(Ref<GLTFSpecGloss> r_spec_gloss, Ref<BaseMaterial3D> p_material) { + if (r_spec_gloss->spec_gloss_img.is_null()) { + return; + } + if (r_spec_gloss->diffuse_img.is_null()) { + return; + } + Ref<Image> rm_img; + rm_img.instance(); + bool has_roughness = false; + bool has_metal = false; + p_material->set_roughness(1.0f); + p_material->set_metallic(1.0f); + rm_img->create(r_spec_gloss->spec_gloss_img->get_width(), r_spec_gloss->spec_gloss_img->get_height(), false, Image::FORMAT_RGBA8); + r_spec_gloss->spec_gloss_img->decompress(); + if (r_spec_gloss->diffuse_img.is_valid()) { + r_spec_gloss->diffuse_img->decompress(); + r_spec_gloss->diffuse_img->resize(r_spec_gloss->spec_gloss_img->get_width(), r_spec_gloss->spec_gloss_img->get_height(), Image::INTERPOLATE_LANCZOS); + r_spec_gloss->spec_gloss_img->resize(r_spec_gloss->diffuse_img->get_width(), r_spec_gloss->diffuse_img->get_height(), Image::INTERPOLATE_LANCZOS); + } + for (int32_t y = 0; y < r_spec_gloss->spec_gloss_img->get_height(); 
y++) { + for (int32_t x = 0; x < r_spec_gloss->spec_gloss_img->get_width(); x++) { + const Color specular_pixel = r_spec_gloss->spec_gloss_img->get_pixel(x, y).to_linear(); + Color specular = Color(specular_pixel.r, specular_pixel.g, specular_pixel.b); + specular *= r_spec_gloss->specular_factor; + Color diffuse = Color(1.0f, 1.0f, 1.0f); + diffuse *= r_spec_gloss->diffuse_img->get_pixel(x, y).to_linear(); + float metallic = 0.0f; + Color base_color; + spec_gloss_to_metal_base_color(specular, diffuse, base_color, metallic); + Color mr = Color(1.0f, 1.0f, 1.0f); + mr.g = specular_pixel.a; + mr.b = metallic; + if (!Math::is_equal_approx(mr.g, 1.0f)) { + has_roughness = true; + } + if (!Math::is_equal_approx(mr.b, 0.0f)) { + has_metal = true; + } + mr.g *= r_spec_gloss->gloss_factor; + mr.g = 1.0f - mr.g; + rm_img->set_pixel(x, y, mr); + if (r_spec_gloss->diffuse_img.is_valid()) { + r_spec_gloss->diffuse_img->set_pixel(x, y, base_color.to_srgb()); + } + } + } + rm_img->generate_mipmaps(); + r_spec_gloss->diffuse_img->generate_mipmaps(); + Ref<ImageTexture> diffuse_image_texture; + diffuse_image_texture.instance(); + diffuse_image_texture->create_from_image(r_spec_gloss->diffuse_img); + p_material->set_texture(BaseMaterial3D::TEXTURE_ALBEDO, diffuse_image_texture); + Ref<ImageTexture> rm_image_texture; + rm_image_texture.instance(); + rm_image_texture->create_from_image(rm_img); + if (has_roughness) { + p_material->set_texture(BaseMaterial3D::TEXTURE_ROUGHNESS, rm_image_texture); + p_material->set_roughness_texture_channel(BaseMaterial3D::TEXTURE_CHANNEL_GREEN); + } + + if (has_metal) { + p_material->set_texture(BaseMaterial3D::TEXTURE_METALLIC, rm_image_texture); + p_material->set_metallic_texture_channel(BaseMaterial3D::TEXTURE_CHANNEL_BLUE); + } +} + +void GLTFDocument::spec_gloss_to_metal_base_color(const Color &p_specular_factor, const Color &p_diffuse, Color &r_base_color, float &r_metallic) { + const Color DIELECTRIC_SPECULAR = Color(0.04f, 0.04f, 0.04f); + Color specular = Color(p_specular_factor.r, p_specular_factor.g, p_specular_factor.b); + const float one_minus_specular_strength = 1.0f - get_max_component(specular); + const float dielectric_specular_red = DIELECTRIC_SPECULAR.r; + float brightness_diffuse = get_perceived_brightness(p_diffuse); + const float brightness_specular = get_perceived_brightness(specular); + r_metallic = solve_metallic(dielectric_specular_red, brightness_diffuse, brightness_specular, one_minus_specular_strength); + const float one_minus_metallic = 1.0f - r_metallic; + const Color base_color_from_diffuse = p_diffuse * (one_minus_specular_strength / (1.0f - dielectric_specular_red) / MAX(one_minus_metallic, CMP_EPSILON)); + const Color base_color_from_specular = (specular - (DIELECTRIC_SPECULAR * (one_minus_metallic))) * (1.0f / MAX(r_metallic, CMP_EPSILON)); + r_base_color.r = Math::lerp(base_color_from_diffuse.r, base_color_from_specular.r, r_metallic * r_metallic); + r_base_color.g = Math::lerp(base_color_from_diffuse.g, base_color_from_specular.g, r_metallic * r_metallic); + r_base_color.b = Math::lerp(base_color_from_diffuse.b, base_color_from_specular.b, r_metallic * r_metallic); + r_base_color.a = p_diffuse.a; + r_base_color.r = CLAMP(r_base_color.r, 0.0f, 1.0f); + r_base_color.g = CLAMP(r_base_color.g, 0.0f, 1.0f); + r_base_color.b = CLAMP(r_base_color.b, 0.0f, 1.0f); + r_base_color.a = CLAMP(r_base_color.a, 0.0f, 1.0f); +} + +GLTFNodeIndex GLTFDocument::_find_highest_node(Ref<GLTFState> state, const Vector<GLTFNodeIndex> &subset) { + int highest = -1; 
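// Descriptive note (not from the patch): "highest" here means closest to the scene root. Node
// heights grow with depth (walking to a parent lowers the height, as the loops in
// _capture_nodes_for_multirooted_skin below rely on), so this helper tracks the smallest height
// value in the subset and returns the node carrying it.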
+ GLTFNodeIndex best_node = -1; + + for (int i = 0; i < subset.size(); ++i) { + const GLTFNodeIndex node_i = subset[i]; + const Ref<GLTFNode> node = state->nodes[node_i]; + + if (highest == -1 || node->height < highest) { + highest = node->height; + best_node = node_i; + } + } + + return best_node; +} + +bool GLTFDocument::_capture_nodes_in_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin, const GLTFNodeIndex node_index) { + bool found_joint = false; + + for (int i = 0; i < state->nodes[node_index]->children.size(); ++i) { + found_joint |= _capture_nodes_in_skin(state, skin, state->nodes[node_index]->children[i]); + } + + if (found_joint) { + // Mark it if we happen to find another skins joint... + if (state->nodes[node_index]->joint && skin->joints.find(node_index) < 0) { + skin->joints.push_back(node_index); + } else if (skin->non_joints.find(node_index) < 0) { + skin->non_joints.push_back(node_index); + } + } + + if (skin->joints.find(node_index) > 0) { + return true; + } + + return false; +} + +void GLTFDocument::_capture_nodes_for_multirooted_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin) { + DisjointSet<GLTFNodeIndex> disjoint_set; + + for (int i = 0; i < skin->joints.size(); ++i) { + const GLTFNodeIndex node_index = skin->joints[i]; + const GLTFNodeIndex parent = state->nodes[node_index]->parent; + disjoint_set.insert(node_index); + + if (skin->joints.find(parent) >= 0) { + disjoint_set.create_union(parent, node_index); + } + } + + Vector<GLTFNodeIndex> roots; + disjoint_set.get_representatives(roots); + + if (roots.size() <= 1) { + return; + } + + int maxHeight = -1; + + // Determine the max height rooted tree + for (int i = 0; i < roots.size(); ++i) { + const GLTFNodeIndex root = roots[i]; + + if (maxHeight == -1 || state->nodes[root]->height < maxHeight) { + maxHeight = state->nodes[root]->height; + } + } + + // Go up the tree till all of the multiple roots of the skin are at the same hierarchy level. + // This sucks, but 99% of all game engines (not just Godot) would have this same issue. 
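// Descriptive note (not from the patch): at this point maxHeight actually holds the *smallest*
// height among the skin's roots, i.e. the root sitting closest to the scene root. The loop below
// walks every deeper root up through its ancestors until it reaches that level, registering each
// ancestor it passes as either a joint or a non-joint of the skin.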
+ for (int i = 0; i < roots.size(); ++i) { + GLTFNodeIndex current_node = roots[i]; + while (state->nodes[current_node]->height > maxHeight) { + GLTFNodeIndex parent = state->nodes[current_node]->parent; + + if (state->nodes[parent]->joint && skin->joints.find(parent) < 0) { + skin->joints.push_back(parent); + } else if (skin->non_joints.find(parent) < 0) { + skin->non_joints.push_back(parent); + } + + current_node = parent; + } + + // replace the roots + roots.write[i] = current_node; + } + + // Climb up the tree until they all have the same parent + bool all_same; + + do { + all_same = true; + const GLTFNodeIndex first_parent = state->nodes[roots[0]]->parent; + + for (int i = 1; i < roots.size(); ++i) { + all_same &= (first_parent == state->nodes[roots[i]]->parent); + } + + if (!all_same) { + for (int i = 0; i < roots.size(); ++i) { + const GLTFNodeIndex current_node = roots[i]; + const GLTFNodeIndex parent = state->nodes[current_node]->parent; + + if (state->nodes[parent]->joint && skin->joints.find(parent) < 0) { + skin->joints.push_back(parent); + } else if (skin->non_joints.find(parent) < 0) { + skin->non_joints.push_back(parent); + } + + roots.write[i] = parent; + } + } + + } while (!all_same); +} + +Error GLTFDocument::_expand_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin) { + _capture_nodes_for_multirooted_skin(state, skin); + + // Grab all nodes that lay in between skin joints/nodes + DisjointSet<GLTFNodeIndex> disjoint_set; + + Vector<GLTFNodeIndex> all_skin_nodes; + all_skin_nodes.append_array(skin->joints); + all_skin_nodes.append_array(skin->non_joints); + + for (int i = 0; i < all_skin_nodes.size(); ++i) { + const GLTFNodeIndex node_index = all_skin_nodes[i]; + const GLTFNodeIndex parent = state->nodes[node_index]->parent; + disjoint_set.insert(node_index); + + if (all_skin_nodes.find(parent) >= 0) { + disjoint_set.create_union(parent, node_index); + } + } + + Vector<GLTFNodeIndex> out_owners; + disjoint_set.get_representatives(out_owners); + + Vector<GLTFNodeIndex> out_roots; + + for (int i = 0; i < out_owners.size(); ++i) { + Vector<GLTFNodeIndex> set; + disjoint_set.get_members(set, out_owners[i]); + + const GLTFNodeIndex root = _find_highest_node(state, set); + ERR_FAIL_COND_V(root < 0, FAILED); + out_roots.push_back(root); + } + + out_roots.sort(); + + for (int i = 0; i < out_roots.size(); ++i) { + _capture_nodes_in_skin(state, skin, out_roots[i]); + } + + skin->roots = out_roots; + + return OK; +} + +Error GLTFDocument::_verify_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin) { + // This may seem duplicated from expand_skins, but this is really a sanity check! 
(so it kinda is) + // In case additional interpolating logic is added to the skins, this will help ensure that you + // do not cause it to self implode into a fiery blaze + + // We are going to re-calculate the root nodes and compare them to the ones saved in the skin, + // then ensure the multiple trees (if they exist) are on the same sublevel + + // Grab all nodes that lay in between skin joints/nodes + DisjointSet<GLTFNodeIndex> disjoint_set; + + Vector<GLTFNodeIndex> all_skin_nodes; + all_skin_nodes.append_array(skin->joints); + all_skin_nodes.append_array(skin->non_joints); + + for (int i = 0; i < all_skin_nodes.size(); ++i) { + const GLTFNodeIndex node_index = all_skin_nodes[i]; + const GLTFNodeIndex parent = state->nodes[node_index]->parent; + disjoint_set.insert(node_index); + + if (all_skin_nodes.find(parent) >= 0) { + disjoint_set.create_union(parent, node_index); + } + } + + Vector<GLTFNodeIndex> out_owners; + disjoint_set.get_representatives(out_owners); + + Vector<GLTFNodeIndex> out_roots; + + for (int i = 0; i < out_owners.size(); ++i) { + Vector<GLTFNodeIndex> set; + disjoint_set.get_members(set, out_owners[i]); + + const GLTFNodeIndex root = _find_highest_node(state, set); + ERR_FAIL_COND_V(root < 0, FAILED); + out_roots.push_back(root); + } + + out_roots.sort(); + + ERR_FAIL_COND_V(out_roots.size() == 0, FAILED); + + // Make sure the roots are the exact same (they better be) + ERR_FAIL_COND_V(out_roots.size() != skin->roots.size(), FAILED); + for (int i = 0; i < out_roots.size(); ++i) { + ERR_FAIL_COND_V(out_roots[i] != skin->roots[i], FAILED); + } + + // Single rooted skin? Perfectly ok! + if (out_roots.size() == 1) { + return OK; + } + + // Make sure all parents of a multi-rooted skin are the SAME + const GLTFNodeIndex parent = state->nodes[out_roots[0]]->parent; + for (int i = 1; i < out_roots.size(); ++i) { + if (state->nodes[out_roots[i]]->parent != parent) { + return FAILED; + } + } + + return OK; +} + +Error GLTFDocument::_parse_skins(Ref<GLTFState> state) { + if (!state->json.has("skins")) + return OK; + + const Array &skins = state->json["skins"]; + + // Create the base skins, and mark nodes that are joints + for (int i = 0; i < skins.size(); i++) { + const Dictionary &d = skins[i]; + + Ref<GLTFSkin> skin; + skin.instance(); + + ERR_FAIL_COND_V(!d.has("joints"), ERR_PARSE_ERROR); + + const Array &joints = d["joints"]; + + if (d.has("inverseBindMatrices")) { + skin->inverse_binds = _decode_accessor_as_xform(state, d["inverseBindMatrices"], false); + ERR_FAIL_COND_V(skin->inverse_binds.size() != joints.size(), ERR_PARSE_ERROR); + } + + for (int j = 0; j < joints.size(); j++) { + const GLTFNodeIndex node = joints[j]; + ERR_FAIL_INDEX_V(node, state->nodes.size(), ERR_PARSE_ERROR); + + skin->joints.push_back(node); + skin->joints_original.push_back(node); + + state->nodes.write[node]->joint = true; + } + + if (d.has("name")) { + skin->set_name(d["name"]); + } + + if (d.has("skeleton")) { + skin->skin_root = d["skeleton"]; + } + + state->skins.push_back(skin); + } + + for (GLTFSkinIndex i = 0; i < state->skins.size(); ++i) { + Ref<GLTFSkin> skin = state->skins.write[i]; + + // Expand the skin to capture all the extra non-joints that lie in between the actual joints, + // and expand the hierarchy to ensure multi-rooted trees lie on the same height level + ERR_FAIL_COND_V(_expand_skin(state, skin), ERR_PARSE_ERROR); + ERR_FAIL_COND_V(_verify_skin(state, skin), ERR_PARSE_ERROR); + } + + print_verbose("glTF: Total skins: " + itos(state->skins.size())); + + return OK; +} + 
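The skeleton passes that follow (_determine_skeletons, and _expand_skin above) all lean on Godot's DisjointSet template, i.e. a union-find structure: every node of interest is inserted, a node is unioned with its parent whenever the parent belongs to the same working set, and get_representatives()/get_members() then yield one group per connected subtree. A minimal sketch of that usage pattern follows; it only uses the DisjointSet calls already appearing in this file, but the group_connected_subtrees() helper itself is hypothetical and not part of the patch.

// Hypothetical helper (not part of the patch): groups node indices into connected subtrees the
// same way _expand_skin() and _determine_skeletons() do.
Vector<Vector<GLTFNodeIndex>> group_connected_subtrees(Ref<GLTFState> state, const Vector<GLTFNodeIndex> &nodes) {
	DisjointSet<GLTFNodeIndex> disjoint_set;

	for (int i = 0; i < nodes.size(); ++i) {
		const GLTFNodeIndex node_i = nodes[i];
		disjoint_set.insert(node_i);

		// Union a node with its parent only when the parent is also part of the working set,
		// so unrelated branches of the scene end up in separate groups.
		const GLTFNodeIndex parent = state->nodes[node_i]->parent;
		if (nodes.find(parent) >= 0) {
			disjoint_set.create_union(parent, node_i);
		}
	}

	Vector<GLTFNodeIndex> representatives;
	disjoint_set.get_representatives(representatives);

	Vector<Vector<GLTFNodeIndex>> groups;
	for (int i = 0; i < representatives.size(); ++i) {
		Vector<GLTFNodeIndex> group;
		disjoint_set.get_members(group, representatives[i]);
		groups.push_back(group);
	}
	return groups;
}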
+Error GLTFDocument::_determine_skeletons(Ref<GLTFState> state) { + // Using a disjoint set, we are going to potentially combine all skins that are actually branches + // of a main skeleton, or treat skins defining the same set of nodes as ONE skeleton. + // This is another unclear issue caused by the current glTF specification. + + DisjointSet<GLTFNodeIndex> skeleton_sets; + + for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + const Ref<GLTFSkin> skin = state->skins[skin_i]; + + Vector<GLTFNodeIndex> all_skin_nodes; + all_skin_nodes.append_array(skin->joints); + all_skin_nodes.append_array(skin->non_joints); + + for (int i = 0; i < all_skin_nodes.size(); ++i) { + const GLTFNodeIndex node_index = all_skin_nodes[i]; + const GLTFNodeIndex parent = state->nodes[node_index]->parent; + skeleton_sets.insert(node_index); + + if (all_skin_nodes.find(parent) >= 0) { + skeleton_sets.create_union(parent, node_index); + } + } + + // We are going to connect the separate skin subtrees in each skin together + // so that the final roots are entire sets of valid skin trees + for (int i = 1; i < skin->roots.size(); ++i) { + skeleton_sets.create_union(skin->roots[0], skin->roots[i]); + } + } + + { // attempt to joint all touching subsets (siblings/parent are part of another skin) + Vector<GLTFNodeIndex> groups_representatives; + skeleton_sets.get_representatives(groups_representatives); + + Vector<GLTFNodeIndex> highest_group_members; + Vector<Vector<GLTFNodeIndex>> groups; + for (int i = 0; i < groups_representatives.size(); ++i) { + Vector<GLTFNodeIndex> group; + skeleton_sets.get_members(group, groups_representatives[i]); + highest_group_members.push_back(_find_highest_node(state, group)); + groups.push_back(group); + } + + for (int i = 0; i < highest_group_members.size(); ++i) { + const GLTFNodeIndex node_i = highest_group_members[i]; + + // Attach any siblings together (this needs to be done n^2/2 times) + for (int j = i + 1; j < highest_group_members.size(); ++j) { + const GLTFNodeIndex node_j = highest_group_members[j]; + + // Even if they are siblings under the root! 
:) + if (state->nodes[node_i]->parent == state->nodes[node_j]->parent) { + skeleton_sets.create_union(node_i, node_j); + } + } + + // Attach any parenting going on together (we need to do this n^2 times) + const GLTFNodeIndex node_i_parent = state->nodes[node_i]->parent; + if (node_i_parent >= 0) { + for (int j = 0; j < groups.size() && i != j; ++j) { + const Vector<GLTFNodeIndex> &group = groups[j]; + + if (group.find(node_i_parent) >= 0) { + const GLTFNodeIndex node_j = highest_group_members[j]; + skeleton_sets.create_union(node_i, node_j); + } + } + } + } + } + + // At this point, the skeleton groups should be finalized + Vector<GLTFNodeIndex> skeleton_owners; + skeleton_sets.get_representatives(skeleton_owners); + + // Mark all the skins actual skeletons, after we have merged them + for (GLTFSkeletonIndex skel_i = 0; skel_i < skeleton_owners.size(); ++skel_i) { + const GLTFNodeIndex skeleton_owner = skeleton_owners[skel_i]; + Ref<GLTFSkeleton> skeleton; + skeleton.instance(); + + Vector<GLTFNodeIndex> skeleton_nodes; + skeleton_sets.get_members(skeleton_nodes, skeleton_owner); + + for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + Ref<GLTFSkin> skin = state->skins.write[skin_i]; + + // If any of the the skeletons nodes exist in a skin, that skin now maps to the skeleton + for (int i = 0; i < skeleton_nodes.size(); ++i) { + GLTFNodeIndex skel_node_i = skeleton_nodes[i]; + if (skin->joints.find(skel_node_i) >= 0 || skin->non_joints.find(skel_node_i) >= 0) { + skin->skeleton = skel_i; + continue; + } + } + } + + Vector<GLTFNodeIndex> non_joints; + for (int i = 0; i < skeleton_nodes.size(); ++i) { + const GLTFNodeIndex node_i = skeleton_nodes[i]; + + if (state->nodes[node_i]->joint) { + skeleton->joints.push_back(node_i); + } else { + non_joints.push_back(node_i); + } + } + + state->skeletons.push_back(skeleton); + + _reparent_non_joint_skeleton_subtrees(state, state->skeletons.write[skel_i], non_joints); + } + + for (GLTFSkeletonIndex skel_i = 0; skel_i < state->skeletons.size(); ++skel_i) { + Ref<GLTFSkeleton> skeleton = state->skeletons.write[skel_i]; + + for (int i = 0; i < skeleton->joints.size(); ++i) { + const GLTFNodeIndex node_i = skeleton->joints[i]; + Ref<GLTFNode> node = state->nodes[node_i]; + + ERR_FAIL_COND_V(!node->joint, ERR_PARSE_ERROR); + ERR_FAIL_COND_V(node->skeleton >= 0, ERR_PARSE_ERROR); + node->skeleton = skel_i; + } + + ERR_FAIL_COND_V(_determine_skeleton_roots(state, skel_i), ERR_PARSE_ERROR); + } + + return OK; +} + +Error GLTFDocument::_reparent_non_joint_skeleton_subtrees(Ref<GLTFState> state, Ref<GLTFSkeleton> skeleton, const Vector<GLTFNodeIndex> &non_joints) { + DisjointSet<GLTFNodeIndex> subtree_set; + + // Populate the disjoint set with ONLY non joints that are in the skeleton hierarchy (non_joints vector) + // This way we can find any joints that lie in between joints, as the current glTF specification + // mentions nothing about non-joints being in between joints of the same skin. Hopefully one day we + // can remove this code. 
+ + // skinD depicted here explains this issue: + // https://github.com/KhronosGroup/glTF-Asset-Generator/blob/master/Output/Positive/Animation_Skin + + for (int i = 0; i < non_joints.size(); ++i) { + const GLTFNodeIndex node_i = non_joints[i]; + + subtree_set.insert(node_i); + + const GLTFNodeIndex parent_i = state->nodes[node_i]->parent; + if (parent_i >= 0 && non_joints.find(parent_i) >= 0 && !state->nodes[parent_i]->joint) { + subtree_set.create_union(parent_i, node_i); + } + } + + // Find all the non joint subtrees and re-parent them to a new "fake" joint + + Vector<GLTFNodeIndex> non_joint_subtree_roots; + subtree_set.get_representatives(non_joint_subtree_roots); + + for (int root_i = 0; root_i < non_joint_subtree_roots.size(); ++root_i) { + const GLTFNodeIndex subtree_root = non_joint_subtree_roots[root_i]; + + Vector<GLTFNodeIndex> subtree_nodes; + subtree_set.get_members(subtree_nodes, subtree_root); + + for (int subtree_i = 0; subtree_i < subtree_nodes.size(); ++subtree_i) { + ERR_FAIL_COND_V(_reparent_to_fake_joint(state, skeleton, subtree_nodes[subtree_i]), FAILED); + + // We modified the tree, recompute all the heights + _compute_node_heights(state); + } + } + + return OK; +} + +Error GLTFDocument::_reparent_to_fake_joint(Ref<GLTFState> state, Ref<GLTFSkeleton> skeleton, const GLTFNodeIndex node_index) { + Ref<GLTFNode> node = state->nodes[node_index]; + + // Can we just "steal" this joint if it is just a spatial node? + if (node->skin < 0 && node->mesh < 0 && node->camera < 0) { + node->joint = true; + // Add the joint to the skeletons joints + skeleton->joints.push_back(node_index); + return OK; + } + + GLTFNode *fake_joint = memnew(GLTFNode); + const GLTFNodeIndex fake_joint_index = state->nodes.size(); + state->nodes.push_back(fake_joint); + + // We better not be a joint, or we messed up in our logic + if (node->joint) + return FAILED; + + fake_joint->translation = node->translation; + fake_joint->rotation = node->rotation; + fake_joint->scale = node->scale; + fake_joint->xform = node->xform; + fake_joint->joint = true; + + // We can use the exact same name here, because the joint will be inside a skeleton and not the scene + fake_joint->set_name(node->get_name()); + + // Clear the nodes transforms, since it will be parented to the fake joint + node->translation = Vector3(0, 0, 0); + node->rotation = Quat(); + node->scale = Vector3(1, 1, 1); + node->xform = Transform(); + + // Transfer the node children to the fake joint + for (int child_i = 0; child_i < node->children.size(); ++child_i) { + Ref<GLTFNode> child = state->nodes[node->children[child_i]]; + child->parent = fake_joint_index; + } + + fake_joint->children = node->children; + node->children.clear(); + + // add the fake joint to the parent and remove the original joint + if (node->parent >= 0) { + Ref<GLTFNode> parent = state->nodes[node->parent]; + parent->children.erase(node_index); + parent->children.push_back(fake_joint_index); + fake_joint->parent = node->parent; + } + + // Add the node to the fake joint + fake_joint->children.push_back(node_index); + node->parent = fake_joint_index; + node->fake_joint_parent = fake_joint_index; + + // Add the fake joint to the skeletons joints + skeleton->joints.push_back(fake_joint_index); + + // Replace skin_skeletons with fake joints if we must. 
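// Illustrative recap (not from the patch) of the splice performed above, for a non-joint node N
// that carries a mesh/camera/skin and sits inside the skeleton hierarchy:
//
//     before:  parent -> N (mesh, transform T) -> child_0 ... child_n
//     after:   parent -> fake_joint (joint, transform T) -> N (mesh, identity), child_0 ... child_n
//
// The loop below finishes the job: any skin whose declared "skeleton" root pointed at N is
// redirected to the fake joint.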
+ for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + Ref<GLTFSkin> skin = state->skins.write[skin_i]; + if (skin->skin_root == node_index) { + skin->skin_root = fake_joint_index; + } + } + + return OK; +} + +Error GLTFDocument::_determine_skeleton_roots(Ref<GLTFState> state, const GLTFSkeletonIndex skel_i) { + DisjointSet<GLTFNodeIndex> disjoint_set; + + for (GLTFNodeIndex i = 0; i < state->nodes.size(); ++i) { + const Ref<GLTFNode> node = state->nodes[i]; + + if (node->skeleton != skel_i) { + continue; + } + + disjoint_set.insert(i); + + if (node->parent >= 0 && state->nodes[node->parent]->skeleton == skel_i) { + disjoint_set.create_union(node->parent, i); + } + } + + Ref<GLTFSkeleton> skeleton = state->skeletons.write[skel_i]; + + Vector<GLTFNodeIndex> owners; + disjoint_set.get_representatives(owners); + + Vector<GLTFNodeIndex> roots; + + for (int i = 0; i < owners.size(); ++i) { + Vector<GLTFNodeIndex> set; + disjoint_set.get_members(set, owners[i]); + const GLTFNodeIndex root = _find_highest_node(state, set); + ERR_FAIL_COND_V(root < 0, FAILED); + roots.push_back(root); + } + + roots.sort(); + + skeleton->roots = roots; + + if (roots.size() == 0) { + return FAILED; + } else if (roots.size() == 1) { + return OK; + } + + // Check that the subtrees have the same parent root + const GLTFNodeIndex parent = state->nodes[roots[0]]->parent; + for (int i = 1; i < roots.size(); ++i) { + if (state->nodes[roots[i]]->parent != parent) { + return FAILED; + } + } + + return OK; +} + +Error GLTFDocument::_create_skeletons(Ref<GLTFState> state) { + for (GLTFSkeletonIndex skel_i = 0; skel_i < state->skeletons.size(); ++skel_i) { + Ref<GLTFSkeleton> gltf_skeleton = state->skeletons.write[skel_i]; + + Skeleton3D *skeleton = memnew(Skeleton3D); + gltf_skeleton->godot_skeleton = skeleton; + + // Make a unique name, no gltf node represents this skeleton + skeleton->set_name(_gen_unique_name(state, "Skeleton3D")); + + List<GLTFNodeIndex> bones; + + for (int i = 0; i < gltf_skeleton->roots.size(); ++i) { + bones.push_back(gltf_skeleton->roots[i]); + } + + // Make the skeleton creation deterministic by going through the roots in + // a sorted order, and DEPTH FIRST + bones.sort(); + + while (!bones.empty()) { + const GLTFNodeIndex node_i = bones.front()->get(); + bones.pop_front(); + + Ref<GLTFNode> node = state->nodes[node_i]; + ERR_FAIL_COND_V(node->skeleton != skel_i, FAILED); + + { // Add all child nodes to the stack (deterministically) + Vector<GLTFNodeIndex> child_nodes; + for (int i = 0; i < node->children.size(); ++i) { + const GLTFNodeIndex child_i = node->children[i]; + if (state->nodes[child_i]->skeleton == skel_i) { + child_nodes.push_back(child_i); + } + } + + // Depth first insertion + child_nodes.sort(); + for (int i = child_nodes.size() - 1; i >= 0; --i) { + bones.push_front(child_nodes[i]); + } + } + + const int bone_index = skeleton->get_bone_count(); + + if (node->get_name().empty()) { + node->set_name("bone"); + } + + node->set_name(_gen_unique_bone_name(state, skel_i, node->get_name())); + + skeleton->add_bone(node->get_name()); + skeleton->set_bone_rest(bone_index, node->xform); + + if (node->parent >= 0 && state->nodes[node->parent]->skeleton == skel_i) { + const int bone_parent = skeleton->find_bone(state->nodes[node->parent]->get_name()); + ERR_FAIL_COND_V(bone_parent < 0, FAILED); + skeleton->set_bone_parent(bone_index, skeleton->find_bone(state->nodes[node->parent]->get_name())); + } + + state->scene_nodes.insert(node_i, skeleton); + } + } + + 
ERR_FAIL_COND_V(_map_skin_joints_indices_to_skeleton_bone_indices(state), ERR_PARSE_ERROR); + + return OK; +} + +Error GLTFDocument::_map_skin_joints_indices_to_skeleton_bone_indices(Ref<GLTFState> state) { + for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + Ref<GLTFSkin> skin = state->skins.write[skin_i]; + + Ref<GLTFSkeleton> skeleton = state->skeletons[skin->skeleton]; + + for (int joint_index = 0; joint_index < skin->joints_original.size(); ++joint_index) { + const GLTFNodeIndex node_i = skin->joints_original[joint_index]; + const Ref<GLTFNode> node = state->nodes[node_i]; + + const int bone_index = skeleton->godot_skeleton->find_bone(node->get_name()); + ERR_FAIL_COND_V(bone_index < 0, FAILED); + + skin->joint_i_to_bone_i.insert(joint_index, bone_index); + } + } + + return OK; +} + +Error GLTFDocument::_serialize_skins(Ref<GLTFState> state) { + _remove_duplicate_skins(state); + return OK; +} + +Error GLTFDocument::_create_skins(Ref<GLTFState> state) { + for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + Ref<GLTFSkin> gltf_skin = state->skins.write[skin_i]; + + Ref<Skin> skin; + skin.instance(); + + // Some skins don't have IBM's! What absolute monsters! + const bool has_ibms = !gltf_skin->inverse_binds.empty(); + + for (int joint_i = 0; joint_i < gltf_skin->joints_original.size(); ++joint_i) { + GLTFNodeIndex node = gltf_skin->joints_original[joint_i]; + String bone_name = state->nodes[node]->get_name(); + + Transform xform; + if (has_ibms) { + xform = gltf_skin->inverse_binds[joint_i]; + } + + if (state->use_named_skin_binds) { + skin->add_named_bind(bone_name, xform); + } else { + int32_t bone_i = gltf_skin->joint_i_to_bone_i[joint_i]; + skin->add_bind(bone_i, xform); + } + } + + gltf_skin->godot_skin = skin; + } + + // Purge the duplicates! 
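// Descriptive note (not from the patch): _remove_duplicate_skins() (defined further down) treats
// two skins as duplicates when they have the same bind count, the same bound bone for every bind
// and identical bind poses; later GLTFSkins are then simply pointed at the first equivalent Skin
// resource, while the GLTFSkin entries themselves stay in state->skins.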
+ _remove_duplicate_skins(state); + + // Create unique names now, after removing duplicates + for (GLTFSkinIndex skin_i = 0; skin_i < state->skins.size(); ++skin_i) { + Ref<Skin> skin = state->skins.write[skin_i]->godot_skin; + if (skin->get_name().empty()) { + // Make a unique name, no gltf node represents this skin + skin->set_name(_gen_unique_name(state, "Skin")); + } + } + + return OK; +} + +bool GLTFDocument::_skins_are_same(const Ref<Skin> skin_a, const Ref<Skin> skin_b) { + if (skin_a->get_bind_count() != skin_b->get_bind_count()) { + return false; + } + + for (int i = 0; i < skin_a->get_bind_count(); ++i) { + if (skin_a->get_bind_bone(i) != skin_b->get_bind_bone(i)) { + return false; + } + + Transform a_xform = skin_a->get_bind_pose(i); + Transform b_xform = skin_b->get_bind_pose(i); + + if (a_xform != b_xform) { + return false; + } + } + + return true; +} + +void GLTFDocument::_remove_duplicate_skins(Ref<GLTFState> state) { + for (int i = 0; i < state->skins.size(); ++i) { + for (int j = i + 1; j < state->skins.size(); ++j) { + const Ref<Skin> skin_i = state->skins[i]->godot_skin; + const Ref<Skin> skin_j = state->skins[j]->godot_skin; + + if (_skins_are_same(skin_i, skin_j)) { + // replace it and delete the old + state->skins.write[j]->godot_skin = skin_i; + } + } + } +} + +Error GLTFDocument::_serialize_lights(Ref<GLTFState> state) { + Array lights; + for (GLTFLightIndex i = 0; i < state->lights.size(); i++) { + Dictionary d; + Ref<GLTFLight> light = state->lights[i]; + Array color; + color.resize(3); + color[0] = light->color.r; + color[1] = light->color.g; + color[2] = light->color.b; + d["color"] = color; + d["type"] = light->type; + if (light->type == "spot") { + Dictionary s; + float inner_cone_angle = light->inner_cone_angle; + s["innerConeAngle"] = inner_cone_angle; + float outer_cone_angle = light->outer_cone_angle; + s["outerConeAngle"] = outer_cone_angle; + d["spot"] = s; + } + float intensity = light->intensity; + d["intensity"] = intensity; + float range = light->range; + d["range"] = range; + lights.push_back(d); + } + + if (!state->lights.size()) { + return OK; + } + + Dictionary extensions; + if (state->json.has("extensions")) { + extensions = state->json["extensions"]; + } else { + state->json["extensions"] = extensions; + } + Dictionary lights_punctual; + extensions["KHR_lights_punctual"] = lights_punctual; + lights_punctual["lights"] = lights; + + print_verbose("glTF: Total lights: " + itos(state->lights.size())); + + return OK; +} + +Error GLTFDocument::_serialize_cameras(Ref<GLTFState> state) { + Array cameras; + cameras.resize(state->cameras.size()); + for (GLTFCameraIndex i = 0; i < state->cameras.size(); i++) { + Dictionary d; + + Ref<GLTFCamera> camera = state->cameras[i]; + + if (camera->get_perspective() == false) { + Dictionary og; + og["ymag"] = Math::deg2rad(camera->get_fov_size()); + og["xmag"] = Math::deg2rad(camera->get_fov_size()); + og["zfar"] = camera->get_zfar(); + og["znear"] = camera->get_znear(); + d["orthographic"] = og; + d["type"] = "orthographic"; + } else if (camera->get_perspective()) { + Dictionary ppt; + // GLTF spec is in radians, Godot's camera is in degrees. 
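// Editor's sketch of the degree/radian round trip (not from the patch); the parse path further
// down applies the inverse, so the Godot value survives export and reimport:
//
//     export:  ppt["yfov"] = Math::deg2rad(camera->get_fov_size());
//     import:  camera->set_fov_size(Math::rad2deg(real_t(ppt["yfov"])));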
+ ppt["yfov"] = Math::deg2rad(camera->get_fov_size()); + ppt["zfar"] = camera->get_zfar(); + ppt["znear"] = camera->get_znear(); + d["perspective"] = ppt; + d["type"] = "perspective"; + } + cameras[i] = d; + } + + if (!state->cameras.size()) { + return OK; + } + + state->json["cameras"] = cameras; + + print_verbose("glTF: Total cameras: " + itos(state->cameras.size())); + + return OK; +} + +Error GLTFDocument::_parse_lights(Ref<GLTFState> state) { + if (!state->json.has("extensions")) { + return OK; + } + Dictionary extensions = state->json["extensions"]; + if (!extensions.has("KHR_lights_punctual")) { + return OK; + } + Dictionary lights_punctual = extensions["KHR_lights_punctual"]; + if (!lights_punctual.has("lights")) { + return OK; + } + + const Array &lights = lights_punctual["lights"]; + + for (GLTFLightIndex light_i = 0; light_i < lights.size(); light_i++) { + const Dictionary &d = lights[light_i]; + + Ref<GLTFLight> light; + light.instance(); + ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); + const String &type = d["type"]; + light->type = type; + + if (d.has("color")) { + const Array &arr = d["color"]; + ERR_FAIL_COND_V(arr.size() != 3, ERR_PARSE_ERROR); + const Color c = Color(arr[0], arr[1], arr[2]).to_srgb(); + light->color = c; + } + if (d.has("intensity")) { + light->intensity = d["intensity"]; + } + if (d.has("range")) { + light->range = d["range"]; + } + if (type == "spot") { + const Dictionary &spot = d["spot"]; + light->inner_cone_angle = spot["innerConeAngle"]; + light->outer_cone_angle = spot["outerConeAngle"]; + ERR_FAIL_COND_V_MSG(light->inner_cone_angle >= light->outer_cone_angle, ERR_PARSE_ERROR, "The inner angle must be smaller than the outer angle."); + } else if (type != "point" && type != "directional") { + ERR_FAIL_V_MSG(ERR_PARSE_ERROR, "Light type is unknown."); + } + + state->lights.push_back(light); + } + + print_verbose("glTF: Total lights: " + itos(state->lights.size())); + + return OK; +} + +Error GLTFDocument::_parse_cameras(Ref<GLTFState> state) { + if (!state->json.has("cameras")) + return OK; + + const Array cameras = state->json["cameras"]; + + for (GLTFCameraIndex i = 0; i < cameras.size(); i++) { + const Dictionary &d = cameras[i]; + + Ref<GLTFCamera> camera; + camera.instance(); + ERR_FAIL_COND_V(!d.has("type"), ERR_PARSE_ERROR); + const String &type = d["type"]; + if (type == "orthographic") { + camera->set_perspective(false); + if (d.has("orthographic")) { + const Dictionary &og = d["orthographic"]; + // GLTF spec is in radians, Godot's camera is in degrees. + camera->set_fov_size(Math::rad2deg(real_t(og["ymag"]))); + camera->set_zfar(og["zfar"]); + camera->set_znear(og["znear"]); + } else { + camera->set_fov_size(10); + } + } else if (type == "perspective") { + camera->set_perspective(true); + if (d.has("perspective")) { + const Dictionary &ppt = d["perspective"]; + // GLTF spec is in radians, Godot's camera is in degrees. 
+ camera->set_fov_size(Math::rad2deg(real_t(ppt["yfov"]))); + camera->set_zfar(ppt["zfar"]); + camera->set_znear(ppt["znear"]); + } else { + camera->set_fov_size(10); + } + } else { + ERR_FAIL_V_MSG(ERR_PARSE_ERROR, "Camera3D should be in 'orthographic' or 'perspective'"); + } + + state->cameras.push_back(camera); + } + + print_verbose("glTF: Total cameras: " + itos(state->cameras.size())); + + return OK; +} + +String GLTFDocument::interpolation_to_string(const GLTFAnimation::Interpolation p_interp) { + String interp = "LINEAR"; + if (p_interp == GLTFAnimation::INTERP_STEP) { + interp = "STEP"; + } else if (p_interp == GLTFAnimation::INTERP_LINEAR) { + interp = "LINEAR"; + } else if (p_interp == GLTFAnimation::INTERP_CATMULLROMSPLINE) { + interp = "CATMULLROMSPLINE"; + } else if (p_interp == GLTFAnimation::INTERP_CUBIC_SPLINE) { + interp = "CUBICSPLINE"; + } + + return interp; +} + +Error GLTFDocument::_serialize_animations(Ref<GLTFState> state) { + if (!state->animation_players.size()) { + return OK; + } + for (int32_t player_i = 0; player_i < state->animation_players.size(); player_i++) { + List<StringName> animation_names; + AnimationPlayer *animation_player = state->animation_players[player_i]; + animation_player->get_animation_list(&animation_names); + if (animation_names.size()) { + for (int animation_name_i = 0; animation_name_i < animation_names.size(); animation_name_i++) { + _convert_animation(state, animation_player, animation_names[animation_name_i]); + } + } + } + Array animations; + for (GLTFAnimationIndex animation_i = 0; animation_i < state->animations.size(); animation_i++) { + Dictionary d; + Ref<GLTFAnimation> gltf_animation = state->animations[animation_i]; + if (!gltf_animation->get_tracks().size()) { + continue; + } + + if (!gltf_animation->get_name().empty()) { + d["name"] = gltf_animation->get_name(); + } + Array channels; + Array samplers; + + for (Map<int, GLTFAnimation::Track>::Element *track_i = gltf_animation->get_tracks().front(); track_i; track_i = track_i->next()) { + GLTFAnimation::Track track = track_i->get(); + if (track.translation_track.times.size()) { + Dictionary t; + t["sampler"] = samplers.size(); + Dictionary s; + + s["interpolation"] = interpolation_to_string(track.translation_track.interpolation); + Vector<real_t> times = Variant(track.translation_track.times); + s["input"] = _encode_accessor_as_floats(state, times, false); + Vector<Vector3> values = Variant(track.translation_track.values); + s["output"] = _encode_accessor_as_vec3(state, values, false); + + samplers.push_back(s); + + Dictionary target; + target["path"] = "translation"; + target["node"] = track_i->key(); + + t["target"] = target; + channels.push_back(t); + } + if (track.rotation_track.times.size()) { + Dictionary t; + t["sampler"] = samplers.size(); + Dictionary s; + + s["interpolation"] = interpolation_to_string(track.rotation_track.interpolation); + Vector<real_t> times = Variant(track.rotation_track.times); + s["input"] = _encode_accessor_as_floats(state, times, false); + Vector<Quat> values = track.rotation_track.values; + s["output"] = _encode_accessor_as_quats(state, values, false); + + samplers.push_back(s); + + Dictionary target; + target["path"] = "rotation"; + target["node"] = track_i->key(); + + t["target"] = target; + channels.push_back(t); + } + if (track.scale_track.times.size()) { + Dictionary t; + t["sampler"] = samplers.size(); + Dictionary s; + + s["interpolation"] = interpolation_to_string(track.scale_track.interpolation); + Vector<real_t> times = 
Variant(track.scale_track.times); + s["input"] = _encode_accessor_as_floats(state, times, false); + Vector<Vector3> values = Variant(track.scale_track.values); + s["output"] = _encode_accessor_as_vec3(state, values, false); + + samplers.push_back(s); + + Dictionary target; + target["path"] = "scale"; + target["node"] = track_i->key(); + + t["target"] = target; + channels.push_back(t); + } + if (track.weight_tracks.size()) { + Dictionary t; + t["sampler"] = samplers.size(); + Dictionary s; + + Vector<real_t> times; + Vector<real_t> values; + + for (int32_t times_i = 0; times_i < track.weight_tracks[0].times.size(); times_i++) { + real_t time = track.weight_tracks[0].times[times_i]; + times.push_back(time); + } + + values.resize(times.size() * track.weight_tracks.size()); + // TODO Sort by order in blend shapes + for (int k = 0; k < track.weight_tracks.size(); k++) { + Vector<float> wdata = track.weight_tracks[k].values; + for (int l = 0; l < wdata.size(); l++) { + values.write[l * track.weight_tracks.size() + k] = wdata.write[l]; + } + } + + s["interpolation"] = interpolation_to_string(track.weight_tracks[track.weight_tracks.size() - 1].interpolation); + s["input"] = _encode_accessor_as_floats(state, times, false); + s["output"] = _encode_accessor_as_floats(state, values, false); + + samplers.push_back(s); + + Dictionary target; + target["path"] = "weights"; + target["node"] = track_i->key(); + + t["target"] = target; + channels.push_back(t); + } + } + if (channels.size() && samplers.size()) { + d["channels"] = channels; + d["samplers"] = samplers; + animations.push_back(d); + } + } + + state->json["animations"] = animations; + + print_verbose("glTF: Total animations '" + itos(state->animations.size()) + "'."); + + return OK; +} + +Error GLTFDocument::_parse_animations(Ref<GLTFState> state) { + if (!state->json.has("animations")) + return OK; + + const Array &animations = state->json["animations"]; + + for (GLTFAnimationIndex i = 0; i < animations.size(); i++) { + const Dictionary &d = animations[i]; + + Ref<GLTFAnimation> animation; + animation.instance(); + + if (!d.has("channels") || !d.has("samplers")) + continue; + + Array channels = d["channels"]; + Array samplers = d["samplers"]; + + if (d.has("name")) { + const String name = d["name"]; + if (name.begins_with("loop") || name.ends_with("loop") || name.begins_with("cycle") || name.ends_with("cycle")) { + animation->set_loop(true); + } + animation->set_name(_sanitize_scene_name(name)); + } + + for (int j = 0; j < channels.size(); j++) { + const Dictionary &c = channels[j]; + if (!c.has("target")) + continue; + + const Dictionary &t = c["target"]; + if (!t.has("node") || !t.has("path")) { + continue; + } + + ERR_FAIL_COND_V(!c.has("sampler"), ERR_PARSE_ERROR); + const int sampler = c["sampler"]; + ERR_FAIL_INDEX_V(sampler, samplers.size(), ERR_PARSE_ERROR); + + GLTFNodeIndex node = t["node"]; + String path = t["path"]; + + ERR_FAIL_INDEX_V(node, state->nodes.size(), ERR_PARSE_ERROR); + + GLTFAnimation::Track *track = nullptr; + + if (!animation->get_tracks().has(node)) { + animation->get_tracks()[node] = GLTFAnimation::Track(); + } + + track = &animation->get_tracks()[node]; + + const Dictionary &s = samplers[sampler]; + + ERR_FAIL_COND_V(!s.has("input"), ERR_PARSE_ERROR); + ERR_FAIL_COND_V(!s.has("output"), ERR_PARSE_ERROR); + + const int input = s["input"]; + const int output = s["output"]; + + GLTFAnimation::Interpolation interp = GLTFAnimation::INTERP_LINEAR; + int output_count = 1; + if (s.has("interpolation")) { + const String &in 
= s["interpolation"]; + if (in == "STEP") { + interp = GLTFAnimation::INTERP_STEP; + } else if (in == "LINEAR") { + interp = GLTFAnimation::INTERP_LINEAR; + } else if (in == "CATMULLROMSPLINE") { + interp = GLTFAnimation::INTERP_CATMULLROMSPLINE; + output_count = 3; + } else if (in == "CUBICSPLINE") { + interp = GLTFAnimation::INTERP_CUBIC_SPLINE; + output_count = 3; + } + } + + const Vector<float> times = _decode_accessor_as_floats(state, input, false); + if (path == "translation") { + const Vector<Vector3> translations = _decode_accessor_as_vec3(state, output, false); + track->translation_track.interpolation = interp; + track->translation_track.times = Variant(times); //convert via variant + track->translation_track.values = Variant(translations); //convert via variant + } else if (path == "rotation") { + const Vector<Quat> rotations = _decode_accessor_as_quat(state, output, false); + track->rotation_track.interpolation = interp; + track->rotation_track.times = Variant(times); //convert via variant + track->rotation_track.values = rotations; + } else if (path == "scale") { + const Vector<Vector3> scales = _decode_accessor_as_vec3(state, output, false); + track->scale_track.interpolation = interp; + track->scale_track.times = Variant(times); //convert via variant + track->scale_track.values = Variant(scales); //convert via variant + } else if (path == "weights") { + const Vector<float> weights = _decode_accessor_as_floats(state, output, false); + + ERR_FAIL_INDEX_V(state->nodes[node]->mesh, state->meshes.size(), ERR_PARSE_ERROR); + Ref<GLTFMesh> mesh = state->meshes[state->nodes[node]->mesh]; + ERR_CONTINUE(!mesh->get_blend_weights().size()); + const int wc = mesh->get_blend_weights().size(); + + track->weight_tracks.resize(wc); + + const int expected_value_count = times.size() * output_count * wc; + ERR_FAIL_COND_V_MSG(weights.size() != expected_value_count, ERR_PARSE_ERROR, "Invalid weight data, expected " + itos(expected_value_count) + " weight values, got " + itos(weights.size()) + " instead."); + + const int wlen = weights.size() / wc; + for (int k = 0; k < wc; k++) { //separate tracks, having them together is not such a good idea + GLTFAnimation::Channel<float> cf; + cf.interpolation = interp; + cf.times = Variant(times); + Vector<float> wdata; + wdata.resize(wlen); + for (int l = 0; l < wlen; l++) { + wdata.write[l] = weights[l * wc + k]; + } + + cf.values = wdata; + track->weight_tracks.write[k] = cf; + } + } else { + WARN_PRINT("Invalid path '" + path + "'."); + } + } + + state->animations.push_back(animation); + } + + print_verbose("glTF: Total animations '" + itos(state->animations.size()) + "'."); + + return OK; +} + +void GLTFDocument::_assign_scene_names(Ref<GLTFState> state) { + for (int i = 0; i < state->nodes.size(); i++) { + Ref<GLTFNode> n = state->nodes[i]; + + // Any joints get unique names generated when the skeleton is made, unique to the skeleton + if (n->skeleton >= 0) + continue; + + if (n->get_name().empty()) { + if (n->mesh >= 0) { + n->set_name(_gen_unique_name(state, "Mesh")); + } else if (n->camera >= 0) { + n->set_name(_gen_unique_name(state, "Camera3D")); + } else { + n->set_name(_gen_unique_name(state, "Node")); + } + } + + n->set_name(_gen_unique_name(state, n->get_name())); + } +} + +BoneAttachment3D *GLTFDocument::_generate_bone_attachment(Ref<GLTFState> state, Skeleton3D *skeleton, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + Ref<GLTFNode> bone_node = state->nodes[gltf_node->parent]; + + BoneAttachment3D 
*bone_attachment = memnew(BoneAttachment3D); + print_verbose("glTF: Creating bone attachment for: " + gltf_node->get_name()); + + ERR_FAIL_COND_V(!bone_node->joint, nullptr); + + bone_attachment->set_bone_name(bone_node->get_name()); + + return bone_attachment; +} + +GLTFMeshIndex GLTFDocument::_convert_mesh_instance(Ref<GLTFState> state, MeshInstance3D *p_mesh_instance) { + ERR_FAIL_NULL_V(p_mesh_instance, -1); + if (p_mesh_instance->get_mesh().is_null()) { + return -1; + } + Ref<EditorSceneImporterMesh> import_mesh; + import_mesh.instance(); + Ref<Mesh> godot_mesh = p_mesh_instance->get_mesh(); + if (godot_mesh.is_null()) { + return -1; + } + Vector<float> blend_weights; + Vector<String> blend_names; + int32_t blend_count = godot_mesh->get_blend_shape_count(); + blend_names.resize(blend_count); + blend_weights.resize(blend_count); + for (int32_t blend_i = 0; blend_i < godot_mesh->get_blend_shape_count(); blend_i++) { + String blend_name = godot_mesh->get_blend_shape_name(blend_i); + blend_names.write[blend_i] = blend_name; + import_mesh->add_blend_shape(blend_name); + } + for (int32_t surface_i = 0; surface_i < godot_mesh->get_surface_count(); surface_i++) { + Mesh::PrimitiveType primitive_type = godot_mesh->surface_get_primitive_type(surface_i); + Array arrays = godot_mesh->surface_get_arrays(surface_i); + Array blend_shape_arrays = godot_mesh->surface_get_blend_shape_arrays(surface_i); + Ref<Material> mat = godot_mesh->surface_get_material(surface_i); + Ref<ArrayMesh> godot_array_mesh = godot_mesh; + String surface_name; + if (godot_array_mesh.is_valid()) { + surface_name = godot_array_mesh->surface_get_name(surface_i); + } + if (p_mesh_instance->get_surface_material(surface_i).is_valid()) { + mat = p_mesh_instance->get_surface_material(surface_i); + } + if (p_mesh_instance->get_material_override().is_valid()) { + mat = p_mesh_instance->get_material_override(); + } + import_mesh->add_surface(primitive_type, arrays, blend_shape_arrays, Dictionary(), mat, surface_name); + } + for (int32_t blend_i = 0; blend_i < blend_count; blend_i++) { + blend_weights.write[blend_i] = 0.0f; + } + Ref<GLTFMesh> gltf_mesh; + gltf_mesh.instance(); + gltf_mesh->set_mesh(import_mesh); + gltf_mesh->set_blend_weights(blend_weights); + GLTFMeshIndex mesh_i = state->meshes.size(); + state->meshes.push_back(gltf_mesh); + return mesh_i; +} + +EditorSceneImporterMeshNode3D *GLTFDocument::_generate_mesh_instance(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + + ERR_FAIL_INDEX_V(gltf_node->mesh, state->meshes.size(), nullptr); + + EditorSceneImporterMeshNode3D *mi = memnew(EditorSceneImporterMeshNode3D); + print_verbose("glTF: Creating mesh for: " + gltf_node->get_name()); + + Ref<GLTFMesh> mesh = state->meshes.write[gltf_node->mesh]; + if (mesh.is_null()) { + return mi; + } + Ref<EditorSceneImporterMesh> import_mesh = mesh->get_mesh(); + if (import_mesh.is_null()) { + return mi; + } + mi->set_mesh(import_mesh); + for (int i = 0; i < mesh->get_blend_weights().size(); i++) { + mi->set("blend_shapes/" + mesh->get_mesh()->get_blend_shape_name(i), mesh->get_blend_weights()[i]); + } + return mi; +} + +Light3D *GLTFDocument::_generate_light(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + + ERR_FAIL_INDEX_V(gltf_node->light, state->lights.size(), nullptr); + + print_verbose("glTF: Creating light for: " + gltf_node->get_name()); + + Ref<GLTFLight> l = 
state->lights[gltf_node->light]; + + float intensity = l->intensity; + if (intensity > 10) { + // GLTF spec has the default around 1, but Blender defaults lights to 100. + // The only sane way to handle this is to check where it came from and + // handle it accordingly. If it's over 10, it probably came from Blender. + intensity /= 100; + } + + if (l->type == "directional") { + DirectionalLight3D *light = memnew(DirectionalLight3D); + light->set_param(Light3D::PARAM_ENERGY, intensity); + light->set_color(l->color); + return light; + } + + const float range = CLAMP(l->range, 0, 4096); + // Doubling the range will double the effective brightness, so we need double attenuation (half brightness). + // We want to have double intensity give double brightness, so we need half the attenuation. + const float attenuation = range / intensity; + if (l->type == "point") { + OmniLight3D *light = memnew(OmniLight3D); + light->set_param(OmniLight3D::PARAM_ATTENUATION, attenuation); + light->set_param(OmniLight3D::PARAM_RANGE, range); + light->set_color(l->color); + return light; + } + if (l->type == "spot") { + SpotLight3D *light = memnew(SpotLight3D); + light->set_param(SpotLight3D::PARAM_ATTENUATION, attenuation); + light->set_param(SpotLight3D::PARAM_RANGE, range); + light->set_param(SpotLight3D::PARAM_SPOT_ANGLE, Math::rad2deg(l->outer_cone_angle)); + light->set_color(l->color); + + // Line of best fit derived from guessing, see https://www.desmos.com/calculator/biiflubp8b + // The points in desmos are not exact, except for (1, infinity). + float angle_ratio = l->inner_cone_angle / l->outer_cone_angle; + float angle_attenuation = 0.2 / (1 - angle_ratio) - 0.1; + light->set_param(SpotLight3D::PARAM_SPOT_ATTENUATION, angle_attenuation); + return light; + } + return nullptr; +} + +Camera3D *GLTFDocument::_generate_camera(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + + ERR_FAIL_INDEX_V(gltf_node->camera, state->cameras.size(), nullptr); + + Camera3D *camera = memnew(Camera3D); + print_verbose("glTF: Creating camera for: " + gltf_node->get_name()); + + Ref<GLTFCamera> c = state->cameras[gltf_node->camera]; + if (c->get_perspective()) { + camera->set_perspective(c->get_fov_size(), c->get_znear(), c->get_zfar()); + } else { + camera->set_orthogonal(c->get_fov_size(), c->get_znear(), c->get_zfar()); + } + + return camera; +} + +GLTFCameraIndex GLTFDocument::_convert_camera(Ref<GLTFState> state, Camera3D *p_camera) { + print_verbose("glTF: Converting camera: " + p_camera->get_name()); + + Ref<GLTFCamera> c; + c.instance(); + + if (p_camera->get_projection() == Camera3D::Projection::PROJECTION_PERSPECTIVE) { + c->set_perspective(true); + c->set_fov_size(p_camera->get_fov()); + c->set_zfar(p_camera->get_zfar()); + c->set_znear(p_camera->get_znear()); + } else { + c->set_fov_size(p_camera->get_fov()); + c->set_zfar(p_camera->get_zfar()); + c->set_znear(p_camera->get_znear()); + } + GLTFCameraIndex camera_index = state->cameras.size(); + state->cameras.push_back(c); + return camera_index; +} + +GLTFLightIndex GLTFDocument::_convert_light(Ref<GLTFState> state, Light3D *p_light) { + print_verbose("glTF: Converting light: " + p_light->get_name()); + + Ref<GLTFLight> l; + l.instance(); + l->color = p_light->get_color(); + if (cast_to<DirectionalLight3D>(p_light)) { + l->type = "directional"; + DirectionalLight3D *light = cast_to<DirectionalLight3D>(p_light); + l->intensity = light->get_param(DirectionalLight3D::PARAM_ENERGY); + 
l->range = FLT_MAX; // Range for directional lights is infinite in Godot. + } else if (cast_to<OmniLight3D>(p_light)) { + l->type = "point"; + OmniLight3D *light = cast_to<OmniLight3D>(p_light); + l->range = light->get_param(OmniLight3D::PARAM_RANGE); + float attenuation = p_light->get_param(OmniLight3D::PARAM_ATTENUATION); + l->intensity = l->range / attenuation; + } else if (cast_to<SpotLight3D>(p_light)) { + l->type = "spot"; + SpotLight3D *light = cast_to<SpotLight3D>(p_light); + l->range = light->get_param(SpotLight3D::PARAM_RANGE); + float attenuation = light->get_param(SpotLight3D::PARAM_ATTENUATION); + l->intensity = l->range / attenuation; + l->outer_cone_angle = Math::deg2rad(light->get_param(SpotLight3D::PARAM_SPOT_ANGLE)); + + // This equation is the inverse of the import equation (which has a desmos link). + float angle_ratio = 1 - (0.2 / (0.1 + light->get_param(SpotLight3D::PARAM_SPOT_ATTENUATION))); + angle_ratio = MAX(0, angle_ratio); + l->inner_cone_angle = l->outer_cone_angle * angle_ratio; + } + + GLTFLightIndex light_index = state->lights.size(); + state->lights.push_back(l); + return light_index; +} + +GLTFSkeletonIndex GLTFDocument::_convert_skeleton(Ref<GLTFState> state, Skeleton3D *p_skeleton) { + print_verbose("glTF: Converting skeleton: " + p_skeleton->get_name()); + Ref<GLTFSkeleton> gltf_skeleton; + gltf_skeleton.instance(); + gltf_skeleton->set_name(_gen_unique_name(state, p_skeleton->get_name())); + gltf_skeleton->godot_skeleton = p_skeleton; + GLTFSkeletonIndex skeleton_i = state->skeletons.size(); + state->skeletons.push_back(gltf_skeleton); + return skeleton_i; +} + +void GLTFDocument::_convert_spatial(Ref<GLTFState> state, Node3D *p_spatial, Ref<GLTFNode> p_node) { + Transform xform = p_spatial->get_transform(); + p_node->scale = xform.basis.get_scale(); + p_node->rotation = xform.basis.get_rotation_quat(); + p_node->translation = xform.origin; +} + +Node3D *GLTFDocument::_generate_spatial(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + + Node3D *spatial = memnew(Node3D); + print_verbose("glTF: Converting spatial: " + gltf_node->get_name()); + + return spatial; +} +void GLTFDocument::_convert_scene_node(Ref<GLTFState> state, Node *p_current, Node *p_root, const GLTFNodeIndex p_gltf_parent, const GLTFNodeIndex p_gltf_root) { + bool retflag = true; + Node3D *spatial = cast_to<Node3D>(p_current); + _check_visibility(p_current, retflag); + if (retflag) { + return; + } + Ref<GLTFNode> gltf_node; + gltf_node.instance(); + gltf_node->set_name(_gen_unique_name(state, p_current->get_name())); + if (cast_to<Node3D>(p_current)) { + _convert_spatial(state, spatial, gltf_node); + } + if (cast_to<MeshInstance3D>(p_current)) { + _convert_mesh_to_gltf(p_current, state, spatial, gltf_node); + } else if (cast_to<BoneAttachment3D>(p_current)) { + _convert_bone_attachment_to_gltf(p_current, state, gltf_node, retflag); + // TODO 2020-12-21 iFire Handle the case of objects under the bone attachment. + return; + } else if (cast_to<Skeleton3D>(p_current)) { + _convert_skeleton_to_gltf(p_current, state, p_gltf_parent, p_gltf_root, gltf_node, p_root); + // We ignore the Godot Engine node that is the skeleton. 
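+ // Its bones are emitted as glTF joint nodes when the skinned meshes that use this skeleton are converted (see _convert_mesh_instances).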
+ return; + } else if (cast_to<MultiMeshInstance3D>(p_current)) { + _convert_mult_mesh_instance_to_gltf(p_current, p_gltf_parent, p_gltf_root, gltf_node, state, p_root); + } else if (cast_to<CSGShape3D>(p_current)) { + if (p_current->get_parent() && cast_to<CSGShape3D>(p_current)->is_root_shape()) { + _convert_csg_shape_to_gltf(p_current, p_gltf_parent, gltf_node, state); + } + } else if (cast_to<GridMap>(p_current)) { + _convert_grid_map_to_gltf(p_current, p_gltf_parent, p_gltf_root, gltf_node, state, p_root); + } else if (cast_to<Camera3D>(p_current)) { + Camera3D *camera = Object::cast_to<Camera3D>(p_current); + _convert_camera_to_gltf(camera, state, spatial, gltf_node); + } else if (cast_to<Light3D>(p_current)) { + Light3D *light = Object::cast_to<Light3D>(p_current); + _convert_light_to_gltf(light, state, spatial, gltf_node); + } else if (cast_to<AnimationPlayer>(p_current)) { + AnimationPlayer *animation_player = Object::cast_to<AnimationPlayer>(p_current); + _convert_animation_player_to_gltf(animation_player, state, p_gltf_parent, p_gltf_root, gltf_node, p_current, p_root); + } + GLTFNodeIndex current_node_i = state->nodes.size(); + GLTFNodeIndex gltf_root = p_gltf_root; + if (gltf_root == -1) { + gltf_root = current_node_i; + Array scenes; + scenes.push_back(gltf_root); + state->json["scene"] = scenes; + } + _create_gltf_node(state, p_current, current_node_i, p_gltf_parent, gltf_root, gltf_node); + for (int node_i = 0; node_i < p_current->get_child_count(); node_i++) { + _convert_scene_node(state, p_current->get_child(node_i), p_root, current_node_i, gltf_root); + } +} + +void GLTFDocument::_convert_csg_shape_to_gltf(Node *p_current, GLTFNodeIndex p_gltf_parent, Ref<GLTFNode> gltf_node, Ref<GLTFState> state) { + CSGShape3D *csg = Object::cast_to<CSGShape3D>(p_current); + csg->call("_update_shape"); + Array meshes = csg->get_meshes(); + if (meshes.size() != 2) { + return; + } + Ref<Material> mat; + if (csg->get_material_override().is_valid()) { + mat = csg->get_material_override(); + } + Ref<GLTFMesh> gltf_mesh; + gltf_mesh.instance(); + Ref<EditorSceneImporterMesh> import_mesh; + import_mesh.instance(); + Ref<ArrayMesh> array_mesh = csg->get_meshes()[1]; + for (int32_t surface_i = 0; surface_i < array_mesh->get_surface_count(); surface_i++) { + import_mesh->add_surface(Mesh::PrimitiveType::PRIMITIVE_TRIANGLES, array_mesh->surface_get_arrays(surface_i), Array(), Dictionary(), mat, array_mesh->surface_get_name(surface_i)); + } + gltf_mesh->set_mesh(import_mesh); + GLTFMeshIndex mesh_i = state->meshes.size(); + state->meshes.push_back(gltf_mesh); + gltf_node->mesh = mesh_i; + gltf_node->xform = csg->get_meshes()[0]; + gltf_node->set_name(_gen_unique_name(state, csg->get_name())); +} + +void GLTFDocument::_create_gltf_node(Ref<GLTFState> state, Node *p_scene_parent, GLTFNodeIndex current_node_i, + GLTFNodeIndex p_parent_node_index, GLTFNodeIndex p_root_gltf_node, Ref<GLTFNode> gltf_node) { + state->scene_nodes.insert(current_node_i, p_scene_parent); + state->nodes.push_back(gltf_node); + if (current_node_i == p_parent_node_index) { + return; + } + if (p_parent_node_index == -1) { + return; + } + state->nodes.write[p_parent_node_index]->children.push_back(current_node_i); +} + +void GLTFDocument::_convert_animation_player_to_gltf(AnimationPlayer *animation_player, Ref<GLTFState> state, const GLTFNodeIndex &p_gltf_current, const GLTFNodeIndex &p_gltf_root_index, Ref<GLTFNode> p_gltf_node, Node *p_scene_parent, Node *p_root) { + ERR_FAIL_COND(!animation_player); + 
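// Only register the player here; its animation tracks are converted in a later pass, after the whole scene tree has been walked and every animated node has a glTF node index. +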
state->animation_players.push_back(animation_player); + print_verbose(String("glTF: Converting animation player: ") + animation_player->get_name()); +} + +void GLTFDocument::_check_visibility(Node *p_node, bool &retflag) { + retflag = true; + Node3D *spatial = Object::cast_to<Node3D>(p_node); + Node2D *node_2d = Object::cast_to<Node2D>(p_node); + if (node_2d && !node_2d->is_visible()) { + return; + } + if (spatial && !spatial->is_visible()) { + return; + } + retflag = false; +} + +void GLTFDocument::_convert_camera_to_gltf(Camera3D *camera, Ref<GLTFState> state, Node3D *spatial, Ref<GLTFNode> gltf_node) { + ERR_FAIL_COND(!camera); + GLTFCameraIndex camera_index = _convert_camera(state, camera); + if (camera_index != -1) { + gltf_node->camera = camera_index; + } +} + +void GLTFDocument::_convert_light_to_gltf(Light3D *light, Ref<GLTFState> state, Node3D *spatial, Ref<GLTFNode> gltf_node) { + ERR_FAIL_COND(!light); + GLTFLightIndex light_index = _convert_light(state, light); + if (light_index != -1) { + gltf_node->light = light_index; + } +} + +void GLTFDocument::_convert_grid_map_to_gltf(Node *p_scene_parent, const GLTFNodeIndex &p_parent_node_index, const GLTFNodeIndex &p_root_node_index, Ref<GLTFNode> gltf_node, Ref<GLTFState> state, Node *p_root_node) { + GridMap *grid_map = Object::cast_to<GridMap>(p_scene_parent); + ERR_FAIL_COND(!grid_map); + Array cells = grid_map->get_used_cells(); + for (int32_t k = 0; k < cells.size(); k++) { + GLTFNode *new_gltf_node = memnew(GLTFNode); + gltf_node->children.push_back(state->nodes.size()); + state->nodes.push_back(new_gltf_node); + Vector3 cell_location = cells[k]; + int32_t cell = grid_map->get_cell_item( + Vector3(cell_location.x, cell_location.y, cell_location.z)); + // Copy the mesh library item into an importer mesh, mirroring the CSG shape and MultiMesh conversions; only ArrayMesh items are exported. + Ref<ArrayMesh> item_mesh = grid_map->get_mesh_library()->get_item_mesh(cell); + Ref<EditorSceneImporterMesh> import_mesh; + import_mesh.instance(); + if (item_mesh.is_valid()) { + for (int32_t surface_i = 0; surface_i < item_mesh->get_surface_count(); surface_i++) { + import_mesh->add_surface(item_mesh->surface_get_primitive_type(surface_i), item_mesh->surface_get_arrays(surface_i), Array(), Dictionary(), item_mesh->surface_get_material(surface_i), item_mesh->surface_get_name(surface_i)); + } + } + Transform cell_xform; + cell_xform.basis.set_orthogonal_index( + grid_map->get_cell_item_orientation( + Vector3(cell_location.x, cell_location.y, cell_location.z))); + cell_xform.basis.scale(Vector3(grid_map->get_cell_scale(), + grid_map->get_cell_scale(), + grid_map->get_cell_scale())); + cell_xform.set_origin(grid_map->map_to_world( + Vector3(cell_location.x, cell_location.y, cell_location.z))); + Ref<GLTFMesh> gltf_mesh; + gltf_mesh.instance(); + gltf_mesh->set_mesh(import_mesh); + new_gltf_node->mesh = state->meshes.size(); + state->meshes.push_back(gltf_mesh); + new_gltf_node->xform = cell_xform * grid_map->get_transform(); + new_gltf_node->set_name(_gen_unique_name(state, grid_map->get_mesh_library()->get_item_name(cell))); + } +} + +void GLTFDocument::_convert_mult_mesh_instance_to_gltf(Node *p_scene_parent, const GLTFNodeIndex &p_parent_node_index, const GLTFNodeIndex &p_root_node_index, Ref<GLTFNode> gltf_node, Ref<GLTFState> state, Node *p_root_node) { + MultiMeshInstance3D *multi_mesh_instance = Object::cast_to<MultiMeshInstance3D>(p_scene_parent); + ERR_FAIL_COND(!multi_mesh_instance); + Ref<MultiMesh> multi_mesh = multi_mesh_instance->get_multimesh(); + if (multi_mesh.is_valid()) { + for (int32_t instance_i = 0; instance_i < multi_mesh->get_instance_count(); + instance_i++) { + GLTFNode *new_gltf_node = memnew(GLTFNode); + Transform transform; + if (multi_mesh->get_transform_format() == MultiMesh::TRANSFORM_2D) { + Transform2D xform_2d = multi_mesh->get_instance_transform_2d(instance_i); + transform.origin = + Vector3(xform_2d.get_origin().x, 0, xform_2d.get_origin().y); + real_t rotation = 
xform_2d.get_rotation(); + Quat quat; + quat.set_axis_angle(Vector3(0, 1, 0), rotation); + Size2 scale = xform_2d.get_scale(); + transform.basis.set_quat_scale(quat, + Vector3(scale.x, 0, scale.y)); + transform = + multi_mesh_instance->get_transform() * transform; + } else if (multi_mesh->get_transform_format() == MultiMesh::TRANSFORM_3D) { + transform = multi_mesh_instance->get_transform() * + multi_mesh->get_instance_transform(instance_i); + } + Ref<ArrayMesh> mm = multi_mesh->get_mesh(); + if (mm.is_valid()) { + Ref<EditorSceneImporterMesh> mesh; + mesh.instance(); + for (int32_t surface_i = 0; surface_i < mm->get_surface_count(); surface_i++) { + Array surface = mm->surface_get_arrays(surface_i); + mesh->add_surface(mm->surface_get_primitive_type(surface_i), surface, Array(), Dictionary(), + mm->surface_get_material(surface_i), mm->get_name()); + } + Ref<GLTFMesh> gltf_mesh; + gltf_mesh.instance(); + gltf_mesh->set_name(multi_mesh->get_name()); + gltf_mesh->set_mesh(mesh); + new_gltf_node->mesh = state->meshes.size(); + state->meshes.push_back(gltf_mesh); + } + new_gltf_node->xform = transform; + new_gltf_node->set_name(_gen_unique_name(state, multi_mesh_instance->get_name())); + gltf_node->children.push_back(state->nodes.size()); + state->nodes.push_back(new_gltf_node); + } + } +} + +void GLTFDocument::_convert_skeleton_to_gltf(Node *p_scene_parent, Ref<GLTFState> state, const GLTFNodeIndex &p_parent_node_index, const GLTFNodeIndex &p_root_node_index, Ref<GLTFNode> gltf_node, Node *p_root_node) { + Skeleton3D *skeleton = Object::cast_to<Skeleton3D>(p_scene_parent); + if (skeleton) { + // Remove placeholder skeleton3d node by not creating the gltf node + // Skins are per mesh + for (int node_i = 0; node_i < skeleton->get_child_count(); node_i++) { + _convert_scene_node(state, skeleton->get_child(node_i), p_root_node, p_parent_node_index, p_root_node_index); + } + } +} + +void GLTFDocument::_convert_bone_attachment_to_gltf(Node *p_scene_parent, Ref<GLTFState> state, Ref<GLTFNode> gltf_node, bool &retflag) { + retflag = true; + BoneAttachment3D *bone_attachment = Object::cast_to<BoneAttachment3D>(p_scene_parent); + if (bone_attachment) { + Node *node = bone_attachment->get_parent(); + while (node) { + Skeleton3D *bone_attachment_skeleton = Object::cast_to<Skeleton3D>(node); + if (bone_attachment_skeleton) { + for (GLTFSkeletonIndex skeleton_i = 0; skeleton_i < state->skeletons.size(); skeleton_i++) { + if (state->skeletons[skeleton_i]->godot_skeleton != bone_attachment_skeleton) { + continue; + } + state->skeletons.write[skeleton_i]->bone_attachments.push_back(bone_attachment); + break; + } + break; + } + node = node->get_parent(); + } + gltf_node.unref(); + return; + } + retflag = false; +} + +void GLTFDocument::_convert_mesh_to_gltf(Node *p_scene_parent, Ref<GLTFState> state, Node3D *spatial, Ref<GLTFNode> gltf_node) { + MeshInstance3D *mi = Object::cast_to<MeshInstance3D>(p_scene_parent); + if (mi) { + GLTFMeshIndex gltf_mesh_index = _convert_mesh_instance(state, mi); + if (gltf_mesh_index != -1) { + gltf_node->mesh = gltf_mesh_index; + } + } +} + +void GLTFDocument::_generate_scene_node(Ref<GLTFState> state, Node *scene_parent, Node3D *scene_root, const GLTFNodeIndex node_index) { + Ref<GLTFNode> gltf_node = state->nodes[node_index]; + + Node3D *current_node = nullptr; + + // Is our parent a skeleton + Skeleton3D *active_skeleton = Object::cast_to<Skeleton3D>(scene_parent); + + if (gltf_node->skeleton >= 0) { + Skeleton3D *skeleton = 
state->skeletons[gltf_node->skeleton]->godot_skeleton; + + if (active_skeleton != skeleton) { + ERR_FAIL_COND_MSG(active_skeleton != nullptr, "glTF: Generating scene detected directly parented Skeletons"); + + // Add it to the scene if it has not already been added + if (skeleton->get_parent() == nullptr) { + scene_parent->add_child(skeleton); + skeleton->set_owner(scene_root); + } + } + + active_skeleton = skeleton; + current_node = skeleton; + } + + // If we have an active skeleton, and the node is not skinned, we need to create a bone attachment + if (current_node == nullptr && active_skeleton != nullptr && gltf_node->skin < 0) { + BoneAttachment3D *bone_attachment = _generate_bone_attachment(state, active_skeleton, node_index); + + scene_parent->add_child(bone_attachment); + bone_attachment->set_owner(scene_root); + + // There is no gltf_node that represents this, so just directly create a unique name + bone_attachment->set_name(_gen_unique_name(state, "BoneAttachment3D")); + + // We change the scene_parent to our bone attachment now. We do not set current_node because we want to make the node + // and attach it to the bone_attachment + scene_parent = bone_attachment; + } + + // We still have not managed to make a node + if (current_node == nullptr) { + if (gltf_node->mesh >= 0) { + current_node = _generate_mesh_instance(state, scene_parent, node_index); + } else if (gltf_node->camera >= 0) { + current_node = _generate_camera(state, scene_parent, node_index); + } else if (gltf_node->light >= 0) { + current_node = _generate_light(state, scene_parent, node_index); + } + + if (!current_node) { + current_node = _generate_spatial(state, scene_parent, node_index); + } + + scene_parent->add_child(current_node); + if (current_node != scene_root) { + current_node->set_owner(scene_root); + } + current_node->set_transform(gltf_node->xform); + current_node->set_name(gltf_node->get_name()); + } + + state->scene_nodes.insert(node_index, current_node); + + for (int i = 0; i < gltf_node->children.size(); ++i) { + _generate_scene_node(state, current_node, scene_root, gltf_node->children[i]); + } +} + +template <class T> +struct EditorSceneImporterGLTFInterpolate { + T lerp(const T &a, const T &b, float c) const { + return a + (b - a) * c; + } + + T catmull_rom(const T &p0, const T &p1, const T &p2, const T &p3, float t) { + const float t2 = t * t; + const float t3 = t2 * t; + + return 0.5f * ((2.0f * p1) + (-p0 + p2) * t + (2.0f * p0 - 5.0f * p1 + 4.0f * p2 - p3) * t2 + (-p0 + 3.0f * p1 - 3.0f * p2 + p3) * t3); + } + + T bezier(T start, T control_1, T control_2, T end, float t) { + /* Formula from Wikipedia article on Bezier curves. 
*/ + const real_t omt = (1.0 - t); + const real_t omt2 = omt * omt; + const real_t omt3 = omt2 * omt; + const real_t t2 = t * t; + const real_t t3 = t2 * t; + + return start * omt3 + control_1 * omt2 * t * 3.0 + control_2 * omt * t2 * 3.0 + end * t3; + } +}; + +// thank you for existing, partial specialization +template <> +struct EditorSceneImporterGLTFInterpolate<Quat> { + Quat lerp(const Quat &a, const Quat &b, const float c) const { + ERR_FAIL_COND_V_MSG(!a.is_normalized(), Quat(), "The quaternion \"a\" must be normalized."); + ERR_FAIL_COND_V_MSG(!b.is_normalized(), Quat(), "The quaternion \"b\" must be normalized."); + + return a.slerp(b, c).normalized(); + } + + Quat catmull_rom(const Quat &p0, const Quat &p1, const Quat &p2, const Quat &p3, const float c) { + ERR_FAIL_COND_V_MSG(!p1.is_normalized(), Quat(), "The quaternion \"p1\" must be normalized."); + ERR_FAIL_COND_V_MSG(!p2.is_normalized(), Quat(), "The quaternion \"p2\" must be normalized."); + + return p1.slerp(p2, c).normalized(); + } + + Quat bezier(const Quat start, const Quat control_1, const Quat control_2, const Quat end, const float t) { + ERR_FAIL_COND_V_MSG(!start.is_normalized(), Quat(), "The start quaternion must be normalized."); + ERR_FAIL_COND_V_MSG(!end.is_normalized(), Quat(), "The end quaternion must be normalized."); + + return start.slerp(end, t).normalized(); + } +}; + +template <class T> +T GLTFDocument::_interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, const float p_time, const GLTFAnimation::Interpolation p_interp) { + //could use binary search, worth it? + int idx = -1; + for (int i = 0; i < p_times.size(); i++) { + if (p_times[i] > p_time) + break; + idx++; + } + + EditorSceneImporterGLTFInterpolate<T> interp; + + switch (p_interp) { + case GLTFAnimation::INTERP_LINEAR: { + if (idx == -1) { + return p_values[0]; + } else if (idx >= p_times.size() - 1) { + return p_values[p_times.size() - 1]; + } + + const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + return interp.lerp(p_values[idx], p_values[idx + 1], c); + } break; + case GLTFAnimation::INTERP_STEP: { + if (idx == -1) { + return p_values[0]; + } else if (idx >= p_times.size() - 1) { + return p_values[p_times.size() - 1]; + } + + return p_values[idx]; + } break; + case GLTFAnimation::INTERP_CATMULLROMSPLINE: { + if (idx == -1) { + return p_values[1]; + } else if (idx >= p_times.size() - 1) { + return p_values[1 + p_times.size() - 1]; + } + + const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + return interp.catmull_rom(p_values[idx - 1], p_values[idx], p_values[idx + 1], p_values[idx + 3], c); + } break; + case GLTFAnimation::INTERP_CUBIC_SPLINE: { + if (idx == -1) { + return p_values[1]; + } else if (idx >= p_times.size() - 1) { + return p_values[(p_times.size() - 1) * 3 + 1]; + } + + const float c = (p_time - p_times[idx]) / (p_times[idx + 1] - p_times[idx]); + + const T from = p_values[idx * 3 + 1]; + const T c1 = from + p_values[idx * 3 + 2]; + const T to = p_values[idx * 3 + 4]; + const T c2 = to + p_values[idx * 3 + 3]; + + return interp.bezier(from, c1, c2, to, c); + } break; + } + + ERR_FAIL_V(p_values[0]); +} + +void GLTFDocument::_import_animation(Ref<GLTFState> state, AnimationPlayer *ap, const GLTFAnimationIndex index, const int bake_fps) { + Ref<GLTFAnimation> anim = state->animations[index]; + + String name = anim->get_name(); + if (name.empty()) { + // No node represent these, and they are not in the hierarchy, so just make a unique name + name = 
_gen_unique_name(state, "Animation"); + } + + Ref<Animation> animation; + animation.instance(); + animation->set_name(name); + + if (anim->get_loop()) { + animation->set_loop(true); + } + + float length = 0; + + for (Map<int, GLTFAnimation::Track>::Element *track_i = anim->get_tracks().front(); track_i; track_i = track_i->next()) { + const GLTFAnimation::Track &track = track_i->get(); + //need to find the path + NodePath node_path; + + GLTFNodeIndex node_index = track_i->key(); + if (state->nodes[node_index]->fake_joint_parent >= 0) { + // Should be same as parent + node_index = state->nodes[node_index]->fake_joint_parent; + } + + const Ref<GLTFNode> gltf_node = state->nodes[track_i->key()]; + + if (gltf_node->skeleton >= 0) { + const Skeleton3D *sk = Object::cast_to<Skeleton3D>(state->scene_nodes.find(node_index)->get()); + ERR_FAIL_COND(sk == nullptr); + + const String path = ap->get_parent()->get_path_to(sk); + const String bone = gltf_node->get_name(); + node_path = path + ":" + bone; + } else { + Node *root = ap->get_parent(); + Node *godot_node = state->scene_nodes.find(node_index)->get(); + node_path = root->get_path_to(godot_node); + } + + for (int i = 0; i < track.rotation_track.times.size(); i++) { + length = MAX(length, track.rotation_track.times[i]); + } + for (int i = 0; i < track.translation_track.times.size(); i++) { + length = MAX(length, track.translation_track.times[i]); + } + for (int i = 0; i < track.scale_track.times.size(); i++) { + length = MAX(length, track.scale_track.times[i]); + } + + for (int i = 0; i < track.weight_tracks.size(); i++) { + for (int j = 0; j < track.weight_tracks[i].times.size(); j++) { + length = MAX(length, track.weight_tracks[i].times[j]); + } + } + + if (track.rotation_track.values.size() || track.translation_track.values.size() || track.scale_track.values.size()) { + //make transform track + int track_idx = animation->get_track_count(); + animation->add_track(Animation::TYPE_TRANSFORM); + animation->track_set_path(track_idx, node_path); + //first determine animation length + + const float increment = 1.0 / float(bake_fps); + float time = 0.0; + + Vector3 base_pos; + Quat base_rot; + Vector3 base_scale = Vector3(1, 1, 1); + + if (!track.rotation_track.values.size()) { + base_rot = state->nodes[track_i->key()]->rotation.normalized(); + } + + if (!track.translation_track.values.size()) { + base_pos = state->nodes[track_i->key()]->translation; + } + + if (!track.scale_track.values.size()) { + base_scale = state->nodes[track_i->key()]->scale; + } + + bool last = false; + while (true) { + Vector3 pos = base_pos; + Quat rot = base_rot; + Vector3 scale = base_scale; + + if (track.translation_track.times.size()) { + pos = _interpolate_track<Vector3>(track.translation_track.times, track.translation_track.values, time, track.translation_track.interpolation); + } + + if (track.rotation_track.times.size()) { + rot = _interpolate_track<Quat>(track.rotation_track.times, track.rotation_track.values, time, track.rotation_track.interpolation); + } + + if (track.scale_track.times.size()) { + scale = _interpolate_track<Vector3>(track.scale_track.times, track.scale_track.values, time, track.scale_track.interpolation); + } + + if (gltf_node->skeleton >= 0) { + Transform xform; + xform.basis.set_quat_scale(rot, scale); + xform.origin = pos; + + const Skeleton3D *skeleton = state->skeletons[gltf_node->skeleton]->godot_skeleton; + const int bone_idx = skeleton->find_bone(gltf_node->get_name()); + xform = skeleton->get_bone_rest(bone_idx).affine_inverse() * xform; + 
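+ // Multiplying by the inverse of the bone rest stores the key as an offset from the rest pose, which is how Godot applies transform tracks to skeleton bones.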
+ rot = xform.basis.get_rotation_quat(); + rot.normalize(); + scale = xform.basis.get_scale(); + pos = xform.origin; + } + + animation->transform_track_insert_key(track_idx, time, pos, rot, scale); + + if (last) { + break; + } + time += increment; + if (time >= length) { + last = true; + time = length; + } + } + } + + for (int i = 0; i < track.weight_tracks.size(); i++) { + ERR_CONTINUE(gltf_node->mesh < 0 || gltf_node->mesh >= state->meshes.size()); + Ref<GLTFMesh> mesh = state->meshes[gltf_node->mesh]; + ERR_CONTINUE(mesh.is_null()); + ERR_CONTINUE(mesh->get_mesh().is_null()); + ERR_CONTINUE(mesh->get_mesh()->get_mesh().is_null()); + const String prop = "blend_shapes/" + mesh->get_mesh()->get_blend_shape_name(i); + + const String blend_path = String(node_path) + ":" + prop; + + const int track_idx = animation->get_track_count(); + animation->add_track(Animation::TYPE_VALUE); + animation->track_set_path(track_idx, blend_path); + + // Only LINEAR and STEP (NEAREST) can be supported out of the box by Godot's Animation, + // the other modes have to be baked. + GLTFAnimation::Interpolation gltf_interp = track.weight_tracks[i].interpolation; + if (gltf_interp == GLTFAnimation::INTERP_LINEAR || gltf_interp == GLTFAnimation::INTERP_STEP) { + animation->track_set_interpolation_type(track_idx, gltf_interp == GLTFAnimation::INTERP_STEP ? Animation::INTERPOLATION_NEAREST : Animation::INTERPOLATION_LINEAR); + for (int j = 0; j < track.weight_tracks[i].times.size(); j++) { + const float t = track.weight_tracks[i].times[j]; + const float attribs = track.weight_tracks[i].values[j]; + animation->track_insert_key(track_idx, t, attribs); + } + } else { + // CATMULLROMSPLINE or CUBIC_SPLINE have to be baked, apologies. + const float increment = 1.0 / float(bake_fps); + float time = 0.0; + bool last = false; + while (true) { + const float value = _interpolate_track<float>(track.weight_tracks[i].times, track.weight_tracks[i].values, time, gltf_interp); + animation->track_insert_key(track_idx, time, value); + if (last) { + break; + } + time += increment; + if (time >= length) { + last = true; + time = length; + } + } + } + } + } + + animation->set_length(length); + + ap->add_animation(name, animation); +} + +void GLTFDocument::_convert_mesh_instances(Ref<GLTFState> state) { + for (GLTFNodeIndex mi_node_i = 0; mi_node_i < state->nodes.size(); ++mi_node_i) { + Ref<GLTFNode> node = state->nodes[mi_node_i]; + + if (node->mesh < 0) { + continue; + } + Array json_skins; + if (state->json.has("skins")) { + json_skins = state->json["skins"]; + } + Map<GLTFNodeIndex, Node *>::Element *mi_element = state->scene_nodes.find(mi_node_i); + if (!mi_element) { + continue; + } + MeshInstance3D *mi = Object::cast_to<MeshInstance3D>(mi_element->get()); + ERR_CONTINUE(!mi); + Transform mi_xform = mi->get_transform(); + node->scale = mi_xform.basis.get_scale(); + node->rotation = mi_xform.basis.get_rotation_quat(); + node->translation = mi_xform.origin; + + Dictionary json_skin; + Skeleton3D *skeleton = Object::cast_to<Skeleton3D>(mi->get_node(mi->get_skeleton_path())); + if (!skeleton) { + continue; + } + if (!skeleton->get_bone_count()) { + continue; + } + Ref<Skin> skin = mi->get_skin(); + if (skin.is_null()) { + skin = skeleton->register_skin(nullptr)->get_skin(); + } + Ref<GLTFSkin> gltf_skin; + gltf_skin.instance(); + Array json_joints; + GLTFSkeletonIndex skeleton_gltf_i = -1; + + NodePath skeleton_path = mi->get_skeleton_path(); + bool is_unique = true; + for (int32_t skin_i = 0; skin_i < state->skins.size(); skin_i++) { + Ref<GLTFSkin> prev_gltf_skin = state->skins.write[skin_i]; + if 
(gltf_skin.is_null()) { + continue; + } + GLTFSkeletonIndex prev_skeleton = prev_gltf_skin->get_skeleton(); + if (prev_skeleton == -1 || prev_skeleton >= state->skeletons.size()) { + continue; + } + if (prev_gltf_skin->get_godot_skin() == skin && state->skeletons[prev_skeleton]->godot_skeleton == skeleton) { + node->skin = skin_i; + node->skeleton = prev_skeleton; + is_unique = false; + break; + } + } + if (!is_unique) { + continue; + } + GLTFSkeletonIndex skeleton_i = _convert_skeleton(state, skeleton); + skeleton_gltf_i = skeleton_i; + ERR_CONTINUE(skeleton_gltf_i == -1); + gltf_skin->skeleton = skeleton_gltf_i; + Ref<GLTFSkeleton> gltf_skeleton = state->skeletons.write[skeleton_gltf_i]; + for (int32_t bind_i = 0; bind_i < skin->get_bind_count(); bind_i++) { + String godot_bone_name = skin->get_bind_name(bind_i); + if (godot_bone_name.empty()) { + int32_t bone = skin->get_bind_bone(bind_i); + godot_bone_name = skeleton->get_bone_name(bone); + } + if (skeleton->find_bone(godot_bone_name) == -1) { + godot_bone_name = skeleton->get_bone_name(0); + } + BoneId bone_index = skeleton->find_bone(godot_bone_name); + ERR_CONTINUE(bone_index == -1); + Ref<GLTFNode> joint_node; + joint_node.instance(); + String gltf_bone_name = _gen_unique_bone_name(state, skeleton_gltf_i, godot_bone_name); + joint_node->set_name(gltf_bone_name); + + Transform bone_rest_xform = skeleton->get_bone_rest(bone_index); + joint_node->scale = bone_rest_xform.basis.get_scale(); + joint_node->rotation = bone_rest_xform.basis.get_rotation_quat(); + joint_node->translation = bone_rest_xform.origin; + joint_node->joint = true; + + int32_t joint_node_i = state->nodes.size(); + state->nodes.push_back(joint_node); + gltf_skeleton->godot_bone_node.insert(bone_index, joint_node_i); + int32_t joint_index = gltf_skin->joints.size(); + gltf_skin->joint_i_to_bone_i.insert(joint_index, bone_index); + gltf_skin->joints.push_back(joint_node_i); + gltf_skin->joints_original.push_back(joint_node_i); + gltf_skin->inverse_binds.push_back(skin->get_bind_pose(bind_i)); + json_joints.push_back(joint_node_i); + for (Map<GLTFNodeIndex, Node *>::Element *skin_scene_node_i = state->scene_nodes.front(); skin_scene_node_i; skin_scene_node_i = skin_scene_node_i->next()) { + if (skin_scene_node_i->get() == skeleton) { + gltf_skin->skin_root = skin_scene_node_i->key(); + json_skin["skeleton"] = skin_scene_node_i->key(); + } + } + gltf_skin->godot_skin = skin; + gltf_skin->set_name(_gen_unique_name(state, skin->get_name())); + } + for (int32_t bind_i = 0; bind_i < skin->get_bind_count(); bind_i++) { + String bone_name = skeleton->get_bone_name(bind_i); + String godot_bone_name = skin->get_bind_name(bind_i); + int32_t bone = -1; + if (skin->get_bind_bone(bind_i) != -1) { + bone = skin->get_bind_bone(bind_i); + godot_bone_name = skeleton->get_bone_name(bone); + } + bone = skeleton->find_bone(godot_bone_name); + if (bone == -1) { + continue; + } + BoneId bone_parent = skeleton->get_bone_parent(bone); + GLTFNodeIndex joint_node_i = gltf_skeleton->godot_bone_node[bone]; + ERR_CONTINUE(joint_node_i >= state->nodes.size()); + if (bone_parent != -1) { + GLTFNodeIndex parent_joint_gltf_node = gltf_skin->joints[bone_parent]; + Ref<GLTFNode> parent_joint_node = state->nodes.write[parent_joint_gltf_node]; + parent_joint_node->children.push_back(joint_node_i); + } else { + Node *node_parent = skeleton->get_parent(); + ERR_CONTINUE(!node_parent); + for (Map<GLTFNodeIndex, Node *>::Element *E = state->scene_nodes.front(); E; E = E->next()) { + if (E->get() == 
node_parent) { + GLTFNodeIndex gltf_node_i = E->key(); + Ref<GLTFNode> gltf_node = state->nodes.write[gltf_node_i]; + gltf_node->children.push_back(joint_node_i); + break; + } + } + } + } + _expand_skin(state, gltf_skin); + node->skin = state->skins.size(); + state->skins.push_back(gltf_skin); + + json_skin["inverseBindMatrices"] = _encode_accessor_as_xform(state, gltf_skin->inverse_binds, false); + json_skin["joints"] = json_joints; + json_skin["name"] = gltf_skin->get_name(); + json_skins.push_back(json_skin); + state->json["skins"] = json_skins; + } +} + +float GLTFDocument::solve_metallic(float p_dielectric_specular, float diffuse, float specular, float p_one_minus_specular_strength) { + if (specular <= p_dielectric_specular) { + return 0.0f; + } + + const float a = p_dielectric_specular; + const float b = diffuse * p_one_minus_specular_strength / (1.0f - p_dielectric_specular) + specular - 2.0f * p_dielectric_specular; + const float c = p_dielectric_specular - specular; + const float D = b * b - 4.0f * a * c; + return CLAMP((-b + Math::sqrt(D)) / (2.0f * a), 0.0f, 1.0f); +} + +float GLTFDocument::get_perceived_brightness(const Color p_color) { + const Color coeff = Color(R_BRIGHTNESS_COEFF, G_BRIGHTNESS_COEFF, B_BRIGHTNESS_COEFF); + const Color value = coeff * (p_color * p_color); + + const float r = value.r; + const float g = value.g; + const float b = value.b; + + return Math::sqrt(r + g + b); +} + +float GLTFDocument::get_max_component(const Color &p_color) { + const float r = p_color.r; + const float g = p_color.g; + const float b = p_color.b; + + return MAX(MAX(r, g), b); +} + +void GLTFDocument::_process_mesh_instances(Ref<GLTFState> state, Node *scene_root) { + for (GLTFNodeIndex node_i = 0; node_i < state->nodes.size(); ++node_i) { + Ref<GLTFNode> node = state->nodes[node_i]; + + if (node->skin >= 0 && node->mesh >= 0) { + const GLTFSkinIndex skin_i = node->skin; + + Map<GLTFNodeIndex, Node *>::Element *mi_element = state->scene_nodes.find(node_i); + EditorSceneImporterMeshNode3D *mi = Object::cast_to<EditorSceneImporterMeshNode3D>(mi_element->get()); + ERR_FAIL_COND(mi == nullptr); + + const GLTFSkeletonIndex skel_i = state->skins.write[node->skin]->skeleton; + Ref<GLTFSkeleton> gltf_skeleton = state->skeletons.write[skel_i]; + Skeleton3D *skeleton = gltf_skeleton->godot_skeleton; + ERR_FAIL_COND(skeleton == nullptr); + + mi->get_parent()->remove_child(mi); + skeleton->add_child(mi); + mi->set_owner(skeleton->get_owner()); + + mi->set_skin(state->skins.write[skin_i]->godot_skin); + mi->set_skeleton_path(mi->get_path_to(skeleton)); + mi->set_transform(Transform()); + } + } +} + +GLTFAnimation::Track GLTFDocument::_convert_animation_track(Ref<GLTFState> state, GLTFAnimation::Track p_track, Ref<Animation> p_animation, Transform p_bone_rest, int32_t p_track_i, GLTFNodeIndex p_node_i) { + Animation::InterpolationType interpolation = p_animation->track_get_interpolation_type(p_track_i); + + GLTFAnimation::Interpolation gltf_interpolation = GLTFAnimation::INTERP_LINEAR; + if (interpolation == Animation::InterpolationType::INTERPOLATION_LINEAR) { + gltf_interpolation = GLTFAnimation::INTERP_LINEAR; + } else if (interpolation == Animation::InterpolationType::INTERPOLATION_NEAREST) { + gltf_interpolation = GLTFAnimation::INTERP_STEP; + } else if (interpolation == Animation::InterpolationType::INTERPOLATION_CUBIC) { + gltf_interpolation = GLTFAnimation::INTERP_CUBIC_SPLINE; + } + Animation::TrackType track_type = p_animation->track_get_type(p_track_i); + int32_t key_count = 
p_animation->track_get_key_count(p_track_i); + Vector<float> times; + times.resize(key_count); + String path = p_animation->track_get_path(p_track_i); + for (int32_t key_i = 0; key_i < key_count; key_i++) { + times.write[key_i] = p_animation->track_get_key_time(p_track_i, key_i); + } + const float BAKE_FPS = 30.0f; + if (track_type == Animation::TYPE_TRANSFORM) { + p_track.translation_track.times = times; + p_track.translation_track.interpolation = gltf_interpolation; + p_track.rotation_track.times = times; + p_track.rotation_track.interpolation = gltf_interpolation; + p_track.scale_track.times = times; + p_track.scale_track.interpolation = gltf_interpolation; + + p_track.scale_track.values.resize(key_count); + p_track.scale_track.interpolation = gltf_interpolation; + p_track.translation_track.values.resize(key_count); + p_track.translation_track.interpolation = gltf_interpolation; + p_track.rotation_track.values.resize(key_count); + p_track.rotation_track.interpolation = gltf_interpolation; + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Vector3 translation; + Quat rotation; + Vector3 scale; + Error err = p_animation->transform_track_get_key(p_track_i, key_i, &translation, &rotation, &scale); + ERR_CONTINUE(err != OK); + Transform xform; + xform.basis.set_quat_scale(rotation, scale); + xform.origin = translation; + xform = p_bone_rest * xform; + p_track.translation_track.values.write[key_i] = xform.get_origin(); + p_track.rotation_track.values.write[key_i] = xform.basis.get_rotation_quat(); + p_track.scale_track.values.write[key_i] = xform.basis.get_scale(); + } + } else if (path.find(":transform") != -1) { + p_track.translation_track.times = times; + p_track.translation_track.interpolation = gltf_interpolation; + p_track.rotation_track.times = times; + p_track.rotation_track.interpolation = gltf_interpolation; + p_track.scale_track.times = times; + p_track.scale_track.interpolation = gltf_interpolation; + + p_track.scale_track.values.resize(key_count); + p_track.scale_track.interpolation = gltf_interpolation; + p_track.translation_track.values.resize(key_count); + p_track.translation_track.interpolation = gltf_interpolation; + p_track.rotation_track.values.resize(key_count); + p_track.rotation_track.interpolation = gltf_interpolation; + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Transform xform = p_animation->track_get_key_value(p_track_i, key_i); + p_track.translation_track.values.write[key_i] = xform.get_origin(); + p_track.rotation_track.values.write[key_i] = xform.basis.get_rotation_quat(); + p_track.scale_track.values.write[key_i] = xform.basis.get_scale(); + } + } else if (track_type == Animation::TYPE_VALUE) { + if (path.find("/rotation_quat") != -1) { + p_track.rotation_track.times = times; + p_track.rotation_track.interpolation = gltf_interpolation; + + p_track.rotation_track.values.resize(key_count); + p_track.rotation_track.interpolation = gltf_interpolation; + + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Quat rotation_track = p_animation->track_get_key_value(p_track_i, key_i); + p_track.rotation_track.values.write[key_i] = rotation_track; + } + } else if (path.find(":translation") != -1) { + p_track.translation_track.times = times; + p_track.translation_track.interpolation = gltf_interpolation; + + p_track.translation_track.values.resize(key_count); + p_track.translation_track.interpolation = gltf_interpolation; + + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Vector3 translation = p_animation->track_get_key_value(p_track_i, 
key_i); + p_track.translation_track.values.write[key_i] = translation; + } + } else if (path.find(":rotation_degrees") != -1) { + p_track.rotation_track.times = times; + p_track.rotation_track.interpolation = gltf_interpolation; + + p_track.rotation_track.values.resize(key_count); + p_track.rotation_track.interpolation = gltf_interpolation; + + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Quat rotation; + Vector3 rotation_degrees = p_animation->track_get_key_value(p_track_i, key_i); + Vector3 rotation_radian; + rotation_radian.x = Math::deg2rad(rotation_degrees.x); + rotation_radian.y = Math::deg2rad(rotation_degrees.y); + rotation_radian.z = Math::deg2rad(rotation_degrees.z); + rotation.set_euler(rotation_radian); + p_track.rotation_track.values.write[key_i] = rotation; + } + } else if (path.find(":scale") != -1) { + p_track.scale_track.times = times; + p_track.scale_track.interpolation = gltf_interpolation; + + p_track.scale_track.values.resize(key_count); + p_track.scale_track.interpolation = gltf_interpolation; + + for (int32_t key_i = 0; key_i < key_count; key_i++) { + Vector3 scale_track = p_animation->track_get_key_value(p_track_i, key_i); + p_track.scale_track.values.write[key_i] = scale_track; + } + } + } else if (track_type == Animation::TYPE_BEZIER) { + if (path.find("/scale") != -1) { + const int32_t keys = p_animation->track_get_key_time(p_track_i, key_count - 1) * BAKE_FPS; + if (!p_track.scale_track.times.size()) { + Vector<float> new_times; + new_times.resize(keys); + for (int32_t key_i = 0; key_i < keys; key_i++) { + new_times.write[key_i] = key_i / BAKE_FPS; + } + p_track.scale_track.times = new_times; + p_track.scale_track.interpolation = gltf_interpolation; + + p_track.scale_track.values.resize(keys); + + for (int32_t key_i = 0; key_i < keys; key_i++) { + p_track.scale_track.values.write[key_i] = Vector3(1.0f, 1.0f, 1.0f); + } + p_track.scale_track.interpolation = gltf_interpolation; + } + + for (int32_t key_i = 0; key_i < keys; key_i++) { + Vector3 bezier_track = p_track.scale_track.values[key_i]; + if (path.find("/scale:x") != -1) { + bezier_track.x = p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.x = p_bone_rest.affine_inverse().basis.get_scale().x * bezier_track.x; + } else if (path.find("/scale:y") != -1) { + bezier_track.y = p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.y = p_bone_rest.affine_inverse().basis.get_scale().y * bezier_track.y; + } else if (path.find("/scale:z") != -1) { + bezier_track.z = p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.z = p_bone_rest.affine_inverse().basis.get_scale().z * bezier_track.z; + } + p_track.scale_track.values.write[key_i] = bezier_track; + } + } else if (path.find("/translation") != -1) { + const int32_t keys = p_animation->track_get_key_time(p_track_i, key_count - 1) * BAKE_FPS; + if (!p_track.translation_track.times.size()) { + Vector<float> new_times; + new_times.resize(keys); + for (int32_t key_i = 0; key_i < keys; key_i++) { + new_times.write[key_i] = key_i / BAKE_FPS; + } + p_track.translation_track.times = new_times; + p_track.translation_track.interpolation = gltf_interpolation; + + p_track.translation_track.values.resize(keys); + p_track.translation_track.interpolation = gltf_interpolation; + } + + for (int32_t key_i = 0; key_i < keys; key_i++) { + Vector3 bezier_track = p_track.translation_track.values[key_i]; + if (path.find("/translation:x") != -1) { + bezier_track.x = 
p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.x = p_bone_rest.affine_inverse().origin.x * bezier_track.x; + } else if (path.find("/translation:y") != -1) { + bezier_track.y = p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.y = p_bone_rest.affine_inverse().origin.y * bezier_track.y; + } else if (path.find("/translation:z") != -1) { + bezier_track.z = p_animation->bezier_track_interpolate(p_track_i, key_i / BAKE_FPS); + bezier_track.z = p_bone_rest.affine_inverse().origin.z * bezier_track.z; + } + p_track.translation_track.values.write[key_i] = bezier_track; + } + } + } + + return p_track; +} + +void GLTFDocument::_convert_animation(Ref<GLTFState> state, AnimationPlayer *ap, String p_animation_track_name) { + Ref<Animation> animation = ap->get_animation(p_animation_track_name); + Ref<GLTFAnimation> gltf_animation; + gltf_animation.instance(); + gltf_animation->set_name(_gen_unique_name(state, p_animation_track_name)); + + for (int32_t track_i = 0; track_i < animation->get_track_count(); track_i++) { + if (!animation->track_is_enabled(track_i)) { + continue; + } + String orig_track_path = animation->track_get_path(track_i); + if (String(orig_track_path).find(":translation") != -1) { + const Vector<String> node_suffix = String(orig_track_path).split(":translation"); + const NodePath path = node_suffix[0]; + const Node *node = ap->get_parent()->get_node_or_null(path); + for (Map<GLTFNodeIndex, Node *>::Element *translation_scene_node_i = state->scene_nodes.front(); translation_scene_node_i; translation_scene_node_i = translation_scene_node_i->next()) { + if (translation_scene_node_i->get() == node) { + GLTFNodeIndex node_index = translation_scene_node_i->key(); + Map<int, GLTFAnimation::Track>::Element *translation_track_i = gltf_animation->get_tracks().find(node_index); + GLTFAnimation::Track track; + if (translation_track_i) { + track = translation_track_i->get(); + } + track = _convert_animation_track(state, track, animation, Transform(), track_i, node_index); + gltf_animation->get_tracks().insert(node_index, track); + } + } + } else if (String(orig_track_path).find(":rotation_degrees") != -1) { + const Vector<String> node_suffix = String(orig_track_path).split(":rotation_degrees"); + const NodePath path = node_suffix[0]; + const Node *node = ap->get_parent()->get_node_or_null(path); + for (Map<GLTFNodeIndex, Node *>::Element *rotation_degree_scene_node_i = state->scene_nodes.front(); rotation_degree_scene_node_i; rotation_degree_scene_node_i = rotation_degree_scene_node_i->next()) { + if (rotation_degree_scene_node_i->get() == node) { + GLTFNodeIndex node_index = rotation_degree_scene_node_i->key(); + Map<int, GLTFAnimation::Track>::Element *rotation_degree_track_i = gltf_animation->get_tracks().find(node_index); + GLTFAnimation::Track track; + if (rotation_degree_track_i) { + track = rotation_degree_track_i->get(); + } + track = _convert_animation_track(state, track, animation, Transform(), track_i, node_index); + gltf_animation->get_tracks().insert(node_index, track); + } + } + } else if (String(orig_track_path).find(":scale") != -1) { + const Vector<String> node_suffix = String(orig_track_path).split(":scale"); + const NodePath path = node_suffix[0]; + const Node *node = ap->get_parent()->get_node_or_null(path); + for (Map<GLTFNodeIndex, Node *>::Element *scale_scene_node_i = state->scene_nodes.front(); scale_scene_node_i; scale_scene_node_i = scale_scene_node_i->next()) { + if (scale_scene_node_i->get() == node) { + 
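// Fold the scale keys into any track already started for this glTF node, so translation, rotation and scale keys for the same node share one GLTFAnimation::Track. +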
GLTFNodeIndex node_index = scale_scene_node_i->key(); + Map<int, GLTFAnimation::Track>::Element *scale_track_i = gltf_animation->get_tracks().find(node_index); + GLTFAnimation::Track track; + if (scale_track_i) { + track = scale_track_i->get(); + } + track = _convert_animation_track(state, track, animation, Transform(), track_i, node_index); + gltf_animation->get_tracks().insert(node_index, track); + } + } + } else if (String(orig_track_path).find(":transform") != -1) { + const Vector<String> node_suffix = String(orig_track_path).split(":transform"); + const NodePath path = node_suffix[0]; + const Node *node = ap->get_parent()->get_node_or_null(path); + for (Map<GLTFNodeIndex, Node *>::Element *transform_track_i = state->scene_nodes.front(); transform_track_i; transform_track_i = transform_track_i->next()) { + if (transform_track_i->get() == node) { + GLTFAnimation::Track track; + track = _convert_animation_track(state, track, animation, Transform(), track_i, transform_track_i->key()); + gltf_animation->get_tracks().insert(transform_track_i->key(), track); + } + } + } else if (String(orig_track_path).find(":blend_shapes/") != -1) { + const Vector<String> node_suffix = String(orig_track_path).split(":blend_shapes/"); + const NodePath path = node_suffix[0]; + const String suffix = node_suffix[1]; + const Node *node = ap->get_parent()->get_node_or_null(path); + for (Map<GLTFNodeIndex, Node *>::Element *transform_track_i = state->scene_nodes.front(); transform_track_i; transform_track_i = transform_track_i->next()) { + if (transform_track_i->get() == node) { + const MeshInstance3D *mi = Object::cast_to<MeshInstance3D>(node); + if (!mi) { + continue; + } + Ref<ArrayMesh> array_mesh = mi->get_mesh(); + if (array_mesh.is_null()) { + continue; + } + if (node_suffix.size() != 2) { + continue; + } + GLTFNodeIndex mesh_index = -1; + for (GLTFNodeIndex node_i = 0; node_i < state->scene_nodes.size(); node_i++) { + if (state->scene_nodes[node_i] == node) { + mesh_index = node_i; + break; + } + } + ERR_CONTINUE(mesh_index == -1); + Ref<Mesh> mesh = mi->get_mesh(); + ERR_CONTINUE(mesh.is_null()); + for (int32_t shape_i = 0; shape_i < mesh->get_blend_shape_count(); shape_i++) { + if (mesh->get_blend_shape_name(shape_i) != suffix) { + continue; + } + GLTFAnimation::Track track; + Map<int, GLTFAnimation::Track>::Element *blend_shape_track_i = gltf_animation->get_tracks().find(mesh_index); + if (blend_shape_track_i) { + track = blend_shape_track_i->get(); + } + Animation::InterpolationType interpolation = animation->track_get_interpolation_type(track_i); + + GLTFAnimation::Interpolation gltf_interpolation = GLTFAnimation::INTERP_LINEAR; + if (interpolation == Animation::InterpolationType::INTERPOLATION_LINEAR) { + gltf_interpolation = GLTFAnimation::INTERP_LINEAR; + } else if (interpolation == Animation::InterpolationType::INTERPOLATION_NEAREST) { + gltf_interpolation = GLTFAnimation::INTERP_STEP; + } else if (interpolation == Animation::InterpolationType::INTERPOLATION_CUBIC) { + gltf_interpolation = GLTFAnimation::INTERP_CUBIC_SPLINE; + } + Animation::TrackType track_type = animation->track_get_type(track_i); + if (track_type == Animation::TYPE_VALUE) { + int32_t key_count = animation->track_get_key_count(track_i); + GLTFAnimation::Channel<float> weight; + weight.interpolation = gltf_interpolation; + weight.times.resize(key_count); + for (int32_t time_i = 0; time_i < key_count; time_i++) { + weight.times.write[time_i] = animation->track_get_key_time(track_i, time_i); + } + weight.values.resize(key_count); + 
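// Copy the raw key values; each Godot blend shape value track becomes one weight channel on this node's glTF animation track. +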
for (int32_t value_i = 0; value_i < key_count; value_i++) { + weight.values.write[value_i] = animation->track_get_key_value(track_i, value_i); + } + track.weight_tracks.push_back(weight); + } + gltf_animation->get_tracks()[mesh_index] = track; + } + } + } + + } else if (String(orig_track_path).find(":") != -1) { + //Process skeleton + const Vector<String> node_suffix = String(orig_track_path).split(":"); + const String node = node_suffix[0]; + const NodePath node_path = node; + const String suffix = node_suffix[1]; + Node *godot_node = ap->get_parent()->get_node_or_null(node_path); + Skeleton3D *skeleton = nullptr; + GLTFSkeletonIndex skeleton_gltf_i = -1; + for (GLTFSkeletonIndex skeleton_i = 0; skeleton_i < state->skeletons.size(); skeleton_i++) { + if (state->skeletons[skeleton_i]->godot_skeleton == cast_to<Skeleton3D>(godot_node)) { + skeleton = state->skeletons[skeleton_i]->godot_skeleton; + skeleton_gltf_i = skeleton_i; + ERR_CONTINUE(!skeleton); + Ref<GLTFSkeleton> skeleton_gltf = state->skeletons[skeleton_gltf_i]; + int32_t bone = skeleton->find_bone(suffix); + ERR_CONTINUE(bone == -1); + Transform xform = skeleton->get_bone_rest(bone); + if (!skeleton_gltf->godot_bone_node.has(bone)) { + continue; + } + GLTFNodeIndex node_i = skeleton_gltf->godot_bone_node[bone]; + Map<int, GLTFAnimation::Track>::Element *property_track_i = gltf_animation->get_tracks().find(node_i); + GLTFAnimation::Track track; + if (property_track_i) { + track = property_track_i->get(); + } + track = _convert_animation_track(state, track, animation, xform, track_i, node_i); + gltf_animation->get_tracks()[node_i] = track; + } + } + } else if (String(orig_track_path).find(":") == -1) { + const Node *node = ap->get_parent()->get_node_or_null(orig_track_path); + for (Map<GLTFNodeIndex, Node *>::Element *scene_node_i = state->scene_nodes.front(); scene_node_i; scene_node_i = scene_node_i->next()) { + if (scene_node_i->get() == node) { + GLTFNodeIndex node_index = scene_node_i->key(); + Map<int, GLTFAnimation::Track>::Element *node_track_i = gltf_animation->get_tracks().find(node_index); + GLTFAnimation::Track track; + if (node_track_i) { + track = node_track_i->get(); + } + track = _convert_animation_track(state, track, animation, Transform(), track_i, node_index); + gltf_animation->get_tracks().insert(node_index, track); + break; + } + } + } + } + if (gltf_animation->get_tracks().size()) { + state->animations.push_back(gltf_animation); + } +} + +Error GLTFDocument::parse(Ref<GLTFState> state, String p_path, bool p_read_binary) { + Error err; + FileAccessRef f = FileAccess::open(p_path, FileAccess::READ, &err); + if (!f) { + return err; + } + uint32_t magic = f->get_32(); + if (magic == 0x46546C67) { + //binary file + //text file + err = _parse_glb(p_path, state); + if (err) + return FAILED; + } else { + //text file + err = _parse_json(p_path, state); + if (err) + return FAILED; + } + f->close(); + + ERR_FAIL_COND_V(!state->json.has("asset"), Error::FAILED); + + Dictionary asset = state->json["asset"]; + + ERR_FAIL_COND_V(!asset.has("version"), Error::FAILED); + + String version = asset["version"]; + + state->major_version = version.get_slice(".", 0).to_int(); + state->minor_version = version.get_slice(".", 1).to_int(); + + /* STEP 0 PARSE SCENE */ + err = _parse_scenes(state); + if (err != OK) + return Error::FAILED; + + /* STEP 1 PARSE NODES */ + err = _parse_nodes(state); + if (err != OK) + return Error::FAILED; + + /* STEP 2 PARSE BUFFERS */ + err = _parse_buffers(state, p_path.get_base_dir()); + if (err != OK) + 
return Error::FAILED; + + /* STEP 3 PARSE BUFFER VIEWS */ + err = _parse_buffer_views(state); + if (err != OK) + return Error::FAILED; + + /* STEP 4 PARSE ACCESSORS */ + err = _parse_accessors(state); + if (err != OK) + return Error::FAILED; + + /* STEP 5 PARSE IMAGES */ + err = _parse_images(state, p_path.get_base_dir()); + if (err != OK) + return Error::FAILED; + + /* STEP 6 PARSE TEXTURES */ + err = _parse_textures(state); + if (err != OK) + return Error::FAILED; + + /* STEP 7 PARSE TEXTURES */ + err = _parse_materials(state); + if (err != OK) + return Error::FAILED; + + /* STEP 9 PARSE SKINS */ + err = _parse_skins(state); + if (err != OK) + return Error::FAILED; + + /* STEP 10 DETERMINE SKELETONS */ + err = _determine_skeletons(state); + if (err != OK) + return Error::FAILED; + + /* STEP 11 CREATE SKELETONS */ + err = _create_skeletons(state); + if (err != OK) + return Error::FAILED; + + /* STEP 12 CREATE SKINS */ + err = _create_skins(state); + if (err != OK) + return Error::FAILED; + + /* STEP 13 PARSE MESHES (we have enough info now) */ + err = _parse_meshes(state); + if (err != OK) + return Error::FAILED; + + /* STEP 14 PARSE LIGHTS */ + err = _parse_lights(state); + if (err != OK) { + return Error::FAILED; + } + + /* STEP 15 PARSE CAMERAS */ + err = _parse_cameras(state); + if (err != OK) + return Error::FAILED; + + /* STEP 16 PARSE ANIMATIONS */ + err = _parse_animations(state); + if (err != OK) + return Error::FAILED; + + /* STEP 17 ASSIGN SCENE NAMES */ + _assign_scene_names(state); + + return OK; +} + +Dictionary GLTFDocument::_serialize_texture_transform_uv2(Ref<BaseMaterial3D> p_material) { + Dictionary extension; + Ref<BaseMaterial3D> mat = p_material; + if (mat.is_valid()) { + Dictionary texture_transform; + Array offset; + offset.resize(2); + offset[0] = mat->get_uv2_offset().x; + offset[1] = mat->get_uv2_offset().y; + texture_transform["offset"] = offset; + Array scale; + scale.resize(2); + scale[0] = mat->get_uv2_scale().x; + scale[1] = mat->get_uv2_scale().y; + texture_transform["scale"] = scale; + // Godot doesn't support texture rotation + extension["KHR_texture_transform"] = texture_transform; + } + return extension; +} + +Dictionary GLTFDocument::_serialize_texture_transform_uv1(Ref<BaseMaterial3D> p_material) { + Dictionary extension; + if (p_material.is_valid()) { + Dictionary texture_transform; + Array offset; + offset.resize(2); + offset[0] = p_material->get_uv1_offset().x; + offset[1] = p_material->get_uv1_offset().y; + texture_transform["offset"] = offset; + Array scale; + scale.resize(2); + scale[0] = p_material->get_uv1_scale().x; + scale[1] = p_material->get_uv1_scale().y; + texture_transform["scale"] = scale; + // Godot doesn't support texture rotation + extension["KHR_texture_transform"] = texture_transform; + } + return extension; +} + +Error GLTFDocument::_serialize_version(Ref<GLTFState> state) { + const String version = "2.0"; + state->major_version = version.get_slice(".", 0).to_int(); + state->minor_version = version.get_slice(".", 1).to_int(); + Dictionary asset; + asset["version"] = version; + + String hash = VERSION_HASH; + asset["generator"] = String(VERSION_FULL_NAME) + String("@") + (hash.length() == 0 ? 
String("unknown") : hash); + state->json["asset"] = asset; + ERR_FAIL_COND_V(!asset.has("version"), Error::FAILED); + ERR_FAIL_COND_V(!state->json.has("asset"), Error::FAILED); + return OK; +} + +Error GLTFDocument::_serialize_file(Ref<GLTFState> state, const String p_path) { + Error err = FAILED; + if (p_path.to_lower().ends_with("glb")) { + err = _encode_buffer_glb(state, p_path); + ERR_FAIL_COND_V(err != OK, err); + FileAccessRef f = FileAccess::open(p_path, FileAccess::WRITE, &err); + ERR_FAIL_COND_V(!f, FAILED); + + String json = JSON::print(state->json); + + const uint32_t magic = 0x46546C67; // GLTF + const int32_t header_size = 12; + const int32_t chunk_header_size = 8; + + for (int32_t pad_i = 0; pad_i < (chunk_header_size + json.utf8().length()) % 4; pad_i++) { + json += " "; + } + CharString cs = json.utf8(); + const uint32_t text_chunk_length = cs.length(); + + const uint32_t text_chunk_type = 0x4E4F534A; //JSON + int32_t binary_data_length = 0; + if (state->buffers.size()) { + binary_data_length = state->buffers[0].size(); + } + const int32_t binary_chunk_length = binary_data_length; + const int32_t binary_chunk_type = 0x004E4942; //BIN + + f->create(FileAccess::ACCESS_RESOURCES); + f->store_32(magic); + f->store_32(state->major_version); // version + f->store_32(header_size + chunk_header_size + text_chunk_length + chunk_header_size + binary_data_length); // length + f->store_32(text_chunk_length); + f->store_32(text_chunk_type); + f->store_buffer((uint8_t *)&cs[0], cs.length()); + if (binary_chunk_length) { + f->store_32(binary_chunk_length); + f->store_32(binary_chunk_type); + f->store_buffer(state->buffers[0].ptr(), binary_data_length); + } + + f->close(); + } else { + err = _encode_buffer_bins(state, p_path); + ERR_FAIL_COND_V(err != OK, err); + FileAccessRef f = FileAccess::open(p_path, FileAccess::WRITE, &err); + ERR_FAIL_COND_V(!f, FAILED); + + f->create(FileAccess::ACCESS_RESOURCES); + String json = JSON::print(state->json); + f->store_string(json); + f->close(); + } + return err; +} diff --git a/modules/gltf/gltf_document.h b/modules/gltf/gltf_document.h new file mode 100644 index 0000000000..0f4772cd26 --- /dev/null +++ b/modules/gltf/gltf_document.h @@ -0,0 +1,427 @@ +/*************************************************************************/ +/* gltf_document.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
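// The .glb produced by _serialize_file() above follows the standard GLB container layout
// (summarized from the glTF 2.0 spec, not from this diff):
//
//   bytes 0-11   header: magic 0x46546C67 ("glTF"), version, total file length
//   next 8       chunk 0 header: chunk length, chunk type 0x4E4F534A ("JSON")
//   ...          chunk 0 data: the JSON document, space-padded to a 4-byte boundary
//   next 8       chunk 1 header: chunk length, chunk type 0x004E4942 ("BIN")
//   ...          chunk 1 data: buffers[0], the packed binary payload
//
// The spec requires every chunk to start on a 4-byte boundary, which is why the JSON text
// is padded with trailing spaces before its length is written.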
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_DOCUMENT_H +#define GLTF_DOCUMENT_H + +#include "editor/import/resource_importer_scene.h" +#include "editor/import/scene_importer_mesh_node_3d.h" +#include "gltf_animation.h" +#include "scene/2d/node_2d.h" +#include "scene/3d/bone_attachment_3d.h" +#include "scene/3d/light_3d.h" +#include "scene/3d/mesh_instance_3d.h" +#include "scene/3d/node_3d.h" +#include "scene/3d/skeleton_3d.h" +#include "scene/animation/animation_player.h" +#include "scene/resources/material.h" +#include "scene/resources/texture.h" + +class GLTFState; +class GLTFSkin; +class GLTFNode; +class GLTFSpecGloss; +class GLTFSkeleton; + +using GLTFAccessorIndex = int; +using GLTFAnimationIndex = int; +using GLTFBufferIndex = int; +using GLTFBufferViewIndex = int; +using GLTFCameraIndex = int; +using GLTFImageIndex = int; +using GLTFMaterialIndex = int; +using GLTFMeshIndex = int; +using GLTFLightIndex = int; +using GLTFNodeIndex = int; +using GLTFSkeletonIndex = int; +using GLTFSkinIndex = int; +using GLTFTextureIndex = int; + +class GLTFDocument : public Resource { + GDCLASS(GLTFDocument, Resource); + friend class GLTFState; + friend class GLTFSkin; + friend class GLTFSkeleton; + +public: + enum GLTFType { + TYPE_SCALAR, + TYPE_VEC2, + TYPE_VEC3, + TYPE_VEC4, + TYPE_MAT2, + TYPE_MAT3, + TYPE_MAT4, + }; + + enum { + ARRAY_BUFFER = 34962, + ELEMENT_ARRAY_BUFFER = 34963, + + TYPE_BYTE = 5120, + TYPE_UNSIGNED_BYTE = 5121, + TYPE_SHORT = 5122, + TYPE_UNSIGNED_SHORT = 5123, + TYPE_UNSIGNED_INT = 5125, + TYPE_FLOAT = 5126, + + COMPONENT_TYPE_BYTE = 5120, + COMPONENT_TYPE_UNSIGNED_BYTE = 5121, + COMPONENT_TYPE_SHORT = 5122, + COMPONENT_TYPE_UNSIGNED_SHORT = 5123, + COMPONENT_TYPE_INT = 5125, + COMPONENT_TYPE_FLOAT = 5126, + }; + +private: + template <class T> + static Array to_array(const Vector<T> &p_inp) { + Array ret; + for (int i = 0; i < p_inp.size(); i++) { + ret.push_back(p_inp[i]); + } + return ret; + } + + template <class T> + static Array to_array(const Set<T> &p_inp) { + Array ret; + typename Set<T>::Element *elem = p_inp.front(); + while (elem) { + ret.push_back(elem->get()); + elem = elem->next(); + } + return ret; + } + + template <class T> + static void set_from_array(Vector<T> &r_out, const Array &p_inp) { + r_out.clear(); + for (int i = 0; i < p_inp.size(); i++) { + r_out.push_back(p_inp[i]); + } + } + + template <class T> + static void set_from_array(Set<T> &r_out, const Array &p_inp) { + r_out.clear(); + for (int i = 0; i < p_inp.size(); i++) { + r_out.insert(p_inp[i]); + } + } + template <class K, class V> + static Dictionary to_dict(const Map<K, V> &p_inp) { + Dictionary ret; + for (typename Map<K, V>::Element *E = p_inp.front(); E; E = E->next()) { + ret[E->key()] = E->value(); + } + return ret; + } + + template <class K, class V> + static void set_from_dict(Map<K, V> &r_out, const Dictionary &p_inp) { + r_out.clear(); + Array keys = p_inp.keys(); + for (int i = 0; i < keys.size(); i++) { + r_out[keys[i]] = 
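// The template helpers above (set_from_dict continues below) bridge internal containers to
// script-facing types: Vector<T>/Set<T> <-> Array and Map<K, V> <-> Dictionary. The GLTF*
// resources added later in this commit (e.g. GLTFSkeleton, GLTFSkin) rely on them to expose
// their Maps and Sets as properties. A hedged usage sketch (these helpers are private, so
// in the commit only friend classes call them; access control is ignored here):
Map<int, String> bone_names;
bone_names[0] = "root";
bone_names[1] = "spine";
Dictionary as_dict = GLTFDocument::to_dict(bone_names); // {0: "root", 1: "spine"}
Map<int, String> round_trip;
GLTFDocument::set_from_dict(round_trip, as_dict); // back to a Map<int, String>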
p_inp[keys[i]]; + } + } + double _filter_number(double p_float); + String _get_component_type_name(const uint32_t p_component); + int _get_component_type_size(const int component_type); + Error _parse_scenes(Ref<GLTFState> state); + Error _parse_nodes(Ref<GLTFState> state); + String _get_type_name(const GLTFType p_component); + String _get_accessor_type_name(const GLTFDocument::GLTFType p_type); + String _sanitize_scene_name(const String &name); + String _gen_unique_name(Ref<GLTFState> state, const String &p_name); + String _sanitize_bone_name(const String &name); + String _gen_unique_bone_name(Ref<GLTFState> state, + const GLTFSkeletonIndex skel_i, + const String &p_name); + GLTFTextureIndex _set_texture(Ref<GLTFState> state, Ref<Texture2D> p_texture); + Ref<Texture2D> _get_texture(Ref<GLTFState> state, + const GLTFTextureIndex p_texture); + Error _parse_json(const String &p_path, Ref<GLTFState> state); + Error _parse_glb(const String &p_path, Ref<GLTFState> state); + void _compute_node_heights(Ref<GLTFState> state); + Error _parse_buffers(Ref<GLTFState> state, const String &p_base_path); + Error _parse_buffer_views(Ref<GLTFState> state); + GLTFType _get_type_from_str(const String &p_string); + Error _parse_accessors(Ref<GLTFState> state); + Error _decode_buffer_view(Ref<GLTFState> state, double *dst, + const GLTFBufferViewIndex p_buffer_view, + const int skip_every, const int skip_bytes, + const int element_size, const int count, + const GLTFType type, const int component_count, + const int component_type, const int component_size, + const bool normalized, const int byte_offset, + const bool for_vertex); + Vector<double> _decode_accessor(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<float> _decode_accessor_as_floats(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<int> _decode_accessor_as_ints(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Vector2> _decode_accessor_as_vec2(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Vector3> _decode_accessor_as_vec3(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Color> _decode_accessor_as_color(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Quat> _decode_accessor_as_quat(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Transform2D> _decode_accessor_as_xform2d(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Basis> _decode_accessor_as_basis(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Vector<Transform> _decode_accessor_as_xform(Ref<GLTFState> state, + const GLTFAccessorIndex p_accessor, + const bool p_for_vertex); + Error _parse_meshes(Ref<GLTFState> state); + Error _serialize_textures(Ref<GLTFState> state); + Error _serialize_images(Ref<GLTFState> state, const String &p_path); + Error _serialize_lights(Ref<GLTFState> state); + Error _parse_images(Ref<GLTFState> state, const String &p_base_path); + Error _parse_textures(Ref<GLTFState> state); + Error _parse_materials(Ref<GLTFState> state); + void _set_texture_transform_uv1(const Dictionary &d, Ref<BaseMaterial3D> material); + void spec_gloss_to_rough_metal(Ref<GLTFSpecGloss> r_spec_gloss, + Ref<BaseMaterial3D> p_material); + static void 
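// An illustrative sketch of the sizing rule behind the accessor helpers declared above
// (values follow the glTF 2.0 spec; the real _get_component_type_size() is defined in
// gltf_document.cpp and is not shown in this hunk). An accessor's element size is the
// component count implied by its type times the byte size of its componentType:
static int sketch_component_count(GLTFDocument::GLTFType p_type) {
	switch (p_type) {
		case GLTFDocument::TYPE_SCALAR: return 1;
		case GLTFDocument::TYPE_VEC2: return 2;
		case GLTFDocument::TYPE_VEC3: return 3;
		case GLTFDocument::TYPE_VEC4: return 4;
		case GLTFDocument::TYPE_MAT2: return 4;
		case GLTFDocument::TYPE_MAT3: return 9;
		case GLTFDocument::TYPE_MAT4: return 16;
	}
	return 0;
}
static int sketch_component_size(int p_component_type) {
	switch (p_component_type) {
		case GLTFDocument::COMPONENT_TYPE_BYTE:
		case GLTFDocument::COMPONENT_TYPE_UNSIGNED_BYTE: return 1;
		case GLTFDocument::COMPONENT_TYPE_SHORT:
		case GLTFDocument::COMPONENT_TYPE_UNSIGNED_SHORT: return 2;
		case GLTFDocument::COMPONENT_TYPE_INT:
		case GLTFDocument::COMPONENT_TYPE_FLOAT: return 4;
	}
	return 0;
}
// e.g. a VEC3 / FLOAT accessor (vertex positions) occupies 3 * 4 = 12 bytes per element.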
spec_gloss_to_metal_base_color(const Color &p_specular_factor, + const Color &p_diffuse, + Color &r_base_color, + float &r_metallic); + GLTFNodeIndex _find_highest_node(Ref<GLTFState> state, + const Vector<GLTFNodeIndex> &subset); + bool _capture_nodes_in_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin, + const GLTFNodeIndex node_index); + void _capture_nodes_for_multirooted_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin); + Error _expand_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin); + Error _verify_skin(Ref<GLTFState> state, Ref<GLTFSkin> skin); + Error _parse_skins(Ref<GLTFState> state); + Error _determine_skeletons(Ref<GLTFState> state); + Error _reparent_non_joint_skeleton_subtrees( + Ref<GLTFState> state, Ref<GLTFSkeleton> skeleton, + const Vector<GLTFNodeIndex> &non_joints); + Error _reparent_to_fake_joint(Ref<GLTFState> state, Ref<GLTFSkeleton> skeleton, + const GLTFNodeIndex node_index); + Error _determine_skeleton_roots(Ref<GLTFState> state, + const GLTFSkeletonIndex skel_i); + Error _create_skeletons(Ref<GLTFState> state); + Error _map_skin_joints_indices_to_skeleton_bone_indices(Ref<GLTFState> state); + Error _serialize_skins(Ref<GLTFState> state); + Error _create_skins(Ref<GLTFState> state); + bool _skins_are_same(const Ref<Skin> skin_a, const Ref<Skin> skin_b); + void _remove_duplicate_skins(Ref<GLTFState> state); + Error _serialize_cameras(Ref<GLTFState> state); + Error _parse_cameras(Ref<GLTFState> state); + Error _parse_lights(Ref<GLTFState> state); + Error _parse_animations(Ref<GLTFState> state); + Error _serialize_animations(Ref<GLTFState> state); + BoneAttachment3D *_generate_bone_attachment(Ref<GLTFState> state, + Skeleton3D *skeleton, + const GLTFNodeIndex node_index); + EditorSceneImporterMeshNode3D *_generate_mesh_instance(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index); + Camera3D *_generate_camera(Ref<GLTFState> state, Node *scene_parent, + const GLTFNodeIndex node_index); + Light3D *_generate_light(Ref<GLTFState> state, Node *scene_parent, const GLTFNodeIndex node_index); + Node3D *_generate_spatial(Ref<GLTFState> state, Node *scene_parent, + const GLTFNodeIndex node_index); + void _assign_scene_names(Ref<GLTFState> state); + template <class T> + T _interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, + const float p_time, + const GLTFAnimation::Interpolation p_interp); + GLTFAccessorIndex _encode_accessor_as_quats(Ref<GLTFState> state, + const Vector<Quat> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_weights(Ref<GLTFState> state, + const Vector<Color> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_joints(Ref<GLTFState> state, + const Vector<Color> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_floats(Ref<GLTFState> state, + const Vector<real_t> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_vec2(Ref<GLTFState> state, + const Vector<Vector2> p_attribs, + const bool p_for_vertex); + + void _calc_accessor_vec2_min_max(int i, const int element_count, Vector<double> &type_max, Vector2 attribs, Vector<double> &type_min) { + if (i == 0) { + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = attribs[(i * element_count) + type_i]; + type_min.write[type_i] = attribs[(i * element_count) + type_i]; + } + } + for (int32_t type_i = 0; type_i < element_count; type_i++) { + type_max.write[type_i] = MAX(attribs[(i * element_count) + type_i], type_max[type_i]); + 
type_min.write[type_i] = MIN(attribs[(i * element_count) + type_i], type_min[type_i]); + type_max.write[type_i] = _filter_number(type_max.write[type_i]); + type_min.write[type_i] = _filter_number(type_min.write[type_i]); + } + } + + GLTFAccessorIndex _encode_accessor_as_vec3(Ref<GLTFState> state, + const Vector<Vector3> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_color(Ref<GLTFState> state, + const Vector<Color> p_attribs, + const bool p_for_vertex); + + void _calc_accessor_min_max(int p_i, const int p_element_count, Vector<double> &p_type_max, Vector<double> p_attribs, Vector<double> &p_type_min); + + GLTFAccessorIndex _encode_accessor_as_ints(Ref<GLTFState> state, + const Vector<int32_t> p_attribs, + const bool p_for_vertex); + GLTFAccessorIndex _encode_accessor_as_xform(Ref<GLTFState> state, + const Vector<Transform> p_attribs, + const bool p_for_vertex); + Error _encode_buffer_view(Ref<GLTFState> state, const double *src, + const int count, const GLTFType type, + const int component_type, const bool normalized, + const int byte_offset, const bool for_vertex, + GLTFBufferViewIndex &r_accessor); + Error _encode_accessors(Ref<GLTFState> state); + Error _encode_buffer_views(Ref<GLTFState> state); + Error _serialize_materials(Ref<GLTFState> state); + Error _serialize_meshes(Ref<GLTFState> state); + Error _serialize_nodes(Ref<GLTFState> state); + Error _serialize_scenes(Ref<GLTFState> state); + String interpolation_to_string(const GLTFAnimation::Interpolation p_interp); + GLTFAnimation::Track _convert_animation_track(Ref<GLTFState> state, + GLTFAnimation::Track p_track, + Ref<Animation> p_animation, Transform p_bone_rest, + int32_t p_track_i, + GLTFNodeIndex p_node_i); + Error _encode_buffer_bins(Ref<GLTFState> state, const String &p_path); + Error _encode_buffer_glb(Ref<GLTFState> state, const String &p_path); + Error _serialize_bone_attachment(Ref<GLTFState> state); + Dictionary _serialize_texture_transform_uv1(Ref<BaseMaterial3D> p_material); + Dictionary _serialize_texture_transform_uv2(Ref<BaseMaterial3D> p_material); + Error _serialize_version(Ref<GLTFState> state); + Error _serialize_file(Ref<GLTFState> state, const String p_path); + Error _serialize_extensions(Ref<GLTFState> state) const; + +public: + // http://www.itu.int/rec/R-REC-BT.601 + // http://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.601-7-201103-I!!PDF-E.pdf + static constexpr float R_BRIGHTNESS_COEFF = 0.299f; + static constexpr float G_BRIGHTNESS_COEFF = 0.587f; + static constexpr float B_BRIGHTNESS_COEFF = 0.114f; + +private: + // https://github.com/microsoft/glTF-SDK/blob/master/GLTFSDK/Source/PBRUtils.cpp#L9 + // https://bghgary.github.io/glTF/convert-between-workflows-bjs/js/babylon.pbrUtilities.js + static float solve_metallic(float p_dielectric_specular, float diffuse, + float specular, + float p_one_minus_specular_strength); + static float get_perceived_brightness(const Color p_color); + static float get_max_component(const Color &p_color); + +public: + void _process_mesh_instances(Ref<GLTFState> state, Node *scene_root); + void _generate_scene_node(Ref<GLTFState> state, Node *scene_parent, + Node3D *scene_root, + const GLTFNodeIndex node_index); + void _import_animation(Ref<GLTFState> state, AnimationPlayer *ap, + const GLTFAnimationIndex index, const int bake_fps); + GLTFMeshIndex _convert_mesh_instance(Ref<GLTFState> state, + MeshInstance3D *p_mesh_instance); + void _convert_mesh_instances(Ref<GLTFState> state); + GLTFCameraIndex _convert_camera(Ref<GLTFState> state, 
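// A hedged sketch of how the BT.601 coefficients declared above are commonly applied when
// collapsing a specular colour to a single brightness during the spec-gloss to metal-rough
// conversion (the actual get_perceived_brightness() body lives in gltf_document.cpp and is
// not shown in this hunk):
static float sketch_perceived_brightness(const Color &p_color) {
	// Weighted root-mean-square of the channels using the ITU-R BT.601 luma weights.
	return Math::sqrt(0.299f * p_color.r * p_color.r +
			0.587f * p_color.g * p_color.g +
			0.114f * p_color.b * p_color.b);
}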
Camera3D *p_camera); + void _convert_light_to_gltf(Light3D *light, Ref<GLTFState> state, Node3D *spatial, Ref<GLTFNode> gltf_node); + GLTFLightIndex _convert_light(Ref<GLTFState> state, Light3D *p_light); + GLTFSkeletonIndex _convert_skeleton(Ref<GLTFState> state, Skeleton3D *p_skeleton); + void _convert_spatial(Ref<GLTFState> state, Node3D *p_spatial, Ref<GLTFNode> p_node); + void _convert_scene_node(Ref<GLTFState> state, Node *p_current, Node *p_root, + const GLTFNodeIndex p_gltf_current, + const GLTFNodeIndex p_gltf_root); + + void _convert_csg_shape_to_gltf(Node *p_current, GLTFNodeIndex p_gltf_parent, Ref<GLTFNode> gltf_node, Ref<GLTFState> state); + + void _create_gltf_node(Ref<GLTFState> state, + Node *p_scene_parent, + GLTFNodeIndex current_node_i, + GLTFNodeIndex p_parent_node_index, + GLTFNodeIndex p_root_gltf_node, + Ref<GLTFNode> gltf_node); + void _convert_animation_player_to_gltf( + AnimationPlayer *animation_player, Ref<GLTFState> state, + const GLTFNodeIndex &p_gltf_current, + const GLTFNodeIndex &p_gltf_root_index, + Ref<GLTFNode> p_gltf_node, Node *p_scene_parent, + Node *p_root); + void _check_visibility(Node *p_node, bool &retflag); + void _convert_camera_to_gltf(Camera3D *camera, Ref<GLTFState> state, + Node3D *spatial, + Ref<GLTFNode> gltf_node); + void _convert_grid_map_to_gltf( + Node *p_scene_parent, + const GLTFNodeIndex &p_parent_node_index, + const GLTFNodeIndex &p_root_node_index, + Ref<GLTFNode> gltf_node, Ref<GLTFState> state, + Node *p_root_node); + void _convert_mult_mesh_instance_to_gltf( + Node *p_scene_parent, + const GLTFNodeIndex &p_parent_node_index, + const GLTFNodeIndex &p_root_node_index, + Ref<GLTFNode> gltf_node, Ref<GLTFState> state, + Node *p_root_node); + void _convert_skeleton_to_gltf( + Node *p_scene_parent, Ref<GLTFState> state, + const GLTFNodeIndex &p_parent_node_index, + const GLTFNodeIndex &p_root_node_index, + Ref<GLTFNode> gltf_node, Node *p_root_node); + void _convert_bone_attachment_to_gltf(Node *p_scene_parent, + Ref<GLTFState> state, + Ref<GLTFNode> gltf_node, + bool &retflag); + void _convert_mesh_to_gltf(Node *p_scene_parent, + Ref<GLTFState> state, Node3D *spatial, + Ref<GLTFNode> gltf_node); + void _convert_animation(Ref<GLTFState> state, AnimationPlayer *ap, + String p_animation_track_name); + Error serialize(Ref<GLTFState> state, Node *p_root, const String &p_path); + Error parse(Ref<GLTFState> state, String p_paths, bool p_read_binary = false); +}; + +#endif // GLTF_DOCUMENT_H diff --git a/modules/gltf/gltf_light.cpp b/modules/gltf/gltf_light.cpp new file mode 100644 index 0000000000..da0e504474 --- /dev/null +++ b/modules/gltf/gltf_light.cpp @@ -0,0 +1,101 @@ +/*************************************************************************/ +/* gltf_light.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
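// A hypothetical usage sketch of the public API declared above. In this commit these calls
// are normally driven by the editor importer / PackedSceneGLTF; the path is an example:
Ref<GLTFDocument> doc;
doc.instance();
Ref<GLTFState> state;
state.instance();
if (doc->parse(state, "res://model.glb") == OK) {
	// state now carries the parsed nodes, meshes, skins and animations described above,
	// ready to be turned into a Godot scene or re-serialized, e.g.:
	// doc->serialize(state, scene_root, "res://model_copy.gltf");
}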
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_light.h" + +void GLTFLight::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_color"), &GLTFLight::get_color); + ClassDB::bind_method(D_METHOD("set_color", "color"), &GLTFLight::set_color); + ClassDB::bind_method(D_METHOD("get_intensity"), &GLTFLight::get_intensity); + ClassDB::bind_method(D_METHOD("set_intensity", "intensity"), &GLTFLight::set_intensity); + ClassDB::bind_method(D_METHOD("get_type"), &GLTFLight::get_type); + ClassDB::bind_method(D_METHOD("set_type", "type"), &GLTFLight::set_type); + ClassDB::bind_method(D_METHOD("get_range"), &GLTFLight::get_range); + ClassDB::bind_method(D_METHOD("set_range", "range"), &GLTFLight::set_range); + ClassDB::bind_method(D_METHOD("get_inner_cone_angle"), &GLTFLight::get_inner_cone_angle); + ClassDB::bind_method(D_METHOD("set_inner_cone_angle", "inner_cone_angle"), &GLTFLight::set_inner_cone_angle); + ClassDB::bind_method(D_METHOD("get_outer_cone_angle"), &GLTFLight::get_outer_cone_angle); + ClassDB::bind_method(D_METHOD("set_outer_cone_angle", "outer_cone_angle"), &GLTFLight::set_outer_cone_angle); + + ADD_PROPERTY(PropertyInfo(Variant::COLOR, "color"), "set_color", "get_color"); // Color + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "intensity"), "set_intensity", "get_intensity"); // float + ADD_PROPERTY(PropertyInfo(Variant::STRING, "type"), "set_type", "get_type"); // String + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "range"), "set_range", "get_range"); // float + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "inner_cone_angle"), "set_inner_cone_angle", "get_inner_cone_angle"); // float + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "outer_cone_angle"), "set_outer_cone_angle", "get_outer_cone_angle"); // float +} + +Color GLTFLight::get_color() { + return color; +} + +void GLTFLight::set_color(Color p_color) { + color = p_color; +} + +float GLTFLight::get_intensity() { + return intensity; +} + +void GLTFLight::set_intensity(float p_intensity) { + intensity = p_intensity; +} + +String GLTFLight::get_type() { + return type; +} + +void GLTFLight::set_type(String p_type) { + type = p_type; +} + +float GLTFLight::get_range() { + return range; +} + +void GLTFLight::set_range(float p_range) { + range = p_range; +} + +float GLTFLight::get_inner_cone_angle() { + return inner_cone_angle; +} + +void 
GLTFLight::set_inner_cone_angle(float p_inner_cone_angle) { + inner_cone_angle = p_inner_cone_angle; +} + +float GLTFLight::get_outer_cone_angle() { + return outer_cone_angle; +} + +void GLTFLight::set_outer_cone_angle(float p_outer_cone_angle) { + outer_cone_angle = p_outer_cone_angle; +} diff --git a/modules/gltf/gltf_light.h b/modules/gltf/gltf_light.h new file mode 100644 index 0000000000..966ef5dd44 --- /dev/null +++ b/modules/gltf/gltf_light.h @@ -0,0 +1,72 @@ +/*************************************************************************/ +/* gltf_light.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_LIGHT_H +#define GLTF_LIGHT_H + +#include "core/config/engine.h" +#include "core/io/resource.h" + +class GLTFLight : public Resource { + GDCLASS(GLTFLight, Resource) + friend class GLTFDocument; + +protected: + static void _bind_methods(); + +private: + Color color; + float intensity = 0.0f; + String type; + float range = 0.0f; + float inner_cone_angle = 0.0f; + float outer_cone_angle = 0.0f; + +public: + Color get_color(); + void set_color(Color p_color); + + float get_intensity(); + void set_intensity(float p_intensity); + + String get_type(); + void set_type(String p_type); + + float get_range(); + void set_range(float p_range); + + float get_inner_cone_angle(); + void set_inner_cone_angle(float p_inner_cone_angle); + + float get_outer_cone_angle(); + void set_outer_cone_angle(float p_outer_cone_angle); +}; + +#endif // GLTF_LIGHT_H diff --git a/modules/gltf/gltf_mesh.cpp b/modules/gltf/gltf_mesh.cpp new file mode 100644 index 0000000000..09995faab3 --- /dev/null +++ b/modules/gltf/gltf_mesh.cpp @@ -0,0 +1,58 @@ +/*************************************************************************/ +/* gltf_mesh.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
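// The GLTFLight resource above mirrors the KHR_lights_punctual extension: "type" is one of
// "directional", "point" or "spot"; "range" and the cone angles only apply to the
// appropriate light types. A hedged construction sketch (all values are examples only):
Ref<GLTFLight> light;
light.instance();
light->set_type("spot");
light->set_color(Color(1, 1, 1));
light->set_intensity(10.0f); // candela for point/spot lights in the extension
light->set_range(25.0f);
light->set_inner_cone_angle(Math_PI / 8.0);
light->set_outer_cone_angle(Math_PI / 4.0);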
*/ +/*************************************************************************/ + +#include "gltf_mesh.h" +#include "editor/import/scene_importer_mesh.h" + +void GLTFMesh::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_mesh"), &GLTFMesh::get_mesh); + ClassDB::bind_method(D_METHOD("set_mesh", "mesh"), &GLTFMesh::set_mesh); + ClassDB::bind_method(D_METHOD("get_blend_weights"), &GLTFMesh::get_blend_weights); + ClassDB::bind_method(D_METHOD("set_blend_weights", "blend_weights"), &GLTFMesh::set_blend_weights); + + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "mesh"), "set_mesh", "get_mesh"); + ADD_PROPERTY(PropertyInfo(Variant::PACKED_FLOAT32_ARRAY, "blend_weights"), "set_blend_weights", "get_blend_weights"); // Vector<float> +} + +Ref<EditorSceneImporterMesh> GLTFMesh::get_mesh() { + return mesh; +} + +void GLTFMesh::set_mesh(Ref<EditorSceneImporterMesh> p_mesh) { + mesh = p_mesh; +} + +Vector<float> GLTFMesh::get_blend_weights() { + return blend_weights; +} + +void GLTFMesh::set_blend_weights(Vector<float> p_blend_weights) { + blend_weights = p_blend_weights; +} diff --git a/modules/gltf/gltf_mesh.h b/modules/gltf/gltf_mesh.h new file mode 100644 index 0000000000..5fb3069ad2 --- /dev/null +++ b/modules/gltf/gltf_mesh.h @@ -0,0 +1,55 @@ +/*************************************************************************/ +/* gltf_mesh.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_MESH_H +#define GLTF_MESH_H + +#include "core/io/resource.h" +#include "editor/import/resource_importer_scene.h" +#include "editor/import/scene_importer_mesh.h" +#include "scene/resources/mesh.h" + +class GLTFMesh : public Resource { + GDCLASS(GLTFMesh, Resource); + +private: + Ref<EditorSceneImporterMesh> mesh; + Vector<float> blend_weights; + +protected: + static void _bind_methods(); + +public: + Ref<EditorSceneImporterMesh> get_mesh(); + void set_mesh(Ref<EditorSceneImporterMesh> p_mesh); + Vector<float> get_blend_weights(); + void set_blend_weights(Vector<float> p_blend_weights); +}; +#endif // GLTF_MESH_H diff --git a/modules/gltf/gltf_node.cpp b/modules/gltf/gltf_node.cpp new file mode 100644 index 0000000000..2fbd3f85d4 --- /dev/null +++ b/modules/gltf/gltf_node.cpp @@ -0,0 +1,189 @@ +/*************************************************************************/ +/* gltf_node.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
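// In the GLTFMesh resource above, "blend_weights" corresponds to the glTF mesh-level
// "weights" array: one default morph-target (blend-shape) weight per target, used until an
// animation overrides them. A minimal sketch (values are illustrative only):
Ref<GLTFMesh> gltf_mesh;
gltf_mesh.instance();
Vector<float> default_weights;
default_weights.push_back(0.0f); // first blend shape off by default
default_weights.push_back(1.0f); // second blend shape fully applied
gltf_mesh->set_blend_weights(default_weights);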
*/ +/*************************************************************************/ + +#include "gltf_node.h" + +void GLTFNode::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_parent"), &GLTFNode::get_parent); + ClassDB::bind_method(D_METHOD("set_parent", "parent"), &GLTFNode::set_parent); + ClassDB::bind_method(D_METHOD("get_height"), &GLTFNode::get_height); + ClassDB::bind_method(D_METHOD("set_height", "height"), &GLTFNode::set_height); + ClassDB::bind_method(D_METHOD("get_xform"), &GLTFNode::get_xform); + ClassDB::bind_method(D_METHOD("set_xform", "xform"), &GLTFNode::set_xform); + ClassDB::bind_method(D_METHOD("get_mesh"), &GLTFNode::get_mesh); + ClassDB::bind_method(D_METHOD("set_mesh", "mesh"), &GLTFNode::set_mesh); + ClassDB::bind_method(D_METHOD("get_camera"), &GLTFNode::get_camera); + ClassDB::bind_method(D_METHOD("set_camera", "camera"), &GLTFNode::set_camera); + ClassDB::bind_method(D_METHOD("get_skin"), &GLTFNode::get_skin); + ClassDB::bind_method(D_METHOD("set_skin", "skin"), &GLTFNode::set_skin); + ClassDB::bind_method(D_METHOD("get_skeleton"), &GLTFNode::get_skeleton); + ClassDB::bind_method(D_METHOD("set_skeleton", "skeleton"), &GLTFNode::set_skeleton); + ClassDB::bind_method(D_METHOD("get_joint"), &GLTFNode::get_joint); + ClassDB::bind_method(D_METHOD("set_joint", "joint"), &GLTFNode::set_joint); + ClassDB::bind_method(D_METHOD("get_translation"), &GLTFNode::get_translation); + ClassDB::bind_method(D_METHOD("set_translation", "translation"), &GLTFNode::set_translation); + ClassDB::bind_method(D_METHOD("get_rotation"), &GLTFNode::get_rotation); + ClassDB::bind_method(D_METHOD("set_rotation", "rotation"), &GLTFNode::set_rotation); + ClassDB::bind_method(D_METHOD("get_scale"), &GLTFNode::get_scale); + ClassDB::bind_method(D_METHOD("set_scale", "scale"), &GLTFNode::set_scale); + ClassDB::bind_method(D_METHOD("get_children"), &GLTFNode::get_children); + ClassDB::bind_method(D_METHOD("set_children", "children"), &GLTFNode::set_children); + ClassDB::bind_method(D_METHOD("get_fake_joint_parent"), &GLTFNode::get_fake_joint_parent); + ClassDB::bind_method(D_METHOD("set_fake_joint_parent", "fake_joint_parent"), &GLTFNode::set_fake_joint_parent); + ClassDB::bind_method(D_METHOD("get_light"), &GLTFNode::get_light); + ClassDB::bind_method(D_METHOD("set_light", "light"), &GLTFNode::set_light); + + ADD_PROPERTY(PropertyInfo(Variant::INT, "parent"), "set_parent", "get_parent"); // GLTFNodeIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "height"), "set_height", "get_height"); // int + ADD_PROPERTY(PropertyInfo(Variant::TRANSFORM, "xform"), "set_xform", "get_xform"); // Transform + ADD_PROPERTY(PropertyInfo(Variant::INT, "mesh"), "set_mesh", "get_mesh"); // GLTFMeshIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "camera"), "set_camera", "get_camera"); // GLTFCameraIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "skin"), "set_skin", "get_skin"); // GLTFSkinIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "skeleton"), "set_skeleton", "get_skeleton"); // GLTFSkeletonIndex + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "joint"), "set_joint", "get_joint"); // bool + ADD_PROPERTY(PropertyInfo(Variant::VECTOR3, "translation"), "set_translation", "get_translation"); // Vector3 + ADD_PROPERTY(PropertyInfo(Variant::QUAT, "rotation"), "set_rotation", "get_rotation"); // Quat + ADD_PROPERTY(PropertyInfo(Variant::VECTOR3, "scale"), "set_scale", "get_scale"); // Vector3 + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "children"), "set_children", "get_children"); // Vector<int> + 
ADD_PROPERTY(PropertyInfo(Variant::INT, "fake_joint_parent"), "set_fake_joint_parent", "get_fake_joint_parent"); // GLTFNodeIndex + ADD_PROPERTY(PropertyInfo(Variant::INT, "light"), "set_light", "get_light"); // GLTFLightIndex +} + +GLTFNodeIndex GLTFNode::get_parent() { + return parent; +} + +void GLTFNode::set_parent(GLTFNodeIndex p_parent) { + parent = p_parent; +} + +int GLTFNode::get_height() { + return height; +} + +void GLTFNode::set_height(int p_height) { + height = p_height; +} + +Transform GLTFNode::get_xform() { + return xform; +} + +void GLTFNode::set_xform(Transform p_xform) { + xform = p_xform; +} + +GLTFMeshIndex GLTFNode::get_mesh() { + return mesh; +} + +void GLTFNode::set_mesh(GLTFMeshIndex p_mesh) { + mesh = p_mesh; +} + +GLTFCameraIndex GLTFNode::get_camera() { + return camera; +} + +void GLTFNode::set_camera(GLTFCameraIndex p_camera) { + camera = p_camera; +} + +GLTFSkinIndex GLTFNode::get_skin() { + return skin; +} + +void GLTFNode::set_skin(GLTFSkinIndex p_skin) { + skin = p_skin; +} + +GLTFSkeletonIndex GLTFNode::get_skeleton() { + return skeleton; +} + +void GLTFNode::set_skeleton(GLTFSkeletonIndex p_skeleton) { + skeleton = p_skeleton; +} + +bool GLTFNode::get_joint() { + return joint; +} + +void GLTFNode::set_joint(bool p_joint) { + joint = p_joint; +} + +Vector3 GLTFNode::get_translation() { + return translation; +} + +void GLTFNode::set_translation(Vector3 p_translation) { + translation = p_translation; +} + +Quat GLTFNode::get_rotation() { + return rotation; +} + +void GLTFNode::set_rotation(Quat p_rotation) { + rotation = p_rotation; +} + +Vector3 GLTFNode::get_scale() { + return scale; +} + +void GLTFNode::set_scale(Vector3 p_scale) { + scale = p_scale; +} + +Vector<int> GLTFNode::get_children() { + return children; +} + +void GLTFNode::set_children(Vector<int> p_children) { + children = p_children; +} + +GLTFNodeIndex GLTFNode::get_fake_joint_parent() { + return fake_joint_parent; +} + +void GLTFNode::set_fake_joint_parent(GLTFNodeIndex p_fake_joint_parent) { + fake_joint_parent = p_fake_joint_parent; +} + +GLTFLightIndex GLTFNode::get_light() { + return light; +} + +void GLTFNode::set_light(GLTFLightIndex p_light) { + light = p_light; +} diff --git a/modules/gltf/gltf_node.h b/modules/gltf/gltf_node.h new file mode 100644 index 0000000000..b96e700ec2 --- /dev/null +++ b/modules/gltf/gltf_node.h @@ -0,0 +1,105 @@ +/*************************************************************************/ +/* gltf_node.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_NODE_H +#define GLTF_NODE_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +class GLTFNode : public Resource { + GDCLASS(GLTFNode, Resource); + friend class GLTFDocument; + friend class PackedSceneGLTF; + +private: + // matrices need to be transformed to this + GLTFNodeIndex parent = -1; + int height = -1; + Transform xform; + GLTFMeshIndex mesh = -1; + GLTFCameraIndex camera = -1; + GLTFSkinIndex skin = -1; + GLTFSkeletonIndex skeleton = -1; + bool joint = false; + Vector3 translation; + Quat rotation; + Vector3 scale = Vector3(1, 1, 1); + Vector<int> children; + GLTFNodeIndex fake_joint_parent = -1; + GLTFLightIndex light = -1; + +protected: + static void _bind_methods(); + +public: + GLTFNodeIndex get_parent(); + void set_parent(GLTFNodeIndex p_parent); + + int get_height(); + void set_height(int p_height); + + Transform get_xform(); + void set_xform(Transform p_xform); + + GLTFMeshIndex get_mesh(); + void set_mesh(GLTFMeshIndex p_mesh); + + GLTFCameraIndex get_camera(); + void set_camera(GLTFCameraIndex p_camera); + + GLTFSkinIndex get_skin(); + void set_skin(GLTFSkinIndex p_skin); + + GLTFSkeletonIndex get_skeleton(); + void set_skeleton(GLTFSkeletonIndex p_skeleton); + + bool get_joint(); + void set_joint(bool p_joint); + + Vector3 get_translation(); + void set_translation(Vector3 p_translation); + + Quat get_rotation(); + void set_rotation(Quat p_rotation); + + Vector3 get_scale(); + void set_scale(Vector3 p_scale); + + Vector<int> get_children(); + void set_children(Vector<int> p_children); + + GLTFNodeIndex get_fake_joint_parent(); + void set_fake_joint_parent(GLTFNodeIndex p_fake_joint_parent); + + GLTFLightIndex get_light(); + void set_light(GLTFLightIndex p_light); +}; +#endif // GLTF_NODE_H diff --git a/modules/gltf/gltf_skeleton.cpp b/modules/gltf/gltf_skeleton.cpp new file mode 100644 index 0000000000..35671335d3 --- /dev/null +++ b/modules/gltf/gltf_skeleton.cpp @@ -0,0 +1,95 @@ +/*************************************************************************/ +/* gltf_skeleton.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
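// A glTF node stores its local transform either as a single matrix or as decomposed
// translation / rotation / scale; GLTFNode above keeps both forms ("xform" plus the TRS
// fields). A hedged sketch of recombining the TRS fields into the matrix form (helper name
// is hypothetical; Basis::set_quat_scale is assumed to be available here):
static Transform trs_to_xform(const Vector3 &p_translation, const Quat &p_rotation, const Vector3 &p_scale) {
	Transform xform;
	xform.basis.set_quat_scale(p_rotation, p_scale); // scale first, then rotate
	xform.origin = p_translation;
	return xform;
}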
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_skeleton.h" + +void GLTFSkeleton::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_joints"), &GLTFSkeleton::get_joints); + ClassDB::bind_method(D_METHOD("set_joints", "joints"), &GLTFSkeleton::set_joints); + ClassDB::bind_method(D_METHOD("get_roots"), &GLTFSkeleton::get_roots); + ClassDB::bind_method(D_METHOD("set_roots", "roots"), &GLTFSkeleton::set_roots); + ClassDB::bind_method(D_METHOD("get_godot_skeleton"), &GLTFSkeleton::get_godot_skeleton); + ClassDB::bind_method(D_METHOD("get_unique_names"), &GLTFSkeleton::get_unique_names); + ClassDB::bind_method(D_METHOD("set_unique_names", "unique_names"), &GLTFSkeleton::set_unique_names); + ClassDB::bind_method(D_METHOD("get_godot_bone_node"), &GLTFSkeleton::get_godot_bone_node); + ClassDB::bind_method(D_METHOD("set_godot_bone_node", "godot_bone_node"), &GLTFSkeleton::set_godot_bone_node); + ClassDB::bind_method(D_METHOD("get_bone_attachment_count"), &GLTFSkeleton::get_bone_attachment_count); + ClassDB::bind_method(D_METHOD("get_bone_attachment"), &GLTFSkeleton::get_bone_attachment); + + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "joints"), "set_joints", "get_joints"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "roots"), "set_roots", "get_roots"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "unique_names", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_unique_names", "get_unique_names"); // Set<String> + ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "godot_bone_node", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_godot_bone_node", "get_godot_bone_node"); // Map<int32_t, +} + +Vector<GLTFNodeIndex> GLTFSkeleton::get_joints() { + return joints; +} + +void GLTFSkeleton::set_joints(Vector<GLTFNodeIndex> p_joints) { + joints = p_joints; +} + +Vector<GLTFNodeIndex> GLTFSkeleton::get_roots() { + return roots; +} + +void GLTFSkeleton::set_roots(Vector<GLTFNodeIndex> p_roots) { + roots = p_roots; +} + +Skeleton3D *GLTFSkeleton::get_godot_skeleton() { + return godot_skeleton; +} + +Array GLTFSkeleton::get_unique_names() { + return GLTFDocument::to_array(unique_names); +} + +void GLTFSkeleton::set_unique_names(Array p_unique_names) { 
+ GLTFDocument::set_from_array(unique_names, p_unique_names); +} + +Dictionary GLTFSkeleton::get_godot_bone_node() { + return GLTFDocument::to_dict(godot_bone_node); +} + +void GLTFSkeleton::set_godot_bone_node(Dictionary p_indict) { + GLTFDocument::set_from_dict(godot_bone_node, p_indict); +} + +BoneAttachment3D *GLTFSkeleton::get_bone_attachment(int idx) { + ERR_FAIL_INDEX_V(idx, bone_attachments.size(), nullptr); + return bone_attachments[idx]; +} + +int32_t GLTFSkeleton::get_bone_attachment_count() { + return bone_attachments.size(); +} diff --git a/modules/gltf/gltf_skeleton.h b/modules/gltf/gltf_skeleton.h new file mode 100644 index 0000000000..6263fa3c5d --- /dev/null +++ b/modules/gltf/gltf_skeleton.h @@ -0,0 +1,101 @@ +/*************************************************************************/ +/* gltf_skeleton.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_SKELETON_H +#define GLTF_SKELETON_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +class GLTFSkeleton : public Resource { + GDCLASS(GLTFSkeleton, Resource); + friend class GLTFDocument; + +private: + // The *synthesized* skeletons joints + Vector<GLTFNodeIndex> joints; + + // The roots of the skeleton. 
If there are multiple, each root must have the + // same parent (ie roots are siblings) + Vector<GLTFNodeIndex> roots; + + // The created Skeleton3D for the scene + Skeleton3D *godot_skeleton = nullptr; + + // Set of unique bone names for the skeleton + Set<String> unique_names; + + Map<int32_t, GLTFNodeIndex> godot_bone_node; + + Vector<BoneAttachment3D *> bone_attachments; + +protected: + static void _bind_methods(); + +public: + Vector<GLTFNodeIndex> get_joints(); + void set_joints(Vector<GLTFNodeIndex> p_joints); + + Vector<GLTFNodeIndex> get_roots(); + void set_roots(Vector<GLTFNodeIndex> p_roots); + + Skeleton3D *get_godot_skeleton(); + + // Skeleton *get_godot_skeleton() { + // return this->godot_skeleton; + // } + // void set_godot_skeleton(Skeleton p_*godot_skeleton) { + // this->godot_skeleton = p_godot_skeleton; + // } + + Array get_unique_names(); + void set_unique_names(Array p_unique_names); + + //Map<int32_t, GLTFNodeIndex> get_godot_bone_node() { + // return this->godot_bone_node; + //} + //void set_godot_bone_node(Map<int32_t, GLTFNodeIndex> p_godot_bone_node) { + // this->godot_bone_node = p_godot_bone_node; + //} + Dictionary get_godot_bone_node(); + void set_godot_bone_node(Dictionary p_indict); + + //Dictionary get_godot_bone_node() { + // return VariantConversion::to_dict(this->godot_bone_node); + //} + //void set_godot_bone_node(Dictionary p_indict) { + // VariantConversion::set_from_dict(this->godot_bone_node, p_indict); + //} + + BoneAttachment3D *get_bone_attachment(int idx); + + int32_t get_bone_attachment_count(); +}; +#endif // GLTF_SKELETON_H diff --git a/modules/gltf/gltf_skin.cpp b/modules/gltf/gltf_skin.cpp new file mode 100644 index 0000000000..1b94b9d106 --- /dev/null +++ b/modules/gltf/gltf_skin.cpp @@ -0,0 +1,155 @@ +/*************************************************************************/ +/* gltf_skin.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "gltf_skin.h" + +void GLTFSkin::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_skin_root"), &GLTFSkin::get_skin_root); + ClassDB::bind_method(D_METHOD("set_skin_root", "skin_root"), &GLTFSkin::set_skin_root); + ClassDB::bind_method(D_METHOD("get_joints_original"), &GLTFSkin::get_joints_original); + ClassDB::bind_method(D_METHOD("set_joints_original", "joints_original"), &GLTFSkin::set_joints_original); + ClassDB::bind_method(D_METHOD("get_inverse_binds"), &GLTFSkin::get_inverse_binds); + ClassDB::bind_method(D_METHOD("set_inverse_binds", "inverse_binds"), &GLTFSkin::set_inverse_binds); + ClassDB::bind_method(D_METHOD("get_joints"), &GLTFSkin::get_joints); + ClassDB::bind_method(D_METHOD("set_joints", "joints"), &GLTFSkin::set_joints); + ClassDB::bind_method(D_METHOD("get_non_joints"), &GLTFSkin::get_non_joints); + ClassDB::bind_method(D_METHOD("set_non_joints", "non_joints"), &GLTFSkin::set_non_joints); + ClassDB::bind_method(D_METHOD("get_roots"), &GLTFSkin::get_roots); + ClassDB::bind_method(D_METHOD("set_roots", "roots"), &GLTFSkin::set_roots); + ClassDB::bind_method(D_METHOD("get_skeleton"), &GLTFSkin::get_skeleton); + ClassDB::bind_method(D_METHOD("set_skeleton", "skeleton"), &GLTFSkin::set_skeleton); + ClassDB::bind_method(D_METHOD("get_joint_i_to_bone_i"), &GLTFSkin::get_joint_i_to_bone_i); + ClassDB::bind_method(D_METHOD("set_joint_i_to_bone_i", "joint_i_to_bone_i"), &GLTFSkin::set_joint_i_to_bone_i); + ClassDB::bind_method(D_METHOD("get_joint_i_to_name"), &GLTFSkin::get_joint_i_to_name); + ClassDB::bind_method(D_METHOD("set_joint_i_to_name", "joint_i_to_name"), &GLTFSkin::set_joint_i_to_name); + ClassDB::bind_method(D_METHOD("get_godot_skin"), &GLTFSkin::get_godot_skin); + ClassDB::bind_method(D_METHOD("set_godot_skin", "godot_skin"), &GLTFSkin::set_godot_skin); + + ADD_PROPERTY(PropertyInfo(Variant::INT, "skin_root"), "set_skin_root", "get_skin_root"); // GLTFNodeIndex + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "joints_original"), "set_joints_original", "get_joints_original"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "inverse_binds", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL), "set_inverse_binds", "get_inverse_binds"); // Vector<Transform> + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "joints"), "set_joints", "get_joints"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "non_joints"), "set_non_joints", "get_non_joints"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "roots"), "set_roots", "get_roots"); // Vector<GLTFNodeIndex> + ADD_PROPERTY(PropertyInfo(Variant::INT, "skeleton"), "set_skeleton", "get_skeleton"); // int + ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "joint_i_to_bone_i", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL), "set_joint_i_to_bone_i", "get_joint_i_to_bone_i"); // Map<int, + ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "joint_i_to_name", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL), "set_joint_i_to_name", "get_joint_i_to_name"); // Map<int, + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "godot_skin"), "set_godot_skin", "get_godot_skin"); // Ref<Skin> +} + +GLTFNodeIndex GLTFSkin::get_skin_root() { + return skin_root; +} + +void GLTFSkin::set_skin_root(GLTFNodeIndex p_skin_root) { + skin_root = p_skin_root; +} + +Vector<GLTFNodeIndex> 
GLTFSkin::get_joints_original() { + return joints_original; +} + +void GLTFSkin::set_joints_original(Vector<GLTFNodeIndex> p_joints_original) { + joints_original = p_joints_original; +} + +Array GLTFSkin::get_inverse_binds() { + return GLTFDocument::to_array(inverse_binds); +} + +void GLTFSkin::set_inverse_binds(Array p_inverse_binds) { + GLTFDocument::set_from_array(inverse_binds, p_inverse_binds); +} + +Vector<GLTFNodeIndex> GLTFSkin::get_joints() { + return joints; +} + +void GLTFSkin::set_joints(Vector<GLTFNodeIndex> p_joints) { + joints = p_joints; +} + +Vector<GLTFNodeIndex> GLTFSkin::get_non_joints() { + return non_joints; +} + +void GLTFSkin::set_non_joints(Vector<GLTFNodeIndex> p_non_joints) { + non_joints = p_non_joints; +} + +Vector<GLTFNodeIndex> GLTFSkin::get_roots() { + return roots; +} + +void GLTFSkin::set_roots(Vector<GLTFNodeIndex> p_roots) { + roots = p_roots; +} + +int GLTFSkin::get_skeleton() { + return skeleton; +} + +void GLTFSkin::set_skeleton(int p_skeleton) { + skeleton = p_skeleton; +} + +Dictionary GLTFSkin::get_joint_i_to_bone_i() { + return GLTFDocument::to_dict(joint_i_to_bone_i); +} + +void GLTFSkin::set_joint_i_to_bone_i(Dictionary p_joint_i_to_bone_i) { + GLTFDocument::set_from_dict(joint_i_to_bone_i, p_joint_i_to_bone_i); +} + +Dictionary GLTFSkin::get_joint_i_to_name() { + Dictionary ret; + Map<int, StringName>::Element *elem = joint_i_to_name.front(); + while (elem) { + ret[elem->key()] = String(elem->value()); + elem = elem->next(); + } + return ret; +} + +void GLTFSkin::set_joint_i_to_name(Dictionary p_joint_i_to_name) { + joint_i_to_name = Map<int, StringName>(); + Array keys = p_joint_i_to_name.keys(); + for (int i = 0; i < keys.size(); i++) { + joint_i_to_name[keys[i]] = p_joint_i_to_name[keys[i]]; + } +} + +Ref<Skin> GLTFSkin::get_godot_skin() { + return godot_skin; +} + +void GLTFSkin::set_godot_skin(Ref<Skin> p_godot_skin) { + godot_skin = p_godot_skin; +} diff --git a/modules/gltf/gltf_skin.h b/modules/gltf/gltf_skin.h new file mode 100644 index 0000000000..09e1a37a55 --- /dev/null +++ b/modules/gltf/gltf_skin.h @@ -0,0 +1,109 @@ +/*************************************************************************/ +/* gltf_skin.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software.
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_SKIN_H +#define GLTF_SKIN_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +class GLTFSkin : public Resource { + GDCLASS(GLTFSkin, Resource); + friend class GLTFDocument; + +private: + // The "skeleton" property defined in the gltf spec. -1 = Scene Root + GLTFNodeIndex skin_root = -1; + + Vector<GLTFNodeIndex> joints_original; + Vector<Transform> inverse_binds; + + // Note: joints + non_joints should form a complete subtree, or subtrees + // with a common parent + + // All nodes that are skins that are caught in-between the original joints + // (inclusive of joints_original) + Vector<GLTFNodeIndex> joints; + + // All Nodes that are caught in-between skin joint nodes, and are not + // defined as joints by any skin + Vector<GLTFNodeIndex> non_joints; + + // The roots of the skin. In the case of multiple roots, their parent *must* + // be the same (the roots must be siblings) + Vector<GLTFNodeIndex> roots; + + // The GLTF Skeleton this Skin points to (after we determine skeletons) + GLTFSkeletonIndex skeleton = -1; + + // A mapping from the joint indices (in the order of joints_original) to the + // Godot Skeleton's bone_indices + Map<int, int> joint_i_to_bone_i; + Map<int, StringName> joint_i_to_name; + + // The Actual Skin that will be created as a mapping between the IBM's of + // this skin to the generated skeleton for the mesh instances. 
+ Ref<Skin> godot_skin; + +protected: + static void _bind_methods(); + +public: + GLTFNodeIndex get_skin_root(); + void set_skin_root(GLTFNodeIndex p_skin_root); + + Vector<GLTFNodeIndex> get_joints_original(); + void set_joints_original(Vector<GLTFNodeIndex> p_joints_original); + + Array get_inverse_binds(); + void set_inverse_binds(Array p_inverse_binds); + + Vector<GLTFNodeIndex> get_joints(); + void set_joints(Vector<GLTFNodeIndex> p_joints); + + Vector<GLTFNodeIndex> get_non_joints(); + void set_non_joints(Vector<GLTFNodeIndex> p_non_joints); + + Vector<GLTFNodeIndex> get_roots(); + void set_roots(Vector<GLTFNodeIndex> p_roots); + + int get_skeleton(); + void set_skeleton(int p_skeleton); + + Dictionary get_joint_i_to_bone_i(); + void set_joint_i_to_bone_i(Dictionary p_joint_i_to_bone_i); + + Dictionary get_joint_i_to_name(); + void set_joint_i_to_name(Dictionary p_joint_i_to_name); + + Ref<Skin> get_godot_skin(); + void set_godot_skin(Ref<Skin> p_godot_skin); +}; +#endif // GLTF_SKIN_H diff --git a/modules/gltf/gltf_spec_gloss.cpp b/modules/gltf/gltf_spec_gloss.cpp new file mode 100644 index 0000000000..7f27805d62 --- /dev/null +++ b/modules/gltf/gltf_spec_gloss.cpp @@ -0,0 +1,90 @@ +/*************************************************************************/ +/* gltf_spec_gloss.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "gltf_spec_gloss.h" + +void GLTFSpecGloss::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_diffuse_img"), &GLTFSpecGloss::get_diffuse_img); + ClassDB::bind_method(D_METHOD("set_diffuse_img", "diffuse_img"), &GLTFSpecGloss::set_diffuse_img); + ClassDB::bind_method(D_METHOD("get_diffuse_factor"), &GLTFSpecGloss::get_diffuse_factor); + ClassDB::bind_method(D_METHOD("set_diffuse_factor", "diffuse_factor"), &GLTFSpecGloss::set_diffuse_factor); + ClassDB::bind_method(D_METHOD("get_gloss_factor"), &GLTFSpecGloss::get_gloss_factor); + ClassDB::bind_method(D_METHOD("set_gloss_factor", "gloss_factor"), &GLTFSpecGloss::set_gloss_factor); + ClassDB::bind_method(D_METHOD("get_specular_factor"), &GLTFSpecGloss::get_specular_factor); + ClassDB::bind_method(D_METHOD("set_specular_factor", "specular_factor"), &GLTFSpecGloss::set_specular_factor); + ClassDB::bind_method(D_METHOD("get_spec_gloss_img"), &GLTFSpecGloss::get_spec_gloss_img); + ClassDB::bind_method(D_METHOD("set_spec_gloss_img", "spec_gloss_img"), &GLTFSpecGloss::set_spec_gloss_img); + + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "diffuse_img"), "set_diffuse_img", "get_diffuse_img"); // Ref<Image> + ADD_PROPERTY(PropertyInfo(Variant::COLOR, "diffuse_factor"), "set_diffuse_factor", "get_diffuse_factor"); // Color + ADD_PROPERTY(PropertyInfo(Variant::FLOAT, "gloss_factor"), "set_gloss_factor", "get_gloss_factor"); // float + ADD_PROPERTY(PropertyInfo(Variant::COLOR, "specular_factor"), "set_specular_factor", "get_specular_factor"); // Color + ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "spec_gloss_img"), "set_spec_gloss_img", "get_spec_gloss_img"); // Ref<Image> +} + +Ref<Image> GLTFSpecGloss::get_diffuse_img() { + return diffuse_img; +} + +void GLTFSpecGloss::set_diffuse_img(Ref<Image> p_diffuse_img) { + diffuse_img = p_diffuse_img; +} + +Color GLTFSpecGloss::get_diffuse_factor() { + return diffuse_factor; +} + +void GLTFSpecGloss::set_diffuse_factor(Color p_diffuse_factor) { + diffuse_factor = p_diffuse_factor; +} + +float GLTFSpecGloss::get_gloss_factor() { + return gloss_factor; +} + +void GLTFSpecGloss::set_gloss_factor(float p_gloss_factor) { + gloss_factor = p_gloss_factor; +} + +Color GLTFSpecGloss::get_specular_factor() { + return specular_factor; +} + +void GLTFSpecGloss::set_specular_factor(Color p_specular_factor) { + specular_factor = p_specular_factor; +} + +Ref<Image> GLTFSpecGloss::get_spec_gloss_img() { + return spec_gloss_img; +} + +void GLTFSpecGloss::set_spec_gloss_img(Ref<Image> p_spec_gloss_img) { + spec_gloss_img = p_spec_gloss_img; +} diff --git a/modules/gltf/gltf_spec_gloss.h b/modules/gltf/gltf_spec_gloss.h new file mode 100644 index 0000000000..e06c6c14f3 --- /dev/null +++ b/modules/gltf/gltf_spec_gloss.h @@ -0,0 +1,67 @@ +/*************************************************************************/ +/* gltf_spec_gloss.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_SPEC_GLOSS_H +#define GLTF_SPEC_GLOSS_H + +#include "core/io/image.h" +#include "core/io/resource.h" + +class GLTFSpecGloss : public Resource { + GDCLASS(GLTFSpecGloss, Resource); + friend class GLTFDocument; + +private: + Ref<Image> diffuse_img = nullptr; + Color diffuse_factor = Color(1.0f, 1.0f, 1.0f); + float gloss_factor = 1.0f; + Color specular_factor = Color(1.0f, 1.0f, 1.0f); + Ref<Image> spec_gloss_img = nullptr; + +protected: + static void _bind_methods(); + +public: + Ref<Image> get_diffuse_img(); + void set_diffuse_img(Ref<Image> p_diffuse_img); + + Color get_diffuse_factor(); + void set_diffuse_factor(Color p_diffuse_factor); + + float get_gloss_factor(); + void set_gloss_factor(float p_gloss_factor); + + Color get_specular_factor(); + void set_specular_factor(Color p_specular_factor); + + Ref<Image> get_spec_gloss_img(); + void set_spec_gloss_img(Ref<Image> p_spec_gloss_img); +}; +#endif // GLTF_SPEC_GLOSS_H diff --git a/modules/gltf/gltf_state.cpp b/modules/gltf/gltf_state.cpp new file mode 100644 index 0000000000..403ae26bd3 --- /dev/null +++ b/modules/gltf/gltf_state.cpp @@ -0,0 +1,296 @@ +/*************************************************************************/ +/* gltf_state.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_state.h" + +void GLTFState::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_json"), &GLTFState::get_json); + ClassDB::bind_method(D_METHOD("set_json", "json"), &GLTFState::set_json); + ClassDB::bind_method(D_METHOD("get_major_version"), &GLTFState::get_major_version); + ClassDB::bind_method(D_METHOD("set_major_version", "major_version"), &GLTFState::set_major_version); + ClassDB::bind_method(D_METHOD("get_minor_version"), &GLTFState::get_minor_version); + ClassDB::bind_method(D_METHOD("set_minor_version", "minor_version"), &GLTFState::set_minor_version); + ClassDB::bind_method(D_METHOD("get_glb_data"), &GLTFState::get_glb_data); + ClassDB::bind_method(D_METHOD("set_glb_data", "glb_data"), &GLTFState::set_glb_data); + ClassDB::bind_method(D_METHOD("get_use_named_skin_binds"), &GLTFState::get_use_named_skin_binds); + ClassDB::bind_method(D_METHOD("set_use_named_skin_binds", "use_named_skin_binds"), &GLTFState::set_use_named_skin_binds); + ClassDB::bind_method(D_METHOD("get_nodes"), &GLTFState::get_nodes); + ClassDB::bind_method(D_METHOD("set_nodes", "nodes"), &GLTFState::set_nodes); + ClassDB::bind_method(D_METHOD("get_buffers"), &GLTFState::get_buffers); + ClassDB::bind_method(D_METHOD("set_buffers", "buffers"), &GLTFState::set_buffers); + ClassDB::bind_method(D_METHOD("get_buffer_views"), &GLTFState::get_buffer_views); + ClassDB::bind_method(D_METHOD("set_buffer_views", "buffer_views"), &GLTFState::set_buffer_views); + ClassDB::bind_method(D_METHOD("get_accessors"), &GLTFState::get_accessors); + ClassDB::bind_method(D_METHOD("set_accessors", "accessors"), &GLTFState::set_accessors); + ClassDB::bind_method(D_METHOD("get_meshes"), &GLTFState::get_meshes); + ClassDB::bind_method(D_METHOD("set_meshes", "meshes"), &GLTFState::set_meshes); + ClassDB::bind_method(D_METHOD("get_animation_players_count"), &GLTFState::get_animation_players_count); + ClassDB::bind_method(D_METHOD("get_animation_player"), &GLTFState::get_animation_player); + ClassDB::bind_method(D_METHOD("get_materials"), &GLTFState::get_materials); + ClassDB::bind_method(D_METHOD("set_materials", "materials"), &GLTFState::set_materials); + ClassDB::bind_method(D_METHOD("get_scene_name"), &GLTFState::get_scene_name); + ClassDB::bind_method(D_METHOD("set_scene_name", "scene_name"), &GLTFState::set_scene_name); + ClassDB::bind_method(D_METHOD("get_root_nodes"), &GLTFState::get_root_nodes); + ClassDB::bind_method(D_METHOD("set_root_nodes", "root_nodes"), &GLTFState::set_root_nodes); + ClassDB::bind_method(D_METHOD("get_textures"), &GLTFState::get_textures); + ClassDB::bind_method(D_METHOD("set_textures", "textures"), &GLTFState::set_textures); + ClassDB::bind_method(D_METHOD("get_images"), &GLTFState::get_images); + ClassDB::bind_method(D_METHOD("set_images", "images"), &GLTFState::set_images); + ClassDB::bind_method(D_METHOD("get_skins"), &GLTFState::get_skins); + ClassDB::bind_method(D_METHOD("set_skins", "skins"), 
&GLTFState::set_skins); + ClassDB::bind_method(D_METHOD("get_cameras"), &GLTFState::get_cameras); + ClassDB::bind_method(D_METHOD("set_cameras", "cameras"), &GLTFState::set_cameras); + ClassDB::bind_method(D_METHOD("get_lights"), &GLTFState::get_lights); + ClassDB::bind_method(D_METHOD("set_lights", "lights"), &GLTFState::set_lights); + ClassDB::bind_method(D_METHOD("get_unique_names"), &GLTFState::get_unique_names); + ClassDB::bind_method(D_METHOD("set_unique_names", "unique_names"), &GLTFState::set_unique_names); + ClassDB::bind_method(D_METHOD("get_skeletons"), &GLTFState::get_skeletons); + ClassDB::bind_method(D_METHOD("set_skeletons", "skeletons"), &GLTFState::set_skeletons); + ClassDB::bind_method(D_METHOD("get_skeleton_to_node"), &GLTFState::get_skeleton_to_node); + ClassDB::bind_method(D_METHOD("set_skeleton_to_node", "skeleton_to_node"), &GLTFState::set_skeleton_to_node); + ClassDB::bind_method(D_METHOD("get_animations"), &GLTFState::get_animations); + ClassDB::bind_method(D_METHOD("set_animations", "animations"), &GLTFState::set_animations); + ClassDB::bind_method(D_METHOD("get_scene_node"), &GLTFState::get_scene_node); + + ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "json"), "set_json", "get_json"); // Dictionary + ADD_PROPERTY(PropertyInfo(Variant::INT, "major_version"), "set_major_version", "get_major_version"); // int + ADD_PROPERTY(PropertyInfo(Variant::INT, "minor_version"), "set_minor_version", "get_minor_version"); // int + ADD_PROPERTY(PropertyInfo(Variant::PACKED_BYTE_ARRAY, "glb_data"), "set_glb_data", "get_glb_data"); // Vector<uint8_t> + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "use_named_skin_binds"), "set_use_named_skin_binds", "get_use_named_skin_binds"); // bool + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "nodes", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_nodes", "get_nodes"); // Vector<Ref<GLTFNode>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "buffers"), "set_buffers", "get_buffers"); // Vector<Vector<uint8_t> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "buffer_views", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_buffer_views", "get_buffer_views"); // Vector<Ref<GLTFBufferView>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "accessors", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_accessors", "get_accessors"); // Vector<Ref<GLTFAccessor>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "meshes", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_meshes", "get_meshes"); // Vector<Ref<GLTFMesh>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "materials", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_materials", "get_materials"); // Vector<Ref<Material> + ADD_PROPERTY(PropertyInfo(Variant::STRING, "scene_name"), "set_scene_name", "get_scene_name"); // String + ADD_PROPERTY(PropertyInfo(Variant::PACKED_INT32_ARRAY, "root_nodes"), "set_root_nodes", "get_root_nodes"); // Vector<int> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "textures", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_textures", "get_textures"); // Vector<Ref<GLTFTexture>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "images", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_images", "get_images"); // 
Vector<Ref<Texture> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "skins", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_skins", "get_skins"); // Vector<Ref<GLTFSkin>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "cameras", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_cameras", "get_cameras"); // Vector<Ref<GLTFCamera>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "lights", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_lights", "get_lights"); // Vector<Ref<GLTFLight>> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "unique_names", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_unique_names", "get_unique_names"); // Set<String> + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "skeletons", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_skeletons", "get_skeletons"); // Vector<Ref<GLTFSkeleton>> + ADD_PROPERTY(PropertyInfo(Variant::DICTIONARY, "skeleton_to_node", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_skeleton_to_node", "get_skeleton_to_node"); // Map<GLTFSkeletonIndex, + ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "animations", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_STORAGE | PROPERTY_USAGE_INTERNAL | PROPERTY_USAGE_EDITOR), "set_animations", "get_animations"); // Vector<Ref<GLTFAnimation>> +} + +Dictionary GLTFState::get_json() { + return json; +} + +void GLTFState::set_json(Dictionary p_json) { + json = p_json; +} + +int GLTFState::get_major_version() { + return major_version; +} + +void GLTFState::set_major_version(int p_major_version) { + major_version = p_major_version; +} + +int GLTFState::get_minor_version() { + return minor_version; +} + +void GLTFState::set_minor_version(int p_minor_version) { + minor_version = p_minor_version; +} + +Vector<uint8_t> GLTFState::get_glb_data() { + return glb_data; +} + +void GLTFState::set_glb_data(Vector<uint8_t> p_glb_data) { + glb_data = p_glb_data; +} + +bool GLTFState::get_use_named_skin_binds() { + return use_named_skin_binds; +} + +void GLTFState::set_use_named_skin_binds(bool p_use_named_skin_binds) { + use_named_skin_binds = p_use_named_skin_binds; +} + +Array GLTFState::get_nodes() { + return GLTFDocument::to_array(nodes); +} + +void GLTFState::set_nodes(Array p_nodes) { + GLTFDocument::set_from_array(nodes, p_nodes); +} + +Array GLTFState::get_buffers() { + return GLTFDocument::to_array(buffers); +} + +void GLTFState::set_buffers(Array p_buffers) { + GLTFDocument::set_from_array(buffers, p_buffers); +} + +Array GLTFState::get_buffer_views() { + return GLTFDocument::to_array(buffer_views); +} + +void GLTFState::set_buffer_views(Array p_buffer_views) { + GLTFDocument::set_from_array(buffer_views, p_buffer_views); +} + +Array GLTFState::get_accessors() { + return GLTFDocument::to_array(accessors); +} + +void GLTFState::set_accessors(Array p_accessors) { + GLTFDocument::set_from_array(accessors, p_accessors); +} + +Array GLTFState::get_meshes() { + return GLTFDocument::to_array(meshes); +} + +void GLTFState::set_meshes(Array p_meshes) { + GLTFDocument::set_from_array(meshes, p_meshes); +} + +Array GLTFState::get_materials() { + return GLTFDocument::to_array(materials); +} + +void GLTFState::set_materials(Array p_materials) { + GLTFDocument::set_from_array(materials, p_materials); +} + +String 
GLTFState::get_scene_name() { + return scene_name; +} + +void GLTFState::set_scene_name(String p_scene_name) { + scene_name = p_scene_name; +} + +Array GLTFState::get_root_nodes() { + return GLTFDocument::to_array(root_nodes); +} + +void GLTFState::set_root_nodes(Array p_root_nodes) { + GLTFDocument::set_from_array(root_nodes, p_root_nodes); +} + +Array GLTFState::get_textures() { + return GLTFDocument::to_array(textures); +} + +void GLTFState::set_textures(Array p_textures) { + GLTFDocument::set_from_array(textures, p_textures); +} + +Array GLTFState::get_images() { + return GLTFDocument::to_array(images); +} + +void GLTFState::set_images(Array p_images) { + GLTFDocument::set_from_array(images, p_images); +} + +Array GLTFState::get_skins() { + return GLTFDocument::to_array(skins); +} + +void GLTFState::set_skins(Array p_skins) { + GLTFDocument::set_from_array(skins, p_skins); +} + +Array GLTFState::get_cameras() { + return GLTFDocument::to_array(cameras); +} + +void GLTFState::set_cameras(Array p_cameras) { + GLTFDocument::set_from_array(cameras, p_cameras); +} + +Array GLTFState::get_lights() { + return GLTFDocument::to_array(lights); +} + +void GLTFState::set_lights(Array p_lights) { + GLTFDocument::set_from_array(lights, p_lights); +} + +Array GLTFState::get_unique_names() { + return GLTFDocument::to_array(unique_names); +} + +void GLTFState::set_unique_names(Array p_unique_names) { + GLTFDocument::set_from_array(unique_names, p_unique_names); +} + +Array GLTFState::get_skeletons() { + return GLTFDocument::to_array(skeletons); +} + +void GLTFState::set_skeletons(Array p_skeletons) { + GLTFDocument::set_from_array(skeletons, p_skeletons); +} + +Dictionary GLTFState::get_skeleton_to_node() { + return GLTFDocument::to_dict(skeleton_to_node); +} + +void GLTFState::set_skeleton_to_node(Dictionary p_skeleton_to_node) { + GLTFDocument::set_from_dict(skeleton_to_node, p_skeleton_to_node); +} + +Array GLTFState::get_animations() { + return GLTFDocument::to_array(animations); +} + +void GLTFState::set_animations(Array p_animations) { + GLTFDocument::set_from_array(animations, p_animations); +} + +Node *GLTFState::get_scene_node(GLTFNodeIndex idx) { + if (!scene_nodes.has(idx)) { + return nullptr; + } + return scene_nodes[idx]; +} + +int GLTFState::get_animation_players_count(int idx) { + return animation_players.size(); +} + +AnimationPlayer *GLTFState::get_animation_player(int idx) { + ERR_FAIL_INDEX_V(idx, animation_players.size(), nullptr); + return animation_players[idx]; +} diff --git a/modules/gltf/gltf_state.h b/modules/gltf/gltf_state.h new file mode 100644 index 0000000000..f21472ad1b --- /dev/null +++ b/modules/gltf/gltf_state.h @@ -0,0 +1,180 @@ +/*************************************************************************/ +/* gltf_state.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef GLTF_STATE_H +#define GLTF_STATE_H + +#include "core/io/resource.h" +#include "core/templates/vector.h" +#include "editor_scene_importer_gltf.h" +#include "gltf_accessor.h" +#include "gltf_animation.h" +#include "gltf_buffer_view.h" +#include "gltf_camera.h" +#include "gltf_document.h" +#include "gltf_light.h" +#include "gltf_mesh.h" +#include "gltf_node.h" +#include "gltf_skeleton.h" +#include "gltf_skin.h" +#include "gltf_texture.h" +#include "scene/animation/animation_player.h" +#include "scene/resources/texture.h" + +class GLTFState : public Resource { + GDCLASS(GLTFState, Resource); + friend class GLTFDocument; + friend class PackedSceneGLTF; + + Dictionary json; + int major_version = 0; + int minor_version = 0; + Vector<uint8_t> glb_data; + + bool use_named_skin_binds = false; + + Vector<Ref<GLTFNode>> nodes; + Vector<Vector<uint8_t>> buffers; + Vector<Ref<GLTFBufferView>> buffer_views; + Vector<Ref<GLTFAccessor>> accessors; + + Vector<Ref<GLTFMesh>> meshes; // meshes are loaded directly, no reason not to. 
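+ // Not exposed as properties: animation_players is read through get_animation_player() and get_animation_players_count(), and material_cache maps a Godot material to its glTF material index.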
+ + Vector<AnimationPlayer *> animation_players; + Map<Ref<BaseMaterial3D>, GLTFMaterialIndex> material_cache; + Vector<Ref<BaseMaterial3D>> materials; + + String scene_name; + Vector<int> root_nodes; + Vector<Ref<GLTFTexture>> textures; + Vector<Ref<Texture2D>> images; + + Vector<Ref<GLTFSkin>> skins; + Vector<Ref<GLTFCamera>> cameras; + Vector<Ref<GLTFLight>> lights; + Set<String> unique_names; + + Vector<Ref<GLTFSkeleton>> skeletons; + Map<GLTFSkeletonIndex, GLTFNodeIndex> skeleton_to_node; + Vector<Ref<GLTFAnimation>> animations; + Map<GLTFNodeIndex, Node *> scene_nodes; + +protected: + static void _bind_methods(); + +public: + Dictionary get_json(); + void set_json(Dictionary p_json); + + int get_major_version(); + void set_major_version(int p_major_version); + + int get_minor_version(); + void set_minor_version(int p_minor_version); + + Vector<uint8_t> get_glb_data(); + void set_glb_data(Vector<uint8_t> p_glb_data); + + bool get_use_named_skin_binds(); + void set_use_named_skin_binds(bool p_use_named_skin_binds); + + Array get_nodes(); + void set_nodes(Array p_nodes); + + Array get_buffers(); + void set_buffers(Array p_buffers); + + Array get_buffer_views(); + void set_buffer_views(Array p_buffer_views); + + Array get_accessors(); + void set_accessors(Array p_accessors); + + Array get_meshes(); + void set_meshes(Array p_meshes); + + Array get_materials(); + void set_materials(Array p_materials); + + String get_scene_name(); + void set_scene_name(String p_scene_name); + + Array get_root_nodes(); + void set_root_nodes(Array p_root_nodes); + + Array get_textures(); + void set_textures(Array p_textures); + + Array get_images(); + void set_images(Array p_images); + + Array get_skins(); + void set_skins(Array p_skins); + + Array get_cameras(); + void set_cameras(Array p_cameras); + + Array get_lights(); + void set_lights(Array p_lights); + + Array get_unique_names(); + void set_unique_names(Array p_unique_names); + + Array get_skeletons(); + void set_skeletons(Array p_skeletons); + + Dictionary get_skeleton_to_node(); + void set_skeleton_to_node(Dictionary p_skeleton_to_node); + + Array get_animations(); + void set_animations(Array p_animations); + + Node *get_scene_node(GLTFNodeIndex idx); + + int get_animation_players_count(int idx); + + AnimationPlayer *get_animation_player(int idx); + + //void set_scene_nodes(Map<GLTFNodeIndex, Node *> p_scene_nodes) { + // this->scene_nodes = p_scene_nodes; + //} + + //void set_animation_players(Vector<AnimationPlayer *> p_animation_players) { + // this->animation_players = p_animation_players; + //} + + //Map<Ref<Material>, GLTFMaterialIndex> get_material_cache() { + // return this->material_cache; + //} + //void set_material_cache(Map<Ref<Material>, GLTFMaterialIndex> p_material_cache) { + // this->material_cache = p_material_cache; + //} +}; +#endif // GLTF_STATE_H diff --git a/modules/gltf/gltf_texture.cpp b/modules/gltf/gltf_texture.cpp new file mode 100644 index 0000000000..55434a5047 --- /dev/null +++ b/modules/gltf/gltf_texture.cpp @@ -0,0 +1,46 @@ +/*************************************************************************/ +/* gltf_texture.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "gltf_texture.h" + +void GLTFTexture::_bind_methods() { + ClassDB::bind_method(D_METHOD("get_src_image"), &GLTFTexture::get_src_image); + ClassDB::bind_method(D_METHOD("set_src_image", "src_image"), &GLTFTexture::set_src_image); + + ADD_PROPERTY(PropertyInfo(Variant::INT, "src_image"), "set_src_image", "get_src_image"); // int +} + +GLTFImageIndex GLTFTexture::get_src_image() const { + return src_image; +} + +void GLTFTexture::set_src_image(GLTFImageIndex val) { + src_image = val; +} diff --git a/modules/gltf/gltf_texture.h b/modules/gltf/gltf_texture.h new file mode 100644 index 0000000000..5e0c9c307b --- /dev/null +++ b/modules/gltf/gltf_texture.h @@ -0,0 +1,51 @@ +/*************************************************************************/ +/* gltf_texture.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef GLTF_TEXTURE_H +#define GLTF_TEXTURE_H + +#include "core/io/resource.h" +#include "gltf_document.h" + +class GLTFTexture : public Resource { + GDCLASS(GLTFTexture, Resource); + +private: + GLTFImageIndex src_image; + +protected: + static void _bind_methods(); + +public: + GLTFImageIndex get_src_image() const; + void set_src_image(GLTFImageIndex val); +}; + +#endif // GLTF_TEXTURE_H diff --git a/modules/gltf/register_types.cpp b/modules/gltf/register_types.cpp new file mode 100644 index 0000000000..bd5775af34 --- /dev/null +++ b/modules/gltf/register_types.cpp @@ -0,0 +1,88 @@ +/*************************************************************************/ +/* register_types.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "register_types.h" + +#include "editor/editor_node.h" +#include "editor_scene_exporter_gltf_plugin.h" +#include "editor_scene_importer_gltf.h" +#include "gltf_accessor.h" +#include "gltf_animation.h" +#include "gltf_buffer_view.h" +#include "gltf_camera.h" +#include "gltf_document.h" +#include "gltf_light.h" +#include "gltf_mesh.h" +#include "gltf_node.h" +#include "gltf_skeleton.h" +#include "gltf_skin.h" +#include "gltf_spec_gloss.h" +#include "gltf_state.h" +#include "gltf_texture.h" + +#ifndef _3D_DISABLED +#ifdef TOOLS_ENABLED +static void _editor_init() { + Ref<EditorSceneImporterGLTF> import_gltf; + import_gltf.instance(); + ResourceImporterScene::get_singleton()->add_importer(import_gltf); +} +#endif +#endif + +void register_gltf_types() { +#ifndef _3D_DISABLED +#ifdef TOOLS_ENABLED + ClassDB::register_class<EditorSceneImporterGLTF>(); + ClassDB::APIType prev_api = ClassDB::get_current_api(); + ClassDB::set_current_api(ClassDB::API_EDITOR); + EditorPlugins::add_by_type<SceneExporterGLTFPlugin>(); + ClassDB::set_current_api(prev_api); + EditorNode::add_init_callback(_editor_init); +#endif + ClassDB::register_class<GLTFSpecGloss>(); + ClassDB::register_class<GLTFNode>(); + ClassDB::register_class<GLTFAnimation>(); + ClassDB::register_class<GLTFBufferView>(); + ClassDB::register_class<GLTFAccessor>(); + ClassDB::register_class<GLTFTexture>(); + ClassDB::register_class<GLTFSkeleton>(); + ClassDB::register_class<GLTFSkin>(); + ClassDB::register_class<GLTFMesh>(); + ClassDB::register_class<GLTFCamera>(); + ClassDB::register_class<GLTFLight>(); + ClassDB::register_class<GLTFState>(); + ClassDB::register_class<GLTFDocument>(); + ClassDB::register_class<PackedSceneGLTF>(); +#endif +} + +void unregister_gltf_types() { +} diff --git a/modules/gltf/register_types.h b/modules/gltf/register_types.h new file mode 100644 index 0000000000..ffbc586ade --- /dev/null +++ b/modules/gltf/register_types.h @@ -0,0 +1,32 @@ +/*************************************************************************/ +/* register_types.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. 
*/ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +void register_gltf_types(); +void unregister_gltf_types(); diff --git a/modules/text_server_adv/dynamic_font_adv.cpp b/modules/text_server_adv/dynamic_font_adv.cpp index 08c4ad2727..99d78a5299 100644 --- a/modules/text_server_adv/dynamic_font_adv.cpp +++ b/modules/text_server_adv/dynamic_font_adv.cpp @@ -432,8 +432,8 @@ DynamicFontDataAdvanced::Character DynamicFontDataAdvanced::bitmap_to_character( } Character chr; - chr.align = Vector2(xofs, -yofs) * p_data->scale_color_font / oversampling; - chr.advance = advance * p_data->scale_color_font / oversampling; + chr.align = (Vector2(xofs, -yofs) * p_data->scale_color_font / oversampling).round(); + chr.advance = (advance * p_data->scale_color_font / oversampling).round(); chr.texture_idx = tex_pos.index; chr.found = true; diff --git a/modules/text_server_fb/dynamic_font_fb.cpp b/modules/text_server_fb/dynamic_font_fb.cpp index 6731870e8f..ca9e5b580b 100644 --- a/modules/text_server_fb/dynamic_font_fb.cpp +++ b/modules/text_server_fb/dynamic_font_fb.cpp @@ -317,8 +317,8 @@ DynamicFontDataFallback::Character DynamicFontDataFallback::bitmap_to_character( } Character chr; - chr.align = Vector2(xofs, -yofs) * p_data->scale_color_font / oversampling; - chr.advance = advance * p_data->scale_color_font / oversampling; + chr.align = (Vector2(xofs, -yofs) * p_data->scale_color_font / oversampling).round(); + chr.advance = (advance * p_data->scale_color_font / oversampling).round(); chr.texture_idx = tex_pos.index; chr.found = true; diff --git a/platform/android/java_godot_wrapper.cpp b/platform/android/java_godot_wrapper.cpp index 7919e47b5c..818e55476e 100644 --- a/platform/android/java_godot_wrapper.cpp +++ b/platform/android/java_godot_wrapper.cpp @@ -103,7 +103,7 @@ jobject GodotJavaWrapper::get_member_object(const char *p_name, const char *p_cl jobject GodotJavaWrapper::get_class_loader() { if (_get_class_loader) { JNIEnv *env = ThreadAndroid::get_env(); - return env->CallObjectMethod(godot_instance, _get_class_loader); + return env->CallObjectMethod(activity, _get_class_loader); } else { return nullptr; } diff --git a/platform/osx/display_server_osx.mm b/platform/osx/display_server_osx.mm index c4c2426f1f..8d82119ae2 100644 --- a/platform/osx/display_server_osx.mm +++ b/platform/osx/display_server_osx.mm @@ -751,28 +751,32 @@ static const NSRange kEmptyRange = { NSNotFound, 0 }; ERR_FAIL_COND_V(!DS_OSX->windows.has(window_id), NO); DisplayServerOSX::WindowData &wd = DS_OSX->windows[window_id]; - NSPasteboard *pboard = [sender draggingPasteboard]; -#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101400 - NSArray<NSURL *> *filenames = [pboard propertyListForType:NSPasteboardTypeFileURL]; -#else - NSArray *filenames = [pboard propertyListForType:NSFilenamesPboardType]; -#endif + if (!wd.drop_files_callback.is_null()) { + Vector<String> files; + NSPasteboard *pboard = [sender draggingPasteboard]; - Vector<String> files; - for (NSUInteger i = 0; i < filenames.count; i++) { 
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101400 - NSString *ns = [[filenames objectAtIndex:i] path]; + NSArray *items = pboard.pasteboardItems; + for (NSPasteboardItem *item in items) { + NSString *path = [item stringForType:NSPasteboardTypeFileURL]; + NSString *ns = [NSURL URLWithString:path].path; + char *utfs = strdup([ns UTF8String]); + String ret; + ret.parse_utf8(utfs); + free(utfs); + files.push_back(ret); + } #else - NSString *ns = [filenames objectAtIndex:i]; + NSArray *filenames = [pboard propertyListForType:NSFilenamesPboardType]; + for (NSString *ns in filenames) { + char *utfs = strdup([ns UTF8String]); + String ret; + ret.parse_utf8(utfs); + free(utfs); + files.push_back(ret); + } #endif - char *utfs = strdup([ns UTF8String]); - String ret; - ret.parse_utf8(utfs); - free(utfs); - files.push_back(ret); - } - if (!wd.drop_files_callback.is_null()) { Variant v = files; Variant *vp = &v; Variant ret; diff --git a/scene/3d/physics_body_3d.cpp b/scene/3d/physics_body_3d.cpp index 15cd2238e1..d9e19db4bd 100644 --- a/scene/3d/physics_body_3d.cpp +++ b/scene/3d/physics_body_3d.cpp @@ -1971,7 +1971,7 @@ bool PhysicalBone3D::_set(const StringName &p_name, const Variant &p_value) { } if (joint_data) { - if (joint_data->_set(p_name, p_value)) { + if (joint_data->_set(p_name, p_value, joint)) { #ifdef TOOLS_ENABLED if (get_gizmo().is_valid()) { get_gizmo()->redraw(); diff --git a/scene/3d/physics_body_3d.h b/scene/3d/physics_body_3d.h index 58eebc90ce..cbf16863fb 100644 --- a/scene/3d/physics_body_3d.h +++ b/scene/3d/physics_body_3d.h @@ -370,7 +370,7 @@ public: virtual JointType get_joint_type() { return JOINT_TYPE_NONE; } /// "j" is used to set the parameter inside the PhysicsServer3D - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; @@ -380,7 +380,7 @@ public: struct PinJointData : public JointData { virtual JointType get_joint_type() { return JOINT_TYPE_PIN; } - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; @@ -394,7 +394,7 @@ public: struct ConeJointData : public JointData { virtual JointType get_joint_type() { return JOINT_TYPE_CONE; } - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; @@ -411,7 +411,7 @@ public: struct HingeJointData : public JointData { virtual JointType get_joint_type() { return JOINT_TYPE_HINGE; } - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; @@ -431,7 +431,7 @@ public: struct SliderJointData : public JointData { virtual JointType get_joint_type() { return JOINT_TYPE_SLIDER; } - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const 
StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; @@ -478,7 +478,7 @@ public: virtual JointType get_joint_type() { return JOINT_TYPE_6DOF; } - virtual bool _set(const StringName &p_name, const Variant &p_value, RID j = RID()); + virtual bool _set(const StringName &p_name, const Variant &p_value, RID j); virtual bool _get(const StringName &p_name, Variant &r_ret) const; virtual void _get_property_list(List<PropertyInfo> *p_list) const; diff --git a/scene/gui/progress_bar.cpp b/scene/gui/progress_bar.cpp index 1344d010ae..c111ddff58 100644 --- a/scene/gui/progress_bar.cpp +++ b/scene/gui/progress_bar.cpp @@ -74,7 +74,7 @@ void ProgressBar::_notification(int p_what) { if (percent_visible) { String txt = TS->format_number(itos(int(get_as_ratio() * 100))) + TS->percent_sign(); TextLine tl = TextLine(txt, font, font_size); - tl.draw(get_canvas_item(), Point2(get_size().width - tl.get_size().x, get_size().height - tl.get_size().y) / 2, font_color); + tl.draw(get_canvas_item(), (Point2(get_size().width - tl.get_size().x, get_size().height - tl.get_size().y) / 2).round(), font_color); } } } diff --git a/scene/gui/rich_text_label.cpp b/scene/gui/rich_text_label.cpp index 40138e458b..69b2c6ac7e 100644 --- a/scene/gui/rich_text_label.cpp +++ b/scene/gui/rich_text_label.cpp @@ -606,11 +606,11 @@ void RichTextLabel::_shape_line(ItemFrame *p_frame, int p_line, const Ref<Font> } } -float RichTextLabel::_draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p_ofs, int p_width, const Color &p_base_color, int p_outline_size, const Color &p_outline_color, const Color &p_font_color_shadow, bool p_shadow_as_outline, const Point2 &p_shadow_ofs) { +void RichTextLabel::_draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p_ofs, int p_width, const Color &p_base_color, int p_outline_size, const Color &p_outline_color, const Color &p_font_color_shadow, bool p_shadow_as_outline, const Point2 &p_shadow_ofs) { Vector2 off; - ERR_FAIL_COND_V(p_frame == nullptr, off.y); - ERR_FAIL_COND_V(p_line < 0 || p_line >= p_frame->lines.size(), off.y); + ERR_FAIL_COND(p_frame == nullptr); + ERR_FAIL_COND(p_line < 0 || p_line >= p_frame->lines.size()); Line &l = p_frame->lines.write[p_line]; @@ -619,7 +619,7 @@ float RichTextLabel::_draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p Variant meta; if (it_from == nullptr) { - return off.y; + return; } RID ci = get_canvas_item(); @@ -1040,8 +1040,6 @@ float RichTextLabel::_draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p } off.y += TS->shaped_text_get_descent(rid); } - - return off.y; } void RichTextLabel::_find_click(ItemFrame *p_frame, const Point2i &p_click, ItemFrame **r_click_frame, int *r_click_line, Item **r_click_item, int *r_click_char, bool *r_outside) { @@ -1065,7 +1063,7 @@ void RichTextLabel::_find_click(ItemFrame *p_frame, const Point2i &p_click, Item //TODO, change to binary search ? while (from_line < main->lines.size()) { - if (main->lines[from_line].offset.y + main->lines[from_line].text_buf->get_size().y + _get_text_rect().get_position().y >= vofs) { + if (main->lines[from_line].offset.y + main->lines[from_line].text_buf->get_size().y >= vofs) { break; } from_line++; @@ -1349,7 +1347,7 @@ void RichTextLabel::_notification(int p_what) { //TODO, change to binary search ? 
while (from_line < main->lines.size()) { - if (main->lines[from_line].offset.y + main->lines[from_line].text_buf->get_size().y + _get_text_rect().get_position().y >= vofs) { + if (main->lines[from_line].offset.y + main->lines[from_line].text_buf->get_size().y >= vofs) { break; } from_line++; @@ -1372,7 +1370,8 @@ void RichTextLabel::_notification(int p_what) { Point2 ofs = text_rect.get_position() + Vector2(0, main->lines[from_line].offset.y - vofs); while (ofs.y < size.height && from_line < main->lines.size()) { visible_line_count++; - ofs.y += _draw_line(main, from_line, ofs, text_rect.size.x, base_color, outline_size, outline_color, font_color_shadow, use_outline, shadow_ofs); + _draw_line(main, from_line, ofs, text_rect.size.x, base_color, outline_size, outline_color, font_color_shadow, use_outline, shadow_ofs); + ofs.y += main->lines[from_line].text_buf->get_size().y; from_line++; } } break; @@ -2062,14 +2061,14 @@ void RichTextLabel::_validate_line_caches(ItemFrame *p_frame) { int total_height = 0; if (p_frame->lines.size()) { - total_height = p_frame->lines[p_frame->lines.size() - 1].offset.y + p_frame->lines[p_frame->lines.size() - 1].text_buf->get_size().y + get_theme_stylebox("normal")->get_minimum_size().height; + total_height = p_frame->lines[p_frame->lines.size() - 1].offset.y + p_frame->lines[p_frame->lines.size() - 1].text_buf->get_size().y; } p_frame->first_resized_line = p_frame->lines.size(); updating_scroll = true; vscroll->set_max(total_height); - vscroll->set_page(size.height); + vscroll->set_page(text_rect.size.height); if (scroll_follow && scroll_following) { vscroll->set_value(total_height - size.height); } @@ -2098,7 +2097,7 @@ void RichTextLabel::_validate_line_caches(ItemFrame *p_frame) { int total_height = 0; if (p_frame->lines.size()) { - total_height = p_frame->lines[p_frame->lines.size() - 1].offset.y + p_frame->lines[p_frame->lines.size() - 1].text_buf->get_size().y + get_theme_stylebox("normal")->get_minimum_size().height; + total_height = p_frame->lines[p_frame->lines.size() - 1].offset.y + p_frame->lines[p_frame->lines.size() - 1].text_buf->get_size().y; } p_frame->first_invalid_line = p_frame->lines.size(); @@ -2106,7 +2105,7 @@ void RichTextLabel::_validate_line_caches(ItemFrame *p_frame) { updating_scroll = true; vscroll->set_max(total_height); - vscroll->set_page(size.height); + vscroll->set_page(text_rect.size.height); if (scroll_follow && scroll_following) { vscroll->set_value(total_height - size.height); } @@ -3649,7 +3648,7 @@ void RichTextLabel::install_effect(const Variant effect) { int RichTextLabel::get_content_height() const { int total_height = 0; if (main->lines.size()) { - total_height = main->lines[main->lines.size() - 1].offset.y + main->lines[main->lines.size() - 1].text_buf->get_size().y + get_theme_stylebox("normal")->get_minimum_size().height; + total_height = main->lines[main->lines.size() - 1].offset.y + main->lines[main->lines.size() - 1].text_buf->get_size().y; } return total_height; } diff --git a/scene/gui/rich_text_label.h b/scene/gui/rich_text_label.h index 93e48dd449..f1dac69dce 100644 --- a/scene/gui/rich_text_label.h +++ b/scene/gui/rich_text_label.h @@ -378,7 +378,7 @@ private: void _shape_line(ItemFrame *p_frame, int p_line, const Ref<Font> &p_base_font, int p_base_font_size, int p_width, int *r_char_offset); void _resize_line(ItemFrame *p_frame, int p_line, const Ref<Font> &p_base_font, int p_base_font_size, int p_width); - float _draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p_ofs, int p_width, const Color 
&p_base_color, int p_outline_size, const Color &p_outline_color, const Color &p_font_color_shadow, bool p_shadow_as_outline, const Point2 &shadow_ofs); + void _draw_line(ItemFrame *p_frame, int p_line, const Vector2 &p_ofs, int p_width, const Color &p_base_color, int p_outline_size, const Color &p_outline_color, const Color &p_font_color_shadow, bool p_shadow_as_outline, const Point2 &shadow_ofs); float _find_click_in_line(ItemFrame *p_frame, int p_line, const Vector2 &p_ofs, int p_width, const Point2i &p_click, ItemFrame **r_click_frame = nullptr, int *r_click_line = nullptr, Item **r_click_item = nullptr, int *r_click_char = nullptr); String _roman(int p_num, bool p_capitalize) const; diff --git a/scene/gui/scroll_container.cpp b/scene/gui/scroll_container.cpp index 65f1084dc2..942593df27 100644 --- a/scene/gui/scroll_container.cpp +++ b/scene/gui/scroll_container.cpp @@ -328,6 +328,7 @@ void ScrollContainer::_notification(int p_what) { if (rtl && v_scroll->is_visible_in_tree() && v_scroll->get_parent() == this) { r.position.x += v_scroll->get_minimum_size().x; } + r.position = r.position.floor(); fit_child_in_rect(c, r); } diff --git a/scene/gui/text_edit.cpp b/scene/gui/text_edit.cpp index 971d374667..27d82c7ac7 100644 --- a/scene/gui/text_edit.cpp +++ b/scene/gui/text_edit.cpp @@ -1245,7 +1245,7 @@ void TextEdit::_notification(int p_what) { int gl_size = visual.size(); ofs_y += ldata->get_line_ascent(line_wrap_index); - float char_ofs = 0.f; + int char_ofs = 0; for (int j = 0; j < gl_size; j++) { if (color_map.has(glyphs[j].start)) { current_color = color_map[glyphs[j].start].get("color"); diff --git a/servers/text_server.cpp b/servers/text_server.cpp index 30dfa60ba3..fa39185ac0 100644 --- a/servers/text_server.cpp +++ b/servers/text_server.cpp @@ -905,7 +905,7 @@ Vector<Vector2> TextServer::shaped_text_get_selection(RID p_shaped, int p_start, float off = 0.0f; for (int i = 0; i < v_size; i++) { for (int k = 0; k < glyphs[i].repeat; k++) { - if (glyphs[i].count > 0 && glyphs[i].index != 0) { + if ((glyphs[i].count > 0) && ((glyphs[i].index != 0) || ((glyphs[i].flags & GRAPHEME_IS_SPACE) == GRAPHEME_IS_SPACE))) { if (glyphs[i].start < end && glyphs[i].end > start) { // Grapheme fully in selection range. if (glyphs[i].start >= start && glyphs[i].end <= end) { @@ -1034,31 +1038,36 @@ int TextServer::shaped_text_hit_test_position(RID p_shaped, float p_coords) cons float off = 0.0f; for (int i = 0; i < v_size; i++) { - for (int k = 0; k < glyphs[i].repeat; k++) { - if (glyphs[i].count > 0) { - float advance = 0.f; - for (int j = 0; j < glyphs[i].count; j++) { - advance += glyphs[i + j].advance; + if (glyphs[i].count > 0) { + float advance = 0.f; + for (int j = 0; j < glyphs[i].count; j++) { + advance += glyphs[i + j].advance * glyphs[i + j].repeat; + } + if (((glyphs[i].flags & GRAPHEME_IS_VIRTUAL) == GRAPHEME_IS_VIRTUAL) && (p_coords >= off && p_coords < off + advance)) { + if ((glyphs[i].flags & GRAPHEME_IS_RTL) == GRAPHEME_IS_RTL) { + return glyphs[i].end; + } else { + return glyphs[i].start; } - // Place caret to the left of clicked grapheme. 
- if (p_coords >= off && p_coords < off + advance / 2) { - if ((glyphs[i].flags & GRAPHEME_IS_RTL) == GRAPHEME_IS_RTL) { - return glyphs[i].end; - } else { - return glyphs[i].start; - } + } + // Place caret to the left of clicked grapheme. + if (p_coords >= off && p_coords < off + advance / 2) { + if ((glyphs[i].flags & GRAPHEME_IS_RTL) == GRAPHEME_IS_RTL) { + return glyphs[i].end; + } else { + return glyphs[i].start; } - // Place caret to the right of clicked grapheme. - if (p_coords >= off + advance / 2 && p_coords < off + advance) { - if ((glyphs[i].flags & GRAPHEME_IS_RTL) == GRAPHEME_IS_RTL) { - return glyphs[i].start; - } else { - return glyphs[i].end; - } + } + // Place caret to the right of clicked grapheme. + if (p_coords >= off + advance / 2 && p_coords < off + advance) { + if ((glyphs[i].flags & GRAPHEME_IS_RTL) == GRAPHEME_IS_RTL) { + return glyphs[i].start; + } else { + return glyphs[i].end; } } - off += glyphs[i].advance; } + off += glyphs[i].advance * glyphs[i].repeat; } return 0; } diff --git a/thirdparty/README.md b/thirdparty/README.md index 8cb6424a1c..f31a9b864c 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -6,24 +6,6 @@ readability. Subcategories (`###` level) where needed are separated by a single empty line. -## assimp - -- Upstream: http://github.com/assimp/assimp -- Version: git (308db73d0b3c2d1870cd3e465eaa283692a4cf23, 2019) -- License: BSD-3-Clause - -Files extracted from upstream source: - -- Run `cmake .` in root folder to generate files -- `code/{CApi,Common,FBX,Material,PostProcessing}/` -- `contrib/utf8cpp/source/` -- `include/` -- `revision.h` -- `CREDITS` and `LICENSE` files -- `rm -f code/Common/ZipArchiveIOSystem.cpp include/assimp/ZipArchiveIOSystem.h - include/assimp/irrXMLWrapper.h` - - ## basis_universal - Upstream: https://github.com/BinomialLLC/basis_universal @@ -164,7 +146,7 @@ Files extracted from upstream source: ## glslang - Upstream: https://github.com/KhronosGroup/glslang -- Version: git (bacaef3237c515e40d1a24722be48c0a0b30f75f, 2020) +- Version: git (dd69df7f3dac26362e10b0f38efb9e47990f7537, 2020) - License: glslang Version should be kept in sync with the one of the used Vulkan SDK (see `vulkan` @@ -180,8 +162,6 @@ Files extracted from upstream source: - `LICENSE.txt` - Unnecessary files like `CMakeLists.txt` and `updateGrammar` removed. -Patches in the `patches` directory should be re-applied after updates. - ## Graphite engine - Upstream: https://github.com/silnrsi/graphite @@ -642,7 +622,7 @@ folder. ## vulkan - Upstream: https://github.com/KhronosGroup/Vulkan-Loader -- Version: sdk-1.2.154.0 (2020) +- Version: sdk-1.2.162.0 (2020) - License: Apache 2.0 Unless there is a specific reason to package a more recent version, please stick diff --git a/thirdparty/assimp/LICENSE b/thirdparty/assimp/LICENSE deleted file mode 100644 index 262606aff3..0000000000 --- a/thirdparty/assimp/LICENSE +++ /dev/null @@ -1,78 +0,0 @@ -Open Asset Import Library (assimp) - -Copyright (c) 2006-2016, assimp team -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -****************************************************************************** - -AN EXCEPTION applies to all files in the ./test/models-nonbsd folder. -These are 3d models for testing purposes, from various free sources -on the internet. They are - unless otherwise stated - copyright of -their respective creators, which may impose additional requirements -on the use of their work. For any of these models, see -<model-name>.source.txt for more legal information. Contact us if you -are a copyright holder and believe that we credited you inproperly or -if you don't want your files to appear in the repository. - - -****************************************************************************** - -Poly2Tri Copyright (c) 2009-2010, Poly2Tri Contributors -http://code.google.com/p/poly2tri/ - -All rights reserved. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of Poly2Tri nor the names of its contributors may be - used to endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/thirdparty/assimp/code/CApi/AssimpCExport.cpp b/thirdparty/assimp/code/CApi/AssimpCExport.cpp deleted file mode 100644 index 7557edcfc6..0000000000 --- a/thirdparty/assimp/code/CApi/AssimpCExport.cpp +++ /dev/null @@ -1,156 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file AssimpCExport.cpp -Assimp C export interface. See Exporter.cpp for some notes. -*/ - -#ifndef ASSIMP_BUILD_NO_EXPORT - -#include "CInterfaceIOWrapper.h" -#include <assimp/SceneCombiner.h> -#include "Common/ScenePrivate.h" -#include <assimp/Exporter.hpp> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API size_t aiGetExportFormatCount(void) -{ - return Exporter().GetExportFormatCount(); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API const aiExportFormatDesc* aiGetExportFormatDescription( size_t index) -{ - // Note: this is valid as the index always pertains to a built-in exporter, - // for which the returned structure is guaranteed to be of static storage duration. 
- Exporter exporter; - const aiExportFormatDesc* orig( exporter.GetExportFormatDescription( index ) ); - if (NULL == orig) { - return NULL; - } - - aiExportFormatDesc *desc = new aiExportFormatDesc; - desc->description = new char[ strlen( orig->description ) + 1 ](); - ::strncpy( (char*) desc->description, orig->description, strlen( orig->description ) ); - desc->fileExtension = new char[ strlen( orig->fileExtension ) + 1 ](); - ::strncpy( ( char* ) desc->fileExtension, orig->fileExtension, strlen( orig->fileExtension ) ); - desc->id = new char[ strlen( orig->id ) + 1 ](); - ::strncpy( ( char* ) desc->id, orig->id, strlen( orig->id ) ); - - return desc; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiReleaseExportFormatDescription( const aiExportFormatDesc *desc ) { - if (NULL == desc) { - return; - } - - delete [] desc->description; - delete [] desc->fileExtension; - delete [] desc->id; - delete desc; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiCopyScene(const aiScene* pIn, aiScene** pOut) -{ - if (!pOut || !pIn) { - return; - } - - SceneCombiner::CopyScene(pOut,pIn,true); - ScenePriv(*pOut)->mIsCopy = true; -} - - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiFreeScene(const C_STRUCT aiScene* pIn) -{ - // note: aiReleaseImport() is also able to delete scene copies, but in addition - // it also handles scenes with import metadata. - delete pIn; -} - - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiReturn aiExportScene( const aiScene* pScene, const char* pFormatId, const char* pFileName, unsigned int pPreprocessing ) -{ - return ::aiExportSceneEx(pScene,pFormatId,pFileName,NULL,pPreprocessing); -} - - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiReturn aiExportSceneEx( const aiScene* pScene, const char* pFormatId, const char* pFileName, aiFileIO* pIO, unsigned int pPreprocessing ) -{ - Exporter exp; - - if (pIO) { - exp.SetIOHandler(new CIOSystemWrapper(pIO)); - } - return exp.Export(pScene,pFormatId,pFileName,pPreprocessing); -} - - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API const C_STRUCT aiExportDataBlob* aiExportSceneToBlob( const aiScene* pScene, const char* pFormatId, unsigned int pPreprocessing ) -{ - Exporter exp; - if (!exp.ExportToBlob(pScene,pFormatId,pPreprocessing)) { - return NULL; - } - const aiExportDataBlob* blob = exp.GetOrphanedBlob(); - ai_assert(blob); - - return blob; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API C_STRUCT void aiReleaseExportBlob( const aiExportDataBlob* pData ) -{ - delete pData; -} - -#endif // !ASSIMP_BUILD_NO_EXPORT diff --git a/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.cpp b/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.cpp deleted file mode 100644 index 5a3a49565a..0000000000 --- a/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.cpp +++ /dev/null @@ -1,136 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file aiFileIO -> IOSystem wrapper*/ - -#include "CInterfaceIOWrapper.h" - -namespace Assimp { - -CIOStreamWrapper::~CIOStreamWrapper(void) -{ - /* Various places depend on this destructor to close the file */ - if (mFile) { - mIO->mFileSystem->CloseProc(mIO->mFileSystem, mFile); - mFile = nullptr; - } -} - -// ................................................................... -size_t CIOStreamWrapper::Read(void* pvBuffer, - size_t pSize, - size_t pCount -){ - // need to typecast here as C has no void* - return mFile->ReadProc(mFile,(char*)pvBuffer,pSize,pCount); -} - -// ................................................................... -size_t CIOStreamWrapper::Write(const void* pvBuffer, - size_t pSize, - size_t pCount -){ - // need to typecast here as C has no void* - return mFile->WriteProc(mFile,(const char*)pvBuffer,pSize,pCount); -} - -// ................................................................... -aiReturn CIOStreamWrapper::Seek(size_t pOffset, - aiOrigin pOrigin -){ - return mFile->SeekProc(mFile,pOffset,pOrigin); -} - -// ................................................................... -size_t CIOStreamWrapper::Tell(void) const { - return mFile->TellProc(mFile); -} - -// ................................................................... -size_t CIOStreamWrapper::FileSize() const { - return mFile->FileSizeProc(mFile); -} - -// ................................................................... -void CIOStreamWrapper::Flush () { - return mFile->FlushProc(mFile); -} - -// ------------------------------------------------------------------------------------------------ -// Custom IOStream implementation for the C-API -bool CIOSystemWrapper::Exists( const char* pFile) const { - aiFile* p = mFileSystem->OpenProc(mFileSystem,pFile,"rb"); - if (p){ - mFileSystem->CloseProc(mFileSystem,p); - return true; - } - return false; -} - -// ................................................................... 
-char CIOSystemWrapper::getOsSeparator() const { -#ifndef _WIN32 - return '/'; -#else - return '\\'; -#endif -} - -// ................................................................... -IOStream* CIOSystemWrapper::Open(const char* pFile,const char* pMode) { - aiFile* p = mFileSystem->OpenProc(mFileSystem,pFile,pMode); - if (!p) { - return NULL; - } - return new CIOStreamWrapper(p, this); -} - -// ................................................................... -void CIOSystemWrapper::Close( IOStream* pFile) { - if (!pFile) { - return; - } - delete pFile; -} - -} diff --git a/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.h b/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.h deleted file mode 100644 index 2162320302..0000000000 --- a/thirdparty/assimp/code/CApi/CInterfaceIOWrapper.h +++ /dev/null @@ -1,99 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file aiFileIO -> IOSystem wrapper*/ - -#ifndef AI_CIOSYSTEM_H_INCLUDED -#define AI_CIOSYSTEM_H_INCLUDED - -#include <assimp/cfileio.h> -#include <assimp/IOStream.hpp> -#include <assimp/IOSystem.hpp> - -namespace Assimp { - -class CIOSystemWrapper; - -// ------------------------------------------------------------------------------------------------ -// Custom IOStream implementation for the C-API -class CIOStreamWrapper : public IOStream -{ -public: - explicit CIOStreamWrapper(aiFile* pFile, CIOSystemWrapper* io) - : mFile(pFile), - mIO(io) - {} - ~CIOStreamWrapper(void); - - size_t Read(void* pvBuffer, size_t pSize, size_t pCount); - size_t Write(const void* pvBuffer, size_t pSize, size_t pCount); - aiReturn Seek(size_t pOffset, aiOrigin pOrigin); - size_t Tell(void) const; - size_t FileSize() const; - void Flush(); - -private: - aiFile* mFile; - CIOSystemWrapper* mIO; -}; - -class CIOSystemWrapper : public IOSystem -{ - friend class CIOStreamWrapper; -public: - explicit CIOSystemWrapper(aiFileIO* pFile) - : mFileSystem(pFile) - {} - - bool Exists( const char* pFile) const; - char getOsSeparator() const; - IOStream* Open(const char* pFile,const char* pMode = "rb"); - void Close( IOStream* pFile); -private: - aiFileIO* mFileSystem; -}; - -} - -#endif - diff --git a/thirdparty/assimp/code/Common/Assimp.cpp b/thirdparty/assimp/code/Common/Assimp.cpp deleted file mode 100644 index 178b2c01d0..0000000000 --- a/thirdparty/assimp/code/Common/Assimp.cpp +++ /dev/null @@ -1,695 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ -/** @file Assimp.cpp - * @brief Implementation of the Plain-C API - */ - -#include <assimp/cimport.h> -#include <assimp/LogStream.hpp> -#include <assimp/DefaultLogger.hpp> -#include <assimp/Importer.hpp> -#include <assimp/importerdesc.h> -#include <assimp/scene.h> -#include <assimp/GenericProperty.h> -#include <assimp/Exceptional.h> -#include <assimp/BaseImporter.h> - -#include "CApi/CInterfaceIOWrapper.h" -#include "Importer.h" -#include "ScenePrivate.h" - -#include <list> - -// ------------------------------------------------------------------------------------------------ -#ifndef ASSIMP_BUILD_SINGLETHREADED -# include <thread> -# include <mutex> -#endif -// ------------------------------------------------------------------------------------------------ -using namespace Assimp; - -namespace Assimp { - // underlying structure for aiPropertyStore - typedef BatchLoader::PropertyMap PropertyMap; - - /** Stores the LogStream objects for all active C log streams */ - struct mpred { - bool operator () (const aiLogStream& s0, const aiLogStream& s1) const { - return s0.callback<s1.callback&&s0.user<s1.user; - } - }; - typedef std::map<aiLogStream, Assimp::LogStream*, mpred> LogStreamMap; - - /** Stores the LogStream objects allocated by #aiGetPredefinedLogStream */ - typedef std::list<Assimp::LogStream*> PredefLogStreamMap; - - /** Local storage of all active log streams */ - static LogStreamMap gActiveLogStreams; - - /** Local storage of LogStreams allocated by #aiGetPredefinedLogStream */ - static PredefLogStreamMap gPredefinedStreams; - - /** Error message of the last failed import process */ - static std::string gLastErrorString; - - /** Verbose logging active or not? */ - static aiBool gVerboseLogging = false; - - /** will return all registered importers. */ - void GetImporterInstanceList(std::vector< BaseImporter* >& out); - - /** will delete all registered importers. */ - void DeleteImporterInstanceList(std::vector< BaseImporter* >& out); -} // namespace assimp - - -#ifndef ASSIMP_BUILD_SINGLETHREADED -/** Global mutex to manage the access to the log-stream map */ -static std::mutex gLogStreamMutex; -#endif - -// ------------------------------------------------------------------------------------------------ -// Custom LogStream implementation for the C-API -class LogToCallbackRedirector : public LogStream { -public: - explicit LogToCallbackRedirector(const aiLogStream& s) - : stream (s) { - ai_assert(NULL != s.callback); - } - - ~LogToCallbackRedirector() { -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(gLogStreamMutex); -#endif - // (HACK) Check whether the 'stream.user' pointer points to a - // custom LogStream allocated by #aiGetPredefinedLogStream. - // In this case, we need to delete it, too. Of course, this - // might cause strange problems, but the chance is quite low. - - PredefLogStreamMap::iterator it = std::find(gPredefinedStreams.begin(), - gPredefinedStreams.end(), (Assimp::LogStream*)stream.user); - - if (it != gPredefinedStreams.end()) { - delete *it; - gPredefinedStreams.erase(it); - } - } - - /** @copydoc LogStream::write */ - void write(const char* message) { - stream.callback(message,stream.user); - } - -private: - aiLogStream stream; -}; - -// ------------------------------------------------------------------------------------------------ -void ReportSceneNotFoundError() { - ASSIMP_LOG_ERROR("Unable to find the Assimp::Importer for this aiScene. 
" - "The C-API does not accept scenes produced by the C++ API and vice versa"); - - ai_assert(false); -} - -// ------------------------------------------------------------------------------------------------ -// Reads the given file and returns its content. -const aiScene* aiImportFile( const char* pFile, unsigned int pFlags) { - return aiImportFileEx(pFile,pFlags,NULL); -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* aiImportFileEx( const char* pFile, unsigned int pFlags, aiFileIO* pFS) { - return aiImportFileExWithProperties(pFile, pFlags, pFS, NULL); -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* aiImportFileExWithProperties( const char* pFile, unsigned int pFlags, - aiFileIO* pFS, const aiPropertyStore* props) { - ai_assert(NULL != pFile); - - const aiScene* scene = NULL; - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // create an Importer for this file - Assimp::Importer* imp = new Assimp::Importer(); - - // copy properties - if(props) { - const PropertyMap* pp = reinterpret_cast<const PropertyMap*>(props); - ImporterPimpl* pimpl = imp->Pimpl(); - pimpl->mIntProperties = pp->ints; - pimpl->mFloatProperties = pp->floats; - pimpl->mStringProperties = pp->strings; - pimpl->mMatrixProperties = pp->matrices; - } - // setup a custom IO system if necessary - if (pFS) { - imp->SetIOHandler( new CIOSystemWrapper (pFS) ); - } - - // and have it read the file - scene = imp->ReadFile( pFile, pFlags); - - // if succeeded, store the importer in the scene and keep it alive - if( scene) { - ScenePrivateData* priv = const_cast<ScenePrivateData*>( ScenePriv(scene) ); - priv->mOrigImporter = imp; - } else { - // if failed, extract error code and destroy the import - gLastErrorString = imp->GetErrorString(); - delete imp; - } - - // return imported data. 
If the import failed the pointer is NULL anyways - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - - return scene; -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* aiImportFileFromMemory( - const char* pBuffer, - unsigned int pLength, - unsigned int pFlags, - const char* pHint) -{ - return aiImportFileFromMemoryWithProperties(pBuffer, pLength, pFlags, pHint, NULL); -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* aiImportFileFromMemoryWithProperties( - const char* pBuffer, - unsigned int pLength, - unsigned int pFlags, - const char* pHint, - const aiPropertyStore* props) -{ - ai_assert( NULL != pBuffer ); - ai_assert( 0 != pLength ); - - const aiScene* scene = NULL; - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // create an Importer for this file - Assimp::Importer* imp = new Assimp::Importer(); - - // copy properties - if(props) { - const PropertyMap* pp = reinterpret_cast<const PropertyMap*>(props); - ImporterPimpl* pimpl = imp->Pimpl(); - pimpl->mIntProperties = pp->ints; - pimpl->mFloatProperties = pp->floats; - pimpl->mStringProperties = pp->strings; - pimpl->mMatrixProperties = pp->matrices; - } - - // and have it read the file from the memory buffer - scene = imp->ReadFileFromMemory( pBuffer, pLength, pFlags,pHint); - - // if succeeded, store the importer in the scene and keep it alive - if( scene) { - ScenePrivateData* priv = const_cast<ScenePrivateData*>( ScenePriv(scene) ); - priv->mOrigImporter = imp; - } - else { - // if failed, extract error code and destroy the import - gLastErrorString = imp->GetErrorString(); - delete imp; - } - // return imported data. If the import failed the pointer is NULL anyways - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - return scene; -} - -// ------------------------------------------------------------------------------------------------ -// Releases all resources associated with the given import process. 
-void aiReleaseImport( const aiScene* pScene) -{ - if (!pScene) { - return; - } - - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // find the importer associated with this data - const ScenePrivateData* priv = ScenePriv(pScene); - if( !priv || !priv->mOrigImporter) { - delete pScene; - } - else { - // deleting the Importer also deletes the scene - // Note: the reason that this is not written as 'delete priv->mOrigImporter' - // is a suspected bug in gcc 4.4+ (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=52339) - Importer* importer = priv->mOrigImporter; - delete importer; - } - - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API const aiScene* aiApplyPostProcessing(const aiScene* pScene, - unsigned int pFlags) -{ - const aiScene* sc = NULL; - - - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // find the importer associated with this data - const ScenePrivateData* priv = ScenePriv(pScene); - if( !priv || !priv->mOrigImporter) { - ReportSceneNotFoundError(); - return NULL; - } - - sc = priv->mOrigImporter->ApplyPostProcessing(pFlags); - - if (!sc) { - aiReleaseImport(pScene); - return NULL; - } - - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - return sc; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API const aiScene *aiApplyCustomizedPostProcessing( const aiScene *scene, - BaseProcess* process, - bool requestValidation ) { - const aiScene* sc( NULL ); - - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // find the importer associated with this data - const ScenePrivateData* priv = ScenePriv( scene ); - if ( NULL == priv || NULL == priv->mOrigImporter ) { - ReportSceneNotFoundError(); - return NULL; - } - - sc = priv->mOrigImporter->ApplyCustomizedPostProcessing( process, requestValidation ); - - if ( !sc ) { - aiReleaseImport( scene ); - return NULL; - } - - ASSIMP_END_EXCEPTION_REGION( const aiScene* ); - - return sc; -} - -// ------------------------------------------------------------------------------------------------ -void CallbackToLogRedirector (const char* msg, char* dt) -{ - ai_assert( NULL != msg ); - ai_assert( NULL != dt ); - LogStream* s = (LogStream*)dt; - - s->write(msg); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiLogStream aiGetPredefinedLogStream(aiDefaultLogStream pStream,const char* file) -{ - aiLogStream sout; - - ASSIMP_BEGIN_EXCEPTION_REGION(); - LogStream* stream = LogStream::createDefaultStream(pStream,file); - if (!stream) { - sout.callback = NULL; - sout.user = NULL; - } - else { - sout.callback = &CallbackToLogRedirector; - sout.user = (char*)stream; - } - gPredefinedStreams.push_back(stream); - ASSIMP_END_EXCEPTION_REGION(aiLogStream); - return sout; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiAttachLogStream( const aiLogStream* stream ) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(gLogStreamMutex); -#endif - - LogStream* lg = new LogToCallbackRedirector(*stream); - gActiveLogStreams[*stream] = lg; - - if (DefaultLogger::isNullLogger()) { - DefaultLogger::create(NULL,(gVerboseLogging == AI_TRUE ? 
Logger::VERBOSE : Logger::NORMAL)); - } - DefaultLogger::get()->attachStream(lg); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiReturn aiDetachLogStream( const aiLogStream* stream) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(gLogStreamMutex); -#endif - // find the log-stream associated with this data - LogStreamMap::iterator it = gActiveLogStreams.find( *stream); - // it should be there... else the user is playing fools with us - if( it == gActiveLogStreams.end()) { - return AI_FAILURE; - } - DefaultLogger::get()->detatchStream( it->second ); - delete it->second; - - gActiveLogStreams.erase( it); - - if (gActiveLogStreams.empty()) { - DefaultLogger::kill(); - } - ASSIMP_END_EXCEPTION_REGION(aiReturn); - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiDetachAllLogStreams(void) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(gLogStreamMutex); -#endif - Logger *logger( DefaultLogger::get() ); - if ( NULL == logger ) { - return; - } - - for (LogStreamMap::iterator it = gActiveLogStreams.begin(); it != gActiveLogStreams.end(); ++it) { - logger->detatchStream( it->second ); - delete it->second; - } - gActiveLogStreams.clear(); - DefaultLogger::kill(); - - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiEnableVerboseLogging(aiBool d) -{ - if (!DefaultLogger::isNullLogger()) { - DefaultLogger::get()->setLogSeverity((d == AI_TRUE ? Logger::VERBOSE : Logger::NORMAL)); - } - gVerboseLogging = d; -} - -// ------------------------------------------------------------------------------------------------ -// Returns the error text of the last failed import process. -const char* aiGetErrorString() -{ - return gLastErrorString.c_str(); -} - -// ----------------------------------------------------------------------------------------------- -// Return the description of a importer given its index -const aiImporterDesc* aiGetImportFormatDescription( size_t pIndex) -{ - return Importer().GetImporterInfo(pIndex); -} - -// ----------------------------------------------------------------------------------------------- -// Return the number of importers -size_t aiGetImportFormatCount(void) -{ - return Importer().GetImporterCount(); -} - -// ------------------------------------------------------------------------------------------------ -// Returns the error text of the last failed import process. -aiBool aiIsExtensionSupported(const char* szExtension) -{ - ai_assert(NULL != szExtension); - aiBool candoit=AI_FALSE; - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // FIXME: no need to create a temporary Importer instance just for that .. - Assimp::Importer tmp; - candoit = tmp.IsExtensionSupported(std::string(szExtension)) ? AI_TRUE : AI_FALSE; - - ASSIMP_END_EXCEPTION_REGION(aiBool); - return candoit; -} - -// ------------------------------------------------------------------------------------------------ -// Get a list of all file extensions supported by ASSIMP -void aiGetExtensionList(aiString* szOut) -{ - ai_assert(NULL != szOut); - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // FIXME: no need to create a temporary Importer instance just for that .. 
- Assimp::Importer tmp; - tmp.GetExtensionList(*szOut); - - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Get the memory requirements for a particular import. -void aiGetMemoryRequirements(const C_STRUCT aiScene* pIn, - C_STRUCT aiMemoryInfo* in) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // find the importer associated with this data - const ScenePrivateData* priv = ScenePriv(pIn); - if( !priv || !priv->mOrigImporter) { - ReportSceneNotFoundError(); - return; - } - - return priv->mOrigImporter->GetMemoryRequirements(*in); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiPropertyStore* aiCreatePropertyStore(void) -{ - return reinterpret_cast<aiPropertyStore*>( new PropertyMap() ); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiReleasePropertyStore(aiPropertyStore* p) -{ - delete reinterpret_cast<PropertyMap*>(p); -} - -// ------------------------------------------------------------------------------------------------ -// Importer::SetPropertyInteger -ASSIMP_API void aiSetImportPropertyInteger(aiPropertyStore* p, const char* szName, int value) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - PropertyMap* pp = reinterpret_cast<PropertyMap*>(p); - SetGenericProperty<int>(pp->ints,szName,value); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Importer::SetPropertyFloat -ASSIMP_API void aiSetImportPropertyFloat(aiPropertyStore* p, const char* szName, ai_real value) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - PropertyMap* pp = reinterpret_cast<PropertyMap*>(p); - SetGenericProperty<ai_real>(pp->floats,szName,value); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Importer::SetPropertyString -ASSIMP_API void aiSetImportPropertyString(aiPropertyStore* p, const char* szName, - const C_STRUCT aiString* st) -{ - if (!st) { - return; - } - ASSIMP_BEGIN_EXCEPTION_REGION(); - PropertyMap* pp = reinterpret_cast<PropertyMap*>(p); - SetGenericProperty<std::string>(pp->strings,szName,std::string(st->C_Str())); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Importer::SetPropertyMatrix -ASSIMP_API void aiSetImportPropertyMatrix(aiPropertyStore* p, const char* szName, - const C_STRUCT aiMatrix4x4* mat) -{ - if (!mat) { - return; - } - ASSIMP_BEGIN_EXCEPTION_REGION(); - PropertyMap* pp = reinterpret_cast<PropertyMap*>(p); - SetGenericProperty<aiMatrix4x4>(pp->matrices,szName,*mat); - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Rotation matrix to quaternion -ASSIMP_API void aiCreateQuaternionFromMatrix(aiQuaternion* quat,const aiMatrix3x3* mat) -{ - ai_assert( NULL != quat ); - ai_assert( NULL != mat ); - *quat = aiQuaternion(*mat); -} - -// ------------------------------------------------------------------------------------------------ -// Matrix decomposition -ASSIMP_API void aiDecomposeMatrix(const aiMatrix4x4* mat,aiVector3D* scaling, - aiQuaternion* rotation, - aiVector3D* position) -{ - ai_assert( NULL != rotation ); - ai_assert( NULL != position ); 
- ai_assert( NULL != scaling ); - ai_assert( NULL != mat ); - mat->Decompose(*scaling,*rotation,*position); -} - -// ------------------------------------------------------------------------------------------------ -// Matrix transpose -ASSIMP_API void aiTransposeMatrix3(aiMatrix3x3* mat) -{ - ai_assert(NULL != mat); - mat->Transpose(); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiTransposeMatrix4(aiMatrix4x4* mat) -{ - ai_assert(NULL != mat); - mat->Transpose(); -} - -// ------------------------------------------------------------------------------------------------ -// Vector transformation -ASSIMP_API void aiTransformVecByMatrix3(aiVector3D* vec, - const aiMatrix3x3* mat) -{ - ai_assert( NULL != mat ); - ai_assert( NULL != vec); - *vec *= (*mat); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiTransformVecByMatrix4(aiVector3D* vec, - const aiMatrix4x4* mat) -{ - ai_assert( NULL != mat ); - ai_assert( NULL != vec ); - - *vec *= (*mat); -} - -// ------------------------------------------------------------------------------------------------ -// Matrix multiplication -ASSIMP_API void aiMultiplyMatrix4( - aiMatrix4x4* dst, - const aiMatrix4x4* src) -{ - ai_assert( NULL != dst ); - ai_assert( NULL != src ); - *dst = (*dst) * (*src); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiMultiplyMatrix3( - aiMatrix3x3* dst, - const aiMatrix3x3* src) -{ - ai_assert( NULL != dst ); - ai_assert( NULL != src ); - *dst = (*dst) * (*src); -} - -// ------------------------------------------------------------------------------------------------ -// Matrix identity -ASSIMP_API void aiIdentityMatrix3( - aiMatrix3x3* mat) -{ - ai_assert(NULL != mat); - *mat = aiMatrix3x3(); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API void aiIdentityMatrix4( - aiMatrix4x4* mat) -{ - ai_assert(NULL != mat); - *mat = aiMatrix4x4(); -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API C_STRUCT const aiImporterDesc* aiGetImporterDesc( const char *extension ) { - if( NULL == extension ) { - return NULL; - } - const aiImporterDesc *desc( NULL ); - std::vector< BaseImporter* > out; - GetImporterInstanceList( out ); - for( size_t i = 0; i < out.size(); ++i ) { - if( 0 == strncmp( out[ i ]->GetInfo()->mFileExtensions, extension, strlen( extension ) ) ) { - desc = out[ i ]->GetInfo(); - break; - } - } - - DeleteImporterInstanceList(out); - - return desc; -} - -// ------------------------------------------------------------------------------------------------ diff --git a/thirdparty/assimp/code/Common/BaseImporter.cpp b/thirdparty/assimp/code/Common/BaseImporter.cpp deleted file mode 100644 index 5c1e605549..0000000000 --- a/thirdparty/assimp/code/Common/BaseImporter.cpp +++ /dev/null @@ -1,656 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file BaseImporter.cpp - * @brief Implementation of BaseImporter - */ - -#include <assimp/BaseImporter.h> -#include <assimp/ParsingUtils.h> -#include "FileSystemFilter.h" -#include "Importer.h" -#include <assimp/ByteSwapper.h> -#include <assimp/scene.h> -#include <assimp/Importer.hpp> -#include <assimp/postprocess.h> -#include <assimp/importerdesc.h> - -#include <ios> -#include <list> -#include <memory> -#include <sstream> -#include <cctype> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -BaseImporter::BaseImporter() AI_NO_EXCEPT -: m_progress() { - /** - * Assimp Importer - * unit conversions available - * if you need another measurment unit add it below. - * it's currently defined in assimp that we prefer meters. - * - * NOTE: Initialised here rather than in the header file - * to workaround a VS2013 bug with brace initialisers - * */ - importerUnits[ImporterUnits::M] = 1.0; - importerUnits[ImporterUnits::CM] = 0.01; - importerUnits[ImporterUnits::MM] = 0.001; - importerUnits[ImporterUnits::INCHES] = 0.0254; - importerUnits[ImporterUnits::FEET] = 0.3048; -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -BaseImporter::~BaseImporter() { - // nothing to do here -} - -void BaseImporter::UpdateImporterScale( Importer* pImp ) -{ - ai_assert(pImp != nullptr); - ai_assert(importerScale != 0.0); - ai_assert(fileScale != 0.0); - - double activeScale = importerScale * fileScale; - - // Set active scaling - pImp->SetPropertyFloat( AI_CONFIG_APP_SCALE_KEY, static_cast<float>( activeScale) ); - - ASSIMP_LOG_DEBUG_F("UpdateImporterScale scale set: %f", activeScale ); -} - -// ------------------------------------------------------------------------------------------------ -// Imports the given file and returns the imported data. 
-aiScene* BaseImporter::ReadFile(Importer* pImp, const std::string& pFile, IOSystem* pIOHandler) { - - - m_progress = pImp->GetProgressHandler(); - if (nullptr == m_progress) { - return nullptr; - } - - ai_assert(m_progress); - - // Gather configuration properties for this run - SetupProperties( pImp ); - - // Construct a file system filter to improve our success ratio at reading external files - FileSystemFilter filter(pFile,pIOHandler); - - // create a scene object to hold the data - std::unique_ptr<aiScene> sc(new aiScene()); - - // dispatch importing - try - { - InternReadFile( pFile, sc.get(), &filter); - - // Calculate import scale hook - required because pImp not available anywhere else - // passes scale into ScaleProcess - UpdateImporterScale(pImp); - - - } catch( const std::exception& err ) { - // extract error description - m_ErrorText = err.what(); - ASSIMP_LOG_ERROR(m_ErrorText); - return nullptr; - } - - // return what we gathered from the import. - return sc.release(); -} - -// ------------------------------------------------------------------------------------------------ -void BaseImporter::SetupProperties(const Importer* pImp) -{ - // the default implementation does nothing -} - -// ------------------------------------------------------------------------------------------------ -void BaseImporter::GetExtensionList(std::set<std::string>& extensions) { - const aiImporterDesc* desc = GetInfo(); - ai_assert(desc != nullptr); - - const char* ext = desc->mFileExtensions; - ai_assert(ext != nullptr ); - - const char* last = ext; - do { - if (!*ext || *ext == ' ') { - extensions.insert(std::string(last,ext-last)); - ai_assert(ext-last > 0); - last = ext; - while(*last == ' ') { - ++last; - } - } - } - while(*ext++); -} - -// ------------------------------------------------------------------------------------------------ -/*static*/ bool BaseImporter::SearchFileHeaderForToken( IOSystem* pIOHandler, - const std::string& pFile, - const char** tokens, - unsigned int numTokens, - unsigned int searchBytes /* = 200 */, - bool tokensSol /* false */, - bool noAlphaBeforeTokens /* false */) -{ - ai_assert( nullptr != tokens ); - ai_assert( 0 != numTokens ); - ai_assert( 0 != searchBytes); - - if ( nullptr == pIOHandler ) { - return false; - } - - std::unique_ptr<IOStream> pStream (pIOHandler->Open(pFile)); - if (pStream.get() ) { - // read 200 characters from the file - std::unique_ptr<char[]> _buffer (new char[searchBytes+1 /* for the '\0' */]); - char *buffer( _buffer.get() ); - const size_t read( pStream->Read(buffer,1,searchBytes) ); - if( 0 == read ) { - return false; - } - - for( size_t i = 0; i < read; ++i ) { - buffer[ i ] = static_cast<char>( ::tolower( buffer[ i ] ) ); - } - - // It is not a proper handling of unicode files here ... - // ehm ... but it works in most cases. - char* cur = buffer,*cur2 = buffer,*end = &buffer[read]; - while (cur != end) { - if( *cur ) { - *cur2++ = *cur; - } - ++cur; - } - *cur2 = '\0'; - - std::string token; - for (unsigned int i = 0; i < numTokens; ++i ) { - ai_assert( nullptr != tokens[i] ); - const size_t len( strlen( tokens[ i ] ) ); - token.clear(); - const char *ptr( tokens[ i ] ); - for ( size_t tokIdx = 0; tokIdx < len; ++tokIdx ) { - token.push_back( static_cast<char>( tolower( *ptr ) ) ); - ++ptr; - } - const char* r = strstr( buffer, token.c_str() ); - if( !r ) { - continue; - } - // We need to make sure that we didn't accidentially identify the end of another token as our token, - // e.g. 
in a previous version the "gltf " present in some gltf files was detected as "f " - if (noAlphaBeforeTokens && (r != buffer && isalpha(r[-1]))) { - continue; - } - // We got a match, either we don't care where it is, or it happens to - // be in the beginning of the file / line - if (!tokensSol || r == buffer || r[-1] == '\r' || r[-1] == '\n') { - ASSIMP_LOG_DEBUG_F( "Found positive match for header keyword: ", tokens[i] ); - return true; - } - } - } - - return false; -} - -// ------------------------------------------------------------------------------------------------ -// Simple check for file extension -/*static*/ bool BaseImporter::SimpleExtensionCheck (const std::string& pFile, - const char* ext0, - const char* ext1, - const char* ext2) -{ - std::string::size_type pos = pFile.find_last_of('.'); - - // no file extension - can't read - if( pos == std::string::npos) - return false; - - const char* ext_real = & pFile[ pos+1 ]; - if( !ASSIMP_stricmp(ext_real,ext0) ) - return true; - - // check for other, optional, file extensions - if (ext1 && !ASSIMP_stricmp(ext_real,ext1)) - return true; - - if (ext2 && !ASSIMP_stricmp(ext_real,ext2)) - return true; - - return false; -} - -// ------------------------------------------------------------------------------------------------ -// Get file extension from path -std::string BaseImporter::GetExtension( const std::string& file ) { - std::string::size_type pos = file.find_last_of('.'); - - // no file extension at all - if (pos == std::string::npos) { - return ""; - } - - - // thanks to Andy Maloney for the hint - std::string ret = file.substr( pos + 1 ); - std::transform( ret.begin(), ret.end(), ret.begin(), ToLower<char>); - - return ret; -} - -// ------------------------------------------------------------------------------------------------ -// Check for magic bytes at the beginning of the file. -/* static */ bool BaseImporter::CheckMagicToken(IOSystem* pIOHandler, const std::string& pFile, - const void* _magic, unsigned int num, unsigned int offset, unsigned int size) -{ - ai_assert( size <= 16 ); - ai_assert( _magic ); - - if (!pIOHandler) { - return false; - } - union { - const char* magic; - const uint16_t* magic_u16; - const uint32_t* magic_u32; - }; - magic = reinterpret_cast<const char*>(_magic); - std::unique_ptr<IOStream> pStream (pIOHandler->Open(pFile)); - if (pStream.get() ) { - - // skip to offset - pStream->Seek(offset,aiOrigin_SET); - - // read 'size' characters from the file - union { - char data[16]; - uint16_t data_u16[8]; - uint32_t data_u32[4]; - }; - if(size != pStream->Read(data,1,size)) { - return false; - } - - for (unsigned int i = 0; i < num; ++i) { - // also check against big endian versions of tokens with size 2,4 - // that's just for convenience, the chance that we cause conflicts - // is quite low and it can save some lines and prevent nasty bugs - if (2 == size) { - uint16_t rev = *magic_u16; - ByteSwap::Swap(&rev); - if (data_u16[0] == *magic_u16 || data_u16[0] == rev) { - return true; - } - } - else if (4 == size) { - uint32_t rev = *magic_u32; - ByteSwap::Swap(&rev); - if (data_u32[0] == *magic_u32 || data_u32[0] == rev) { - return true; - } - } - else { - // any length ... 
just compare - if(!memcmp(magic,data,size)) { - return true; - } - } - magic += size; - } - } - return false; -} - -#ifdef ASSIMP_USE_HUNTER -# include <utf8/utf8.h> -#else -# include "../contrib/utf8cpp/source/utf8.h" -#endif - -// ------------------------------------------------------------------------------------------------ -// Convert to UTF8 data -void BaseImporter::ConvertToUTF8(std::vector<char>& data) -{ - //ConversionResult result; - if(data.size() < 8) { - throw DeadlyImportError("File is too small"); - } - - // UTF 8 with BOM - if((uint8_t)data[0] == 0xEF && (uint8_t)data[1] == 0xBB && (uint8_t)data[2] == 0xBF) { - ASSIMP_LOG_DEBUG("Found UTF-8 BOM ..."); - - std::copy(data.begin()+3,data.end(),data.begin()); - data.resize(data.size()-3); - return; - } - - - // UTF 32 BE with BOM - if(*((uint32_t*)&data.front()) == 0xFFFE0000) { - - // swap the endianness .. - for(uint32_t* p = (uint32_t*)&data.front(), *end = (uint32_t*)&data.back(); p <= end; ++p) { - AI_SWAP4P(p); - } - } - - // UTF 32 LE with BOM - if(*((uint32_t*)&data.front()) == 0x0000FFFE) { - ASSIMP_LOG_DEBUG("Found UTF-32 BOM ..."); - - std::vector<char> output; - int *ptr = (int*)&data[ 0 ]; - int *end = ptr + ( data.size() / sizeof(int) ) +1; - utf8::utf32to8( ptr, end, back_inserter(output)); - return; - } - - // UTF 16 BE with BOM - if(*((uint16_t*)&data.front()) == 0xFFFE) { - - // swap the endianness .. - for(uint16_t* p = (uint16_t*)&data.front(), *end = (uint16_t*)&data.back(); p <= end; ++p) { - ByteSwap::Swap2(p); - } - } - - // UTF 16 LE with BOM - if(*((uint16_t*)&data.front()) == 0xFEFF) { - ASSIMP_LOG_DEBUG("Found UTF-16 BOM ..."); - - std::vector<unsigned char> output; - utf8::utf16to8(data.begin(), data.end(), back_inserter(output)); - return; - } -} - -// ------------------------------------------------------------------------------------------------ -// Convert to UTF8 data to ISO-8859-1 -void BaseImporter::ConvertUTF8toISO8859_1(std::string& data) -{ - size_t size = data.size(); - size_t i = 0, j = 0; - - while(i < size) { - if ((unsigned char) data[i] < (size_t) 0x80) { - data[j] = data[i]; - } else if(i < size - 1) { - if((unsigned char) data[i] == 0xC2) { - data[j] = data[++i]; - } else if((unsigned char) data[i] == 0xC3) { - data[j] = ((unsigned char) data[++i] + 0x40); - } else { - std::stringstream stream; - stream << "UTF8 code " << std::hex << data[i] << data[i + 1] << " can not be converted into ISA-8859-1."; - ASSIMP_LOG_ERROR( stream.str() ); - - data[j++] = data[i++]; - data[j] = data[i]; - } - } else { - ASSIMP_LOG_ERROR("UTF8 code but only one character remaining"); - - data[j] = data[i]; - } - - i++; j++; - } - - data.resize(j); -} - -// ------------------------------------------------------------------------------------------------ -void BaseImporter::TextFileToBuffer(IOStream* stream, - std::vector<char>& data, - TextFileMode mode) -{ - ai_assert(nullptr != stream); - - const size_t fileSize = stream->FileSize(); - if (mode == FORBID_EMPTY) { - if(!fileSize) { - throw DeadlyImportError("File is empty"); - } - } - - data.reserve(fileSize+1); - data.resize(fileSize); - if(fileSize > 0) { - if(fileSize != stream->Read( &data[0], 1, fileSize)) { - throw DeadlyImportError("File read error"); - } - - ConvertToUTF8(data); - } - - // append a binary zero to simplify string parsing - data.push_back(0); -} - -// ------------------------------------------------------------------------------------------------ -namespace Assimp { - // Represents an import request - struct LoadRequest { - 
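// ===========================================================================
// [Editor's sketch -- not part of the diff above] ConvertToUTF8() in the
// deleted code decides how to convert the input by inspecting the byte-order
// mark at the start of the buffer. A standalone detector for the same set of
// BOMs (detection only, no conversion); enum and function names are
// illustrative, not Assimp API.
#include <cstddef>
#include <cstdint>

enum class TextEncoding { Unknown, Utf8, Utf16LE, Utf16BE, Utf32LE, Utf32BE };

static TextEncoding DetectBom(const std::uint8_t *data, std::size_t size) {
    if (size >= 3 && data[0] == 0xEF && data[1] == 0xBB && data[2] == 0xBF) {
        return TextEncoding::Utf8;
    }
    // The 4-byte BOMs must be tested first: the UTF-32 LE BOM begins with
    // FF FE and would otherwise be misread as UTF-16 LE.
    if (size >= 4 && data[0] == 0xFF && data[1] == 0xFE && data[2] == 0x00 && data[3] == 0x00) {
        return TextEncoding::Utf32LE;
    }
    if (size >= 4 && data[0] == 0x00 && data[1] == 0x00 && data[2] == 0xFE && data[3] == 0xFF) {
        return TextEncoding::Utf32BE;
    }
    if (size >= 2 && data[0] == 0xFF && data[1] == 0xFE) {
        return TextEncoding::Utf16LE;
    }
    if (size >= 2 && data[0] == 0xFE && data[1] == 0xFF) {
        return TextEncoding::Utf16BE;
    }
    return TextEncoding::Unknown; // no BOM: treat as plain ASCII/UTF-8
}
// ===========================================================================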
LoadRequest(const std::string& _file, unsigned int _flags,const BatchLoader::PropertyMap* _map, unsigned int _id) - : file(_file) - , flags(_flags) - , refCnt(1) - , scene(NULL) - , loaded(false) - , id(_id) { - if ( _map ) { - map = *_map; - } - } - - bool operator== ( const std::string& f ) const { - return file == f; - } - - const std::string file; - unsigned int flags; - unsigned int refCnt; - aiScene *scene; - bool loaded; - BatchLoader::PropertyMap map; - unsigned int id; - }; -} - -// ------------------------------------------------------------------------------------------------ -// BatchLoader::pimpl data structure -struct Assimp::BatchData { - BatchData( IOSystem* pIO, bool validate ) - : pIOSystem( pIO ) - , pImporter( nullptr ) - , next_id(0xffff) - , validate( validate ) { - ai_assert( nullptr != pIO ); - - pImporter = new Importer(); - pImporter->SetIOHandler( pIO ); - } - - ~BatchData() { - pImporter->SetIOHandler( nullptr ); /* get pointer back into our possession */ - delete pImporter; - } - - // IO system to be used for all imports - IOSystem* pIOSystem; - - // Importer used to load all meshes - Importer* pImporter; - - // List of all imports - std::list<LoadRequest> requests; - - // Base path - std::string pathBase; - - // Id for next item - unsigned int next_id; - - // Validation enabled state - bool validate; -}; - -typedef std::list<LoadRequest>::iterator LoadReqIt; - -// ------------------------------------------------------------------------------------------------ -BatchLoader::BatchLoader(IOSystem* pIO, bool validate ) { - ai_assert(nullptr != pIO); - - m_data = new BatchData( pIO, validate ); -} - -// ------------------------------------------------------------------------------------------------ -BatchLoader::~BatchLoader() -{ - // delete all scenes what have not been polled by the user - for ( LoadReqIt it = m_data->requests.begin();it != m_data->requests.end(); ++it) { - delete (*it).scene; - } - delete m_data; -} - -// ------------------------------------------------------------------------------------------------ -void BatchLoader::setValidation( bool enabled ) { - m_data->validate = enabled; -} - -// ------------------------------------------------------------------------------------------------ -bool BatchLoader::getValidation() const { - return m_data->validate; -} - -// ------------------------------------------------------------------------------------------------ -unsigned int BatchLoader::AddLoadRequest(const std::string& file, - unsigned int steps /*= 0*/, const PropertyMap* map /*= NULL*/) -{ - ai_assert(!file.empty()); - - // check whether we have this loading request already - for ( LoadReqIt it = m_data->requests.begin();it != m_data->requests.end(); ++it) { - // Call IOSystem's path comparison function here - if ( m_data->pIOSystem->ComparePaths((*it).file,file)) { - if (map) { - if ( !( ( *it ).map == *map ) ) { - continue; - } - } - else if ( !( *it ).map.empty() ) { - continue; - } - - (*it).refCnt++; - return (*it).id; - } - } - - // no, we don't have it. So add it to the queue ... 
- m_data->requests.push_back(LoadRequest(file,steps,map, m_data->next_id)); - return m_data->next_id++; -} - -// ------------------------------------------------------------------------------------------------ -aiScene* BatchLoader::GetImport( unsigned int which ) -{ - for ( LoadReqIt it = m_data->requests.begin();it != m_data->requests.end(); ++it) { - if ((*it).id == which && (*it).loaded) { - aiScene* sc = (*it).scene; - if (!(--(*it).refCnt)) { - m_data->requests.erase(it); - } - return sc; - } - } - return nullptr; -} - - - -// ------------------------------------------------------------------------------------------------ -void BatchLoader::LoadAll() -{ - // no threaded implementation for the moment - for ( LoadReqIt it = m_data->requests.begin();it != m_data->requests.end(); ++it) { - // force validation in debug builds - unsigned int pp = (*it).flags; - if ( m_data->validate ) { - pp |= aiProcess_ValidateDataStructure; - } - - // setup config properties if necessary - ImporterPimpl* pimpl = m_data->pImporter->Pimpl(); - pimpl->mFloatProperties = (*it).map.floats; - pimpl->mIntProperties = (*it).map.ints; - pimpl->mStringProperties = (*it).map.strings; - pimpl->mMatrixProperties = (*it).map.matrices; - - if (!DefaultLogger::isNullLogger()) - { - ASSIMP_LOG_INFO("%%% BEGIN EXTERNAL FILE %%%"); - ASSIMP_LOG_INFO_F("File: ", (*it).file); - } - m_data->pImporter->ReadFile((*it).file,pp); - (*it).scene = m_data->pImporter->GetOrphanedScene(); - (*it).loaded = true; - - ASSIMP_LOG_INFO("%%% END EXTERNAL FILE %%%"); - } -} diff --git a/thirdparty/assimp/code/Common/BaseProcess.cpp b/thirdparty/assimp/code/Common/BaseProcess.cpp deleted file mode 100644 index e247be418d..0000000000 --- a/thirdparty/assimp/code/Common/BaseProcess.cpp +++ /dev/null @@ -1,107 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
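// ===========================================================================
// [Editor's sketch -- not part of the diff above] BatchLoader::AddLoadRequest()
// and GetImport() above de-duplicate requests for the same file and
// reference-count them, so several callers asking for one external file only
// trigger a single import. A reduced standalone sketch of that bookkeeping;
// names are illustrative and no Assimp types are used.
#include <list>
#include <string>

struct PendingRequest {
    std::string file;
    unsigned int refCnt;
    unsigned int id;
};

class RequestQueue {
public:
    // Returns a request id; asking again for the same file only bumps the
    // reference count instead of queueing a second import.
    unsigned int Add(const std::string &file) {
        for (PendingRequest &req : m_requests) {
            if (req.file == file) { // a real loader compares canonical paths
                ++req.refCnt;
                return req.id;
            }
        }
        m_requests.push_back(PendingRequest{file, 1, m_nextId});
        return m_nextId++;
    }

    // Drops one reference; the request is removed once nobody needs it.
    void Release(unsigned int id) {
        for (auto it = m_requests.begin(); it != m_requests.end(); ++it) {
            if (it->id == id && --it->refCnt == 0) {
                m_requests.erase(it);
                return;
            }
        }
    }

private:
    std::list<PendingRequest> m_requests;
    unsigned int m_nextId = 0;
};
// ===========================================================================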
---------------------------------------------------------------------------- -*/ - -/** @file Implementation of BaseProcess */ - -#include <assimp/BaseImporter.h> -#include "BaseProcess.h" -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> -#include "Importer.h" - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -BaseProcess::BaseProcess() AI_NO_EXCEPT -: shared() -, progress() -{ -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -BaseProcess::~BaseProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -void BaseProcess::ExecuteOnScene( Importer* pImp) -{ - ai_assert(NULL != pImp && NULL != pImp->Pimpl()->mScene); - - progress = pImp->GetProgressHandler(); - ai_assert(progress); - - SetupProperties( pImp ); - - // catch exceptions thrown inside the PostProcess-Step - try - { - Execute(pImp->Pimpl()->mScene); - - } catch( const std::exception& err ) { - - // extract error description - pImp->Pimpl()->mErrorString = err.what(); - ASSIMP_LOG_ERROR(pImp->Pimpl()->mErrorString); - - // and kill the partially imported data - delete pImp->Pimpl()->mScene; - pImp->Pimpl()->mScene = nullptr; - } -} - -// ------------------------------------------------------------------------------------------------ -void BaseProcess::SetupProperties(const Importer* /*pImp*/) -{ - // the default implementation does nothing -} - -// ------------------------------------------------------------------------------------------------ -bool BaseProcess::RequireVerboseFormat() const -{ - return true; -} - diff --git a/thirdparty/assimp/code/Common/BaseProcess.h b/thirdparty/assimp/code/Common/BaseProcess.h deleted file mode 100644 index 4d5c7a76be..0000000000 --- a/thirdparty/assimp/code/Common/BaseProcess.h +++ /dev/null @@ -1,290 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
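// ===========================================================================
// [Editor's sketch -- not part of the diff above] BaseProcess::ExecuteOnScene()
// above converts an exception thrown inside a post-processing step into an
// error string and throws away the partially processed scene. A minimal
// standalone rendering of that pattern; Scene/Step are placeholder types, not
// Assimp classes.
#include <exception>
#include <memory>
#include <string>

struct Scene { /* imported data */ };

struct Step {
    virtual ~Step() = default;
    virtual void Execute(Scene &scene) = 0; // throws on failure
};

// Runs one step; on failure the scene is destroyed and the error is reported
// through 'errorText' rather than propagated to the caller.
static bool RunStep(Step &step, std::unique_ptr<Scene> &scene, std::string &errorText) {
    if (!scene) {
        return false;
    }
    try {
        step.Execute(*scene);
        return true;
    } catch (const std::exception &err) {
        errorText = err.what();
        scene.reset(); // drop the partially processed data, as the code above does
        return false;
    }
}
// ===========================================================================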
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Base class of all import post processing steps */ -#ifndef INCLUDED_AI_BASEPROCESS_H -#define INCLUDED_AI_BASEPROCESS_H - -#include <map> -#include <assimp/GenericProperty.h> - -struct aiScene; - -namespace Assimp { - -class Importer; - -// --------------------------------------------------------------------------- -/** Helper class to allow post-processing steps to interact with each other. - * - * The class maintains a simple property list that can be used by pp-steps - * to provide additional information to other steps. This is primarily - * intended for cross-step optimizations. - */ -class SharedPostProcessInfo -{ -public: - - struct Base - { - virtual ~Base() - {} - }; - - //! Represents data that is allocated on the heap, thus needs to be deleted - template <typename T> - struct THeapData : public Base - { - explicit THeapData(T* in) - : data (in) - {} - - ~THeapData() - { - delete data; - } - T* data; - }; - - //! Represents static, by-value data not allocated on the heap - template <typename T> - struct TStaticData : public Base - { - explicit TStaticData(T in) - : data (in) - {} - - ~TStaticData() - {} - - T data; - }; - - // some typedefs for cleaner code - typedef unsigned int KeyType; - typedef std::map<KeyType, Base*> PropertyMap; - -public: - - //! Destructor - ~SharedPostProcessInfo() - { - Clean(); - } - - //! Remove all stored properties from the table - void Clean() - { - // invoke the virtual destructor for all stored properties - for (PropertyMap::iterator it = pmap.begin(), end = pmap.end(); - it != end; ++it) - { - delete (*it).second; - } - pmap.clear(); - } - - //! Add a heap property to the list - template <typename T> - void AddProperty( const char* name, T* in ){ - AddProperty(name,(Base*)new THeapData<T>(in)); - } - - //! Add a static by-value property to the list - template <typename T> - void AddProperty( const char* name, T in ){ - AddProperty(name,(Base*)new TStaticData<T>(in)); - } - - - //! Get a heap property - template <typename T> - bool GetProperty( const char* name, T*& out ) const - { - THeapData<T>* t = (THeapData<T>*)GetPropertyInternal(name); - if(!t) - { - out = NULL; - return false; - } - out = t->data; - return true; - } - - //! Get a static, by-value property - template <typename T> - bool GetProperty( const char* name, T& out ) const - { - TStaticData<T>* t = (TStaticData<T>*)GetPropertyInternal(name); - if(!t)return false; - out = t->data; - return true; - } - - //! Remove a property of a specific type - void RemoveProperty( const char* name) { - SetGenericPropertyPtr<Base>(pmap,name,NULL); - } - -private: - - void AddProperty( const char* name, Base* data) { - SetGenericPropertyPtr<Base>(pmap,name,data); - } - - Base* GetPropertyInternal( const char* name) const { - return GetGenericProperty<Base*>(pmap,name,NULL); - } - -private: - - //! 
Map of all stored properties - PropertyMap pmap; -}; - -#if 0 - -// --------------------------------------------------------------------------- -/** @brief Represents a dependency table for a postprocessing steps. - * - * For future use. - */ - struct PPDependencyTable - { - unsigned int execute_me_before_these; - unsigned int execute_me_after_these; - unsigned int only_if_these_are_not_specified; - unsigned int mutually_exclusive_with; - }; - -#endif - - -#define AI_SPP_SPATIAL_SORT "$Spat" - -// --------------------------------------------------------------------------- -/** The BaseProcess defines a common interface for all post processing steps. - * A post processing step is run after a successful import if the caller - * specified the corresponding flag when calling ReadFile(). - * Enum #aiPostProcessSteps defines which flags are available. - * After a successful import the Importer iterates over its internal array - * of processes and calls IsActive() on each process to evaluate if the step - * should be executed. If the function returns true, the class' Execute() - * function is called subsequently. - */ -class ASSIMP_API_WINONLY BaseProcess { - friend class Importer; - -public: - /** Constructor to be privately used by Importer */ - BaseProcess() AI_NO_EXCEPT; - - /** Destructor, private as well */ - virtual ~BaseProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. A - * bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - virtual bool IsActive( unsigned int pFlags) const = 0; - - // ------------------------------------------------------------------- - /** Check whether this step expects its input vertex data to be - * in verbose format. */ - virtual bool RequireVerboseFormat() const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * The function deletes the scene if the postprocess step fails ( - * the object pointer will be set to NULL). - * @param pImp Importer instance (pImp->mScene must be valid) - */ - void ExecuteOnScene( Importer* pImp); - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - virtual void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * A process should throw an ImportErrorException* if it fails. - * This method must be implemented by deriving classes. - * @param pScene The imported data to work at. - */ - virtual void Execute( aiScene* pScene) = 0; - - - // ------------------------------------------------------------------- - /** Assign a new SharedPostProcessInfo to the step. This object - * allows multiple postprocess steps to share data. - * @param sh May be NULL - */ - inline void SetSharedData(SharedPostProcessInfo* sh) { - shared = sh; - } - - // ------------------------------------------------------------------- - /** Get the shared data that is assigned to the step. 
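// ===========================================================================
// [Editor's sketch -- not part of the diff above] SharedPostProcessInfo above
// lets post-processing steps exchange arbitrarily typed values through one
// map by wrapping each value behind a common base class with a virtual
// destructor. A trimmed-down sketch of that idea, keyed by name directly
// instead of Assimp's hashed keys and using dynamic_cast for type safety;
// illustrative names only.
#include <map>
#include <memory>
#include <string>
#include <utility>

class SharedInfo {
    struct Base {
        virtual ~Base() = default;
    };
    template <typename T>
    struct Holder : Base {
        explicit Holder(T v) : value(std::move(v)) {}
        T value;
    };

public:
    template <typename T>
    void Set(const std::string &name, T value) {
        m_props[name] = std::make_unique<Holder<T>>(std::move(value));
    }

    // Returns false if the property is missing or was stored as another type.
    template <typename T>
    bool Get(const std::string &name, T &out) const {
        auto it = m_props.find(name);
        if (it == m_props.end()) {
            return false;
        }
        const auto *holder = dynamic_cast<const Holder<T> *>(it->second.get());
        if (!holder) {
            return false;
        }
        out = holder->value;
        return true;
    }

private:
    std::map<std::string, std::unique_ptr<Base>> m_props;
};
// Usage: info.Set("$Spat", 42); int v = 0; info.Get("$Spat", v);
// ===========================================================================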
- */ - inline SharedPostProcessInfo* GetSharedData() { - return shared; - } - -protected: - - /** See the doc of #SharedPostProcessInfo for more details */ - SharedPostProcessInfo* shared; - - /** Currently active progress handler */ - ProgressHandler* progress; -}; - - -} // end of namespace Assimp - -#endif // AI_BASEPROCESS_H_INC diff --git a/thirdparty/assimp/code/Common/Bitmap.cpp b/thirdparty/assimp/code/Common/Bitmap.cpp deleted file mode 100644 index b22b71ea9e..0000000000 --- a/thirdparty/assimp/code/Common/Bitmap.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Bitmap.cpp - * @brief Defines bitmap format helper for textures - * - * Used for file formats which embed their textures into the model file. 
- */ - - -#include <assimp/Bitmap.h> -#include <assimp/texture.h> -#include <assimp/IOStream.hpp> -#include <assimp/ByteSwapper.h> - -namespace Assimp { - - void Bitmap::Save(aiTexture* texture, IOStream* file) { - if(file != NULL) { - Header header; - DIB dib; - - dib.size = DIB::dib_size; - dib.width = texture->mWidth; - dib.height = texture->mHeight; - dib.planes = 1; - dib.bits_per_pixel = 8 * mBytesPerPixel; - dib.compression = 0; - dib.image_size = (((dib.width * mBytesPerPixel) + 3) & 0x0000FFFC) * dib.height; - dib.x_resolution = 0; - dib.y_resolution = 0; - dib.nb_colors = 0; - dib.nb_important_colors = 0; - - header.type = 0x4D42; // 'BM' - header.offset = Header::header_size + DIB::dib_size; - header.size = header.offset + dib.image_size; - header.reserved1 = 0; - header.reserved2 = 0; - - WriteHeader(header, file); - WriteDIB(dib, file); - WriteData(texture, file); - } - } - - template<typename T> - inline - std::size_t Copy(uint8_t* data, const T &field) { -#ifdef AI_BUILD_BIG_ENDIAN - T field_swapped=AI_BE(field); - std::memcpy(data, &field_swapped, sizeof(field)); return sizeof(field); -#else - std::memcpy(data, &AI_BE(field), sizeof(field)); return sizeof(field); -#endif - } - - void Bitmap::WriteHeader(Header& header, IOStream* file) { - uint8_t data[Header::header_size]; - - std::size_t offset = 0; - - offset += Copy(&data[offset], header.type); - offset += Copy(&data[offset], header.size); - offset += Copy(&data[offset], header.reserved1); - offset += Copy(&data[offset], header.reserved2); - Copy(&data[offset], header.offset); - - file->Write(data, Header::header_size, 1); - } - - void Bitmap::WriteDIB(DIB& dib, IOStream* file) { - uint8_t data[DIB::dib_size]; - - std::size_t offset = 0; - - offset += Copy(&data[offset], dib.size); - offset += Copy(&data[offset], dib.width); - offset += Copy(&data[offset], dib.height); - offset += Copy(&data[offset], dib.planes); - offset += Copy(&data[offset], dib.bits_per_pixel); - offset += Copy(&data[offset], dib.compression); - offset += Copy(&data[offset], dib.image_size); - offset += Copy(&data[offset], dib.x_resolution); - offset += Copy(&data[offset], dib.y_resolution); - offset += Copy(&data[offset], dib.nb_colors); - Copy(&data[offset], dib.nb_important_colors); - - file->Write(data, DIB::dib_size, 1); - } - - void Bitmap::WriteData(aiTexture* texture, IOStream* file) { - static const std::size_t padding_offset = 4; - static const uint8_t padding_data[padding_offset] = {0x0, 0x0, 0x0, 0x0}; - - unsigned int padding = (padding_offset - ((mBytesPerPixel * texture->mWidth) % padding_offset)) % padding_offset; - uint8_t pixel[mBytesPerPixel]; - - for(std::size_t i = 0; i < texture->mHeight; ++i) { - for(std::size_t j = 0; j < texture->mWidth; ++j) { - const aiTexel& texel = texture->pcData[(texture->mHeight - i - 1) * texture->mWidth + j]; // Bitmap files are stored in bottom-up format - - pixel[0] = texel.r; - pixel[1] = texel.g; - pixel[2] = texel.b; - pixel[3] = texel.a; - - file->Write(pixel, mBytesPerPixel, 1); - } - - file->Write(padding_data, padding, 1); - } - } - -} diff --git a/thirdparty/assimp/code/Common/CreateAnimMesh.cpp b/thirdparty/assimp/code/Common/CreateAnimMesh.cpp deleted file mode 100644 index 98b60e5319..0000000000 --- a/thirdparty/assimp/code/Common/CreateAnimMesh.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (C) 
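// ===========================================================================
// [Editor's sketch -- not part of the diff above] Bitmap::Save()/WriteData()
// above rely on two BMP conventions: each pixel row is padded to a multiple
// of 4 bytes, and rows are stored bottom-up. Two small helpers isolating just
// those rules, assuming the 4-bytes-per-pixel layout used above; illustrative
// names.
#include <cstddef>
#include <cstdint>

// Bytes occupied by one stored row, including the trailing padding.
static std::size_t BmpRowStride(std::uint32_t width, std::uint32_t bytesPerPixel) {
    return (static_cast<std::size_t>(width) * bytesPerPixel + 3u) & ~static_cast<std::size_t>(3);
}

// Index of the source texel that is written at position (x, y) of the file:
// the last source row is emitted first because BMP rows run bottom-up.
static std::size_t BottomUpIndex(std::uint32_t x, std::uint32_t y,
                                 std::uint32_t width, std::uint32_t height) {
    return static_cast<std::size_t>(height - 1 - y) * width + x;
}
// Example: a 3-pixel RGBA row is 12 bytes and needs no padding, while a
// 3-pixel RGB row (3 bytes per pixel) would be padded from 9 up to 12 bytes.
// ===========================================================================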
2016 The Qt Company Ltd. -Copyright (c) 2006-2012, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -#include <assimp/CreateAnimMesh.h> - -namespace Assimp { - -aiAnimMesh *aiCreateAnimMesh(const aiMesh *mesh) -{ - aiAnimMesh *animesh = new aiAnimMesh; - animesh->mNumVertices = mesh->mNumVertices; - if (mesh->mVertices) { - animesh->mVertices = new aiVector3D[animesh->mNumVertices]; - std::memcpy(animesh->mVertices, mesh->mVertices, mesh->mNumVertices * sizeof(aiVector3D)); - } - if (mesh->mNormals) { - animesh->mNormals = new aiVector3D[animesh->mNumVertices]; - std::memcpy(animesh->mNormals, mesh->mNormals, mesh->mNumVertices * sizeof(aiVector3D)); - } - if (mesh->mTangents) { - animesh->mTangents = new aiVector3D[animesh->mNumVertices]; - std::memcpy(animesh->mTangents, mesh->mTangents, mesh->mNumVertices * sizeof(aiVector3D)); - } - if (mesh->mBitangents) { - animesh->mBitangents = new aiVector3D[animesh->mNumVertices]; - std::memcpy(animesh->mBitangents, mesh->mBitangents, mesh->mNumVertices * sizeof(aiVector3D)); - } - - for (int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i) { - if (mesh->mColors[i]) { - animesh->mColors[i] = new aiColor4D[animesh->mNumVertices]; - std::memcpy(animesh->mColors[i], mesh->mColors[i], mesh->mNumVertices * sizeof(aiColor4D)); - } else { - animesh->mColors[i] = NULL; - } - } - - for (int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (mesh->mTextureCoords[i]) { - animesh->mTextureCoords[i] = new aiVector3D[animesh->mNumVertices]; - std::memcpy(animesh->mTextureCoords[i], mesh->mTextureCoords[i], mesh->mNumVertices * sizeof(aiVector3D)); - } else { - animesh->mTextureCoords[i] = NULL; - } - } - return animesh; -} - -} // end of namespace Assimp diff --git a/thirdparty/assimp/code/Common/DefaultIOStream.cpp b/thirdparty/assimp/code/Common/DefaultIOStream.cpp deleted file mode 100644 index 1c100b6189..0000000000 --- a/thirdparty/assimp/code/Common/DefaultIOStream.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/* 
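// ===========================================================================
// [Editor's sketch -- not part of the diff above] aiCreateAnimMesh() above
// copies every vertex channel only when the source mesh actually provides it,
// repeating the same "allocate, then memcpy if the pointer is non-null"
// pattern per channel. The pattern factored into one helper; illustrative
// code, not the Assimp implementation.
#include <cstddef>
#include <cstring>

// Returns a deep copy of 'src' (count elements) or nullptr when the channel
// is absent. T is assumed trivially copyable, which holds for the plain
// vector/color structs copied above.
template <typename T>
static T *CopyChannel(const T *src, unsigned int count) {
    if (!src || count == 0) {
        return nullptr;
    }
    T *dst = new T[count];
    std::memcpy(dst, src, sizeof(T) * static_cast<std::size_t>(count));
    return dst;
}
// Usage (hypothetical): animMesh->mNormals = CopyChannel(mesh->mNormals, mesh->mNumVertices);
// ===========================================================================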
---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file DefaultIOStream.cpp - * @brief Default File I/O implementation for #Importer - */ - - -#include <assimp/ai_assert.h> -#include <assimp/DefaultIOStream.h> -#include <sys/types.h> -#include <sys/stat.h> - -using namespace Assimp; - -// ---------------------------------------------------------------------------------- -DefaultIOStream::~DefaultIOStream() -{ - if (mFile) { - ::fclose(mFile); - mFile = nullptr; - } -} - -// ---------------------------------------------------------------------------------- -size_t DefaultIOStream::Read(void* pvBuffer, - size_t pSize, - size_t pCount) -{ - ai_assert(NULL != pvBuffer && 0 != pSize && 0 != pCount); - return (mFile ? ::fread(pvBuffer, pSize, pCount, mFile) : 0); -} - -// ---------------------------------------------------------------------------------- -size_t DefaultIOStream::Write(const void* pvBuffer, - size_t pSize, - size_t pCount) -{ - ai_assert(NULL != pvBuffer && 0 != pSize && 0 != pCount); - return (mFile ? ::fwrite(pvBuffer, pSize, pCount, mFile) : 0); -} - -// ---------------------------------------------------------------------------------- -aiReturn DefaultIOStream::Seek(size_t pOffset, - aiOrigin pOrigin) -{ - if (!mFile) { - return AI_FAILURE; - } - - // Just to check whether our enum maps one to one with the CRT constants - static_assert(aiOrigin_CUR == SEEK_CUR && - aiOrigin_END == SEEK_END && aiOrigin_SET == SEEK_SET, "aiOrigin_CUR == SEEK_CUR && \ - aiOrigin_END == SEEK_END && aiOrigin_SET == SEEK_SET"); - - // do the seek - return (0 == ::fseek(mFile, (long)pOffset,(int)pOrigin) ? 
AI_SUCCESS : AI_FAILURE); -} - -// ---------------------------------------------------------------------------------- -size_t DefaultIOStream::Tell() const -{ - if (!mFile) { - return 0; - } - return ::ftell(mFile); -} - -// ---------------------------------------------------------------------------------- -size_t DefaultIOStream::FileSize() const -{ - if (! mFile || mFilename.empty()) { - return 0; - } - - if (SIZE_MAX == mCachedSize ) { - - // Although fseek/ftell would allow us to reuse the existing file handle here, - // it is generally unsafe because: - // - For binary streams, it is not technically well-defined - // - For text files the results are meaningless - // That's why we use the safer variant fstat here. - // - // See here for details: - // https://www.securecoding.cert.org/confluence/display/seccode/FIO19-C.+Do+not+use+fseek()+and+ftell()+to+compute+the+size+of+a+regular+file -#if defined _WIN32 && (!defined __GNUC__ || __MSVCRT_VERSION__ >= 0x0601) - struct __stat64 fileStat; - //using fileno + fstat avoids having to handle the filename - int err = _fstat64( _fileno(mFile), &fileStat ); - if (0 != err) - return 0; - mCachedSize = (size_t) (fileStat.st_size); -#elif defined __GNUC__ || defined __APPLE__ || defined __MACH__ || defined __FreeBSD__ - struct stat fileStat; - int err = stat(mFilename.c_str(), &fileStat ); - if (0 != err) - return 0; - const unsigned long long cachedSize = fileStat.st_size; - mCachedSize = static_cast< size_t >( cachedSize ); -#else -# error "Unknown platform" -#endif - } - return mCachedSize; -} - -// ---------------------------------------------------------------------------------- -void DefaultIOStream::Flush() -{ - if (mFile) { - ::fflush(mFile); - } -} - -// ---------------------------------------------------------------------------------- diff --git a/thirdparty/assimp/code/Common/DefaultIOSystem.cpp b/thirdparty/assimp/code/Common/DefaultIOSystem.cpp deleted file mode 100644 index 6fdc24dd80..0000000000 --- a/thirdparty/assimp/code/Common/DefaultIOSystem.cpp +++ /dev/null @@ -1,216 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
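// ===========================================================================
// [Editor's sketch -- not part of the diff above] DefaultIOStream::FileSize()
// above deliberately avoids fseek()+ftell(), which is not well-defined for
// binary streams, and instead stats the file descriptor once and caches the
// result. A POSIX-only sketch of the same approach (the Windows branch above
// uses _fstat64 on _fileno); illustrative names.
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

class CachedFileSize {
public:
    explicit CachedFileSize(FILE *file) : m_file(file) {}

    // Returns the size in bytes, or 0 on error; the file is queried at most once.
    size_t Get() {
        if (!m_cached) {
            struct stat fileStat;
            if (m_file && 0 == ::fstat(::fileno(m_file), &fileStat)) {
                m_size = static_cast<size_t>(fileStat.st_size);
            }
            m_cached = true;
        }
        return m_size;
    }

private:
    FILE *m_file = nullptr;
    size_t m_size = 0;
    bool m_cached = false;
};
// ===========================================================================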
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file Default implementation of IOSystem using the standard C file functions */ - -#include <assimp/StringComparison.h> - -#include <assimp/DefaultIOSystem.h> -#include <assimp/DefaultIOStream.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/ai_assert.h> -#include <stdlib.h> - -#ifdef __unix__ -#include <sys/param.h> -#include <stdlib.h> -#endif - -#ifdef _WIN32 -#include <windows.h> -#endif - -using namespace Assimp; - -#ifdef _WIN32 -static std::wstring Utf8ToWide(const char* in) -{ - int size = MultiByteToWideChar(CP_UTF8, 0, in, -1, nullptr, 0); - // size includes terminating null; std::wstring adds null automatically - std::wstring out(static_cast<size_t>(size) - 1, L'\0'); - MultiByteToWideChar(CP_UTF8, 0, in, -1, &out[0], size); - return out; -} - -static std::string WideToUtf8(const wchar_t* in) -{ - int size = WideCharToMultiByte(CP_UTF8, 0, in, -1, nullptr, 0, nullptr, nullptr); - // size includes terminating null; std::string adds null automatically - std::string out(static_cast<size_t>(size) - 1, '\0'); - WideCharToMultiByte(CP_UTF8, 0, in, -1, &out[0], size, nullptr, nullptr); - return out; -} -#endif - -// ------------------------------------------------------------------------------------------------ -// Tests for the existence of a file at the given path. -bool DefaultIOSystem::Exists(const char* pFile) const -{ -#ifdef _WIN32 - struct __stat64 filestat; - if (_wstat64(Utf8ToWide(pFile).c_str(), &filestat) != 0) { - return false; - } -#else - FILE* file = ::fopen(pFile, "rb"); - if (!file) - return false; - - ::fclose(file); -#endif - return true; -} - -// ------------------------------------------------------------------------------------------------ -// Open a new file with a given path. -IOStream* DefaultIOSystem::Open(const char* strFile, const char* strMode) -{ - ai_assert(strFile != nullptr); - ai_assert(strMode != nullptr); - FILE* file; -#ifdef _WIN32 - file = ::_wfopen(Utf8ToWide(strFile).c_str(), Utf8ToWide(strMode).c_str()); -#else - file = ::fopen(strFile, strMode); -#endif - if (!file) - return nullptr; - - return new DefaultIOStream(file, strFile); -} - -// ------------------------------------------------------------------------------------------------ -// Closes the given file and releases all resources associated with it. 
-void DefaultIOSystem::Close(IOStream* pFile) -{ - delete pFile; -} - -// ------------------------------------------------------------------------------------------------ -// Returns the operation specific directory separator -char DefaultIOSystem::getOsSeparator() const -{ -#ifndef _WIN32 - return '/'; -#else - return '\\'; -#endif -} - -// ------------------------------------------------------------------------------------------------ -// IOSystem default implementation (ComparePaths isn't a pure virtual function) -bool IOSystem::ComparePaths(const char* one, const char* second) const -{ - return !ASSIMP_stricmp(one, second); -} - -// ------------------------------------------------------------------------------------------------ -// Convert a relative path into an absolute path -inline static std::string MakeAbsolutePath(const char* in) -{ - ai_assert(in); - std::string out; -#ifdef _WIN32 - wchar_t* ret = ::_wfullpath(nullptr, Utf8ToWide(in).c_str(), 0); - if (ret) { - out = WideToUtf8(ret); - free(ret); - } -#else - char* ret = realpath(in, nullptr); - if (ret) { - out = ret; - free(ret); - } -#endif - if (!ret) { - // preserve the input path, maybe someone else is able to fix - // the path before it is accessed (e.g. our file system filter) - ASSIMP_LOG_WARN_F("Invalid path: ", std::string(in)); - out = in; - } - return out; -} - -// ------------------------------------------------------------------------------------------------ -// DefaultIOSystem's more specialized implementation -bool DefaultIOSystem::ComparePaths(const char* one, const char* second) const -{ - // chances are quite good both paths are formatted identically, - // so we can hopefully return here already - if (!ASSIMP_stricmp(one, second)) - return true; - - std::string temp1 = MakeAbsolutePath(one); - std::string temp2 = MakeAbsolutePath(second); - - return !ASSIMP_stricmp(temp1, temp2); -} - -// ------------------------------------------------------------------------------------------------ -std::string DefaultIOSystem::fileName(const std::string& path) -{ - std::string ret = path; - std::size_t last = ret.find_last_of("\\/"); - if (last != std::string::npos) ret = ret.substr(last + 1); - return ret; -} - -// ------------------------------------------------------------------------------------------------ -std::string DefaultIOSystem::completeBaseName(const std::string& path) -{ - std::string ret = fileName(path); - std::size_t pos = ret.find_last_of('.'); - if (pos != std::string::npos) ret = ret.substr(0, pos); - return ret; -} - -// ------------------------------------------------------------------------------------------------ -std::string DefaultIOSystem::absolutePath(const std::string& path) -{ - std::string ret = path; - std::size_t last = ret.find_last_of("\\/"); - if (last != std::string::npos) ret = ret.substr(0, last); - return ret; -} - -// ------------------------------------------------------------------------------------------------ diff --git a/thirdparty/assimp/code/Common/DefaultLogger.cpp b/thirdparty/assimp/code/Common/DefaultLogger.cpp deleted file mode 100644 index de3528d2b4..0000000000 --- a/thirdparty/assimp/code/Common/DefaultLogger.cpp +++ /dev/null @@ -1,418 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
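// ===========================================================================
// [Editor's sketch -- not part of the diff above] DefaultIOSystem::ComparePaths()
// above treats two paths as equal when their canonicalized absolute forms
// match, falling back to the raw input when canonicalization fails. A
// POSIX-only sketch using realpath() (the Windows branch above uses
// _wfullpath); the real code also compares case-insensitively, which this
// sketch leaves out. Illustrative names.
#include <stdlib.h>
#include <string>

static std::string Canonicalize(const std::string &path) {
    char *resolved = ::realpath(path.c_str(), nullptr); // malloc'd buffer
    if (!resolved) {
        return path; // keep the input; a later layer may still be able to fix it
    }
    std::string out(resolved);
    ::free(resolved);
    return out;
}

static bool SamePath(const std::string &a, const std::string &b) {
    if (a == b) {
        return true; // cheap early out: both spelled identically
    }
    return Canonicalize(a) == Canonicalize(b);
}
// ===========================================================================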
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file DefaultLogger.cpp - * @brief Implementation of DefaultLogger (and Logger) - */ - -// Default log streams -#include "Win32DebugLogStream.h" -#include "StdOStreamLogStream.h" -#include "FileLogStream.h" -#include <assimp/StringUtils.h> - -#include <assimp/DefaultIOSystem.h> -#include <assimp/NullLogger.hpp> -#include <assimp/DefaultLogger.hpp> -#include <assimp/ai_assert.h> -#include <iostream> -#include <stdio.h> - -#ifndef ASSIMP_BUILD_SINGLETHREADED -# include <thread> -# include <mutex> - std::mutex loggerMutex; -#endif - -namespace Assimp { - -// ---------------------------------------------------------------------------------- -NullLogger DefaultLogger::s_pNullLogger; -Logger *DefaultLogger::m_pLogger = &DefaultLogger::s_pNullLogger; - -static const unsigned int SeverityAll = Logger::Info | Logger::Err | Logger::Warn | Logger::Debugging; - -// ---------------------------------------------------------------------------------- -// Represents a log-stream + its error severity -struct LogStreamInfo { - unsigned int m_uiErrorSeverity; - LogStream *m_pStream; - - // Constructor - LogStreamInfo( unsigned int uiErrorSev, LogStream *pStream ) : - m_uiErrorSeverity( uiErrorSev ), - m_pStream( pStream ) { - // empty - } - - // Destructor - ~LogStreamInfo() { - delete m_pStream; - } -}; - -// ---------------------------------------------------------------------------------- -// Construct a default log stream -LogStream* LogStream::createDefaultStream(aiDefaultLogStream streams, - const char* name /*= "AssimpLog.txt"*/, - IOSystem* io /*= NULL*/) -{ - switch (streams) - { - // This is a platform-specific feature - case aiDefaultLogStream_DEBUGGER: -#ifdef WIN32 - return new Win32DebugLogStream(); -#else - return nullptr; -#endif - - // Platform-independent default streams - case aiDefaultLogStream_STDERR: - return new StdOStreamLogStream(std::cerr); - case aiDefaultLogStream_STDOUT: - return new StdOStreamLogStream(std::cout); - case 
aiDefaultLogStream_FILE: - return (name && *name ? new FileLogStream(name,io) : nullptr ); - default: - // We don't know this default log stream, so raise an assertion - ai_assert(false); - - }; - - // For compilers without dead code path detection - return NULL; -} - -// ---------------------------------------------------------------------------------- -// Creates the only singleton instance -Logger *DefaultLogger::create(const char* name /*= "AssimpLog.txt"*/, - LogSeverity severity /*= NORMAL*/, - unsigned int defStreams /*= aiDefaultLogStream_DEBUGGER | aiDefaultLogStream_FILE*/, - IOSystem* io /*= NULL*/) { - // enter the mutex here to avoid concurrency problems -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(loggerMutex); -#endif - - if ( m_pLogger && !isNullLogger() ) { - delete m_pLogger; - } - - m_pLogger = new DefaultLogger( severity ); - - // Attach default log streams - // Stream the log to the MSVC debugger? - if ( defStreams & aiDefaultLogStream_DEBUGGER ) { - m_pLogger->attachStream( LogStream::createDefaultStream( aiDefaultLogStream_DEBUGGER ) ); - } - - // Stream the log to COUT? - if ( defStreams & aiDefaultLogStream_STDOUT ) { - m_pLogger->attachStream( LogStream::createDefaultStream( aiDefaultLogStream_STDOUT ) ); - } - - // Stream the log to CERR? - if ( defStreams & aiDefaultLogStream_STDERR ) { - m_pLogger->attachStream( LogStream::createDefaultStream( aiDefaultLogStream_STDERR ) ); - } - - // Stream the log to a file - if ( defStreams & aiDefaultLogStream_FILE && name && *name ) { - m_pLogger->attachStream( LogStream::createDefaultStream( aiDefaultLogStream_FILE, name, io ) ); - } - - return m_pLogger; -} - -// ---------------------------------------------------------------------------------- -void Logger::debug(const char* message) { - - // SECURITY FIX: otherwise it's easy to produce overruns since - // sometimes importers will include data from the input file - // (i.e. node names) in their messages. 
- if (strlen(message)>MAX_LOG_MESSAGE_LENGTH) { - return; - } - return OnDebug(message); -} - -// ---------------------------------------------------------------------------------- -void Logger::info(const char* message) { - - // SECURITY FIX: see above - if (strlen(message)>MAX_LOG_MESSAGE_LENGTH) { - return; - } - return OnInfo(message); -} - -// ---------------------------------------------------------------------------------- -void Logger::warn(const char* message) { - - // SECURITY FIX: see above - if (strlen(message)>MAX_LOG_MESSAGE_LENGTH) { - return; - } - return OnWarn(message); -} - -// ---------------------------------------------------------------------------------- -void Logger::error(const char* message) { - // SECURITY FIX: see above - if (strlen(message)>MAX_LOG_MESSAGE_LENGTH) { - return; - } - return OnError(message); -} - -// ---------------------------------------------------------------------------------- -void DefaultLogger::set( Logger *logger ) { - // enter the mutex here to avoid concurrency problems -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(loggerMutex); -#endif - - if ( nullptr == logger ) { - logger = &s_pNullLogger; - } - if ( nullptr != m_pLogger && !isNullLogger() ) { - delete m_pLogger; - } - - DefaultLogger::m_pLogger = logger; -} - -// ---------------------------------------------------------------------------------- -bool DefaultLogger::isNullLogger() { - return m_pLogger == &s_pNullLogger; -} - -// ---------------------------------------------------------------------------------- -Logger *DefaultLogger::get() { - return m_pLogger; -} - -// ---------------------------------------------------------------------------------- -// Kills the only instance -void DefaultLogger::kill() { - // enter the mutex here to avoid concurrency problems -#ifndef ASSIMP_BUILD_SINGLETHREADED - std::lock_guard<std::mutex> lock(loggerMutex); -#endif - - if ( m_pLogger == &s_pNullLogger ) { - return; - } - delete m_pLogger; - m_pLogger = &s_pNullLogger; -} - -// ---------------------------------------------------------------------------------- -// Debug message -void DefaultLogger::OnDebug( const char* message ) { - if ( m_Severity == Logger::NORMAL ) { - return; - } - - static const size_t Size = MAX_LOG_MESSAGE_LENGTH + 16; - char msg[Size]; - ai_snprintf(msg, Size, "Debug, T%u: %s", GetThreadID(), message); - - WriteToStreams( msg, Logger::Debugging ); -} - -// ---------------------------------------------------------------------------------- -// Logs an info -void DefaultLogger::OnInfo( const char* message ){ - static const size_t Size = MAX_LOG_MESSAGE_LENGTH + 16; - char msg[Size]; - ai_snprintf(msg, Size, "Info, T%u: %s", GetThreadID(), message ); - - WriteToStreams( msg , Logger::Info ); -} - -// ---------------------------------------------------------------------------------- -// Logs a warning -void DefaultLogger::OnWarn( const char* message ) { - static const size_t Size = MAX_LOG_MESSAGE_LENGTH + 16; - char msg[Size]; - ai_snprintf(msg, Size, "Warn, T%u: %s", GetThreadID(), message ); - - WriteToStreams( msg, Logger::Warn ); -} - -// ---------------------------------------------------------------------------------- -// Logs an error -void DefaultLogger::OnError( const char* message ) { - static const size_t Size = MAX_LOG_MESSAGE_LENGTH + 16; - char msg[ Size ]; - ai_snprintf(msg, Size, "Error, T%u: %s", GetThreadID(), message ); - - WriteToStreams( msg, Logger::Err ); -} - -// 
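// ===========================================================================
// [Editor's sketch -- not part of the diff above] The "SECURITY FIX" in
// Logger::debug()/info()/warn()/error() above rejects over-long messages
// before they are formatted into a fixed-size stack buffer, since importers
// may embed data taken from the input file (node names and the like) in log
// text. A standalone sketch of that guard; kMaxLogMessage is an illustrative
// constant, not Assimp's MAX_LOG_MESSAGE_LENGTH.
#include <cstddef>
#include <cstdio>
#include <cstring>

static const std::size_t kMaxLogMessage = 1024;

static void LogDebug(const char *message) {
    // Refuse anything that would not fit into the prefixed buffer below.
    if (std::strlen(message) > kMaxLogMessage) {
        return;
    }
    char buffer[kMaxLogMessage + 16];
    std::snprintf(buffer, sizeof(buffer), "Debug: %s", message);
    std::fputs(buffer, stderr);
    std::fputc('\n', stderr);
}
// ===========================================================================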
---------------------------------------------------------------------------------- -// Will attach a new stream -bool DefaultLogger::attachStream( LogStream *pStream, unsigned int severity ) { - if ( nullptr == pStream ) { - return false; - } - - if (0 == severity) { - severity = Logger::Info | Logger::Err | Logger::Warn | Logger::Debugging; - } - - for ( StreamIt it = m_StreamArray.begin(); - it != m_StreamArray.end(); - ++it ) - { - if ( (*it)->m_pStream == pStream ) { - (*it)->m_uiErrorSeverity |= severity; - return true; - } - } - - LogStreamInfo *pInfo = new LogStreamInfo( severity, pStream ); - m_StreamArray.push_back( pInfo ); - return true; -} - -// ---------------------------------------------------------------------------------- -// Detach a stream -bool DefaultLogger::detatchStream( LogStream *pStream, unsigned int severity ) { - if ( nullptr == pStream ) { - return false; - } - - if (0 == severity) { - severity = SeverityAll; - } - - bool res( false ); - for ( StreamIt it = m_StreamArray.begin(); it != m_StreamArray.end(); ++it ) { - if ( (*it)->m_pStream == pStream ) { - (*it)->m_uiErrorSeverity &= ~severity; - if ( (*it)->m_uiErrorSeverity == 0 ) { - // don't delete the underlying stream 'cause the caller gains ownership again - (**it).m_pStream = nullptr; - delete *it; - m_StreamArray.erase( it ); - res = true; - break; - } - return true; - } - } - return res; -} - -// ---------------------------------------------------------------------------------- -// Constructor -DefaultLogger::DefaultLogger(LogSeverity severity) - : Logger ( severity ) - , noRepeatMsg (false) - , lastLen( 0 ) { - lastMsg[0] = '\0'; -} - -// ---------------------------------------------------------------------------------- -// Destructor -DefaultLogger::~DefaultLogger() { - for ( StreamIt it = m_StreamArray.begin(); it != m_StreamArray.end(); ++it ) { - // also frees the underlying stream, we are its owner. - delete *it; - } -} - -// ---------------------------------------------------------------------------------- -// Writes message to stream -void DefaultLogger::WriteToStreams(const char *message, ErrorSeverity ErrorSev ) { - ai_assert(nullptr != message); - - // Check whether this is a repeated message - if (! ::strncmp( message,lastMsg, lastLen-1)) - { - if (!noRepeatMsg) - { - noRepeatMsg = true; - message = "Skipping one or more lines with the same contents\n"; - } - else return; - } - else - { - // append a new-line character to the message to be printed - lastLen = ::strlen(message); - ::memcpy(lastMsg,message,lastLen+1); - ::strcat(lastMsg+lastLen,"\n"); - - message = lastMsg; - noRepeatMsg = false; - ++lastLen; - } - for ( ConstStreamIt it = m_StreamArray.begin(); - it != m_StreamArray.end(); - ++it) - { - if ( ErrorSev & (*it)->m_uiErrorSeverity ) - (*it)->m_pStream->write( message); - } -} - -// ---------------------------------------------------------------------------------- -// Returns thread id, if not supported only a zero will be returned. -unsigned int DefaultLogger::GetThreadID() -{ - // fixme: we can get this value via std::threads - // std::this_thread::get_id().hash() returns a (big) size_t, not sure if this is useful in this case. 
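// ===========================================================================
// [Editor's sketch -- not part of the diff above] attachStream() and
// WriteToStreams() above associate every attached log stream with a severity
// bitmask and deliver a message only to the streams whose mask contains that
// message's severity. A compact standalone sketch of that dispatch;
// illustrative names, no Assimp types.
#include <string>
#include <vector>

enum Severity : unsigned int {
    SevDebug = 1, SevInfo = 2, SevWarn = 4, SevError = 8,
    SevAll = SevDebug | SevInfo | SevWarn | SevError
};

struct Sink {
    unsigned int mask;                  // which severities this sink accepts
    void (*write)(const std::string &); // where the formatted line ends up
};

class LogDispatcher {
public:
    void Attach(void (*write)(const std::string &), unsigned int mask = SevAll) {
        m_sinks.push_back(Sink{mask, write});
    }

    void Write(Severity sev, const std::string &line) const {
        for (const Sink &sink : m_sinks) {
            if (sink.mask & sev) { // severity filtering, as in WriteToStreams()
                sink.write(line);
            }
        }
    }

private:
    std::vector<Sink> m_sinks;
};
// Usage: dispatcher.Attach(&WriteToStderr, SevWarn | SevError); where
// WriteToStderr is any function taking a const std::string&.
// ===========================================================================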
-#ifdef WIN32 - return (unsigned int)::GetCurrentThreadId(); -#else - return 0; // not supported -#endif -} - -// ---------------------------------------------------------------------------------- - -} // !namespace Assimp diff --git a/thirdparty/assimp/code/Common/DefaultProgressHandler.h b/thirdparty/assimp/code/Common/DefaultProgressHandler.h deleted file mode 100644 index bd2cce00be..0000000000 --- a/thirdparty/assimp/code/Common/DefaultProgressHandler.h +++ /dev/null @@ -1,65 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file ProgressHandler.hpp - * @brief Abstract base class 'ProgressHandler'. - */ -#ifndef INCLUDED_AI_DEFAULTPROGRESSHANDLER_H -#define INCLUDED_AI_DEFAULTPROGRESSHANDLER_H - -#include <assimp/ProgressHandler.hpp> - -namespace Assimp { - -// ------------------------------------------------------------------------------------ -/** @brief Internal default implementation of the #ProgressHandler interface. */ -class DefaultProgressHandler : public ProgressHandler { - - virtual bool Update(float /*percentage*/) { - return false; - } - - -}; // !class DefaultProgressHandler -} // Namespace Assimp - -#endif diff --git a/thirdparty/assimp/code/Common/Exporter.cpp b/thirdparty/assimp/code/Common/Exporter.cpp deleted file mode 100644 index 4ce1a2bd80..0000000000 --- a/thirdparty/assimp/code/Common/Exporter.cpp +++ /dev/null @@ -1,636 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Exporter.cpp - -Assimp export interface. While it's public interface bears many similarities -to the import interface (in fact, it is largely symmetric), the internal -implementations differs a lot. Exporters are considered stateless and are -simple callbacks which we maintain in a global list along with their -description strings. - -Here we implement only the C++ interface (Assimp::Exporter). -*/ - -#ifndef ASSIMP_BUILD_NO_EXPORT - -#include <assimp/BlobIOSystem.h> -#include <assimp/SceneCombiner.h> -#include <assimp/DefaultIOSystem.h> -#include <assimp/Exporter.hpp> -#include <assimp/mesh.h> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/Exceptional.h> - -#include "Common/DefaultProgressHandler.h" -#include "Common/BaseProcess.h" -#include "Common/ScenePrivate.h" -#include "PostProcessing/CalcTangentsProcess.h" -#include "PostProcessing/MakeVerboseFormat.h" -#include "PostProcessing/JoinVerticesProcess.h" -#include "PostProcessing/ConvertToLHProcess.h" -#include "PostProcessing/PretransformVertices.h" - -#include <memory> - -namespace Assimp { - -// PostStepRegistry.cpp -void GetPostProcessingStepInstanceList(std::vector< BaseProcess* >& out); - -// ------------------------------------------------------------------------------------------------ -// Exporter worker function prototypes. 
Should not be necessary to #ifndef them, it's just a prototype -// do not use const, because some exporter need to convert the scene temporary -void ExportSceneCollada(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneXFile(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneStep(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneObj(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneObjNoMtl(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneSTL(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneSTLBinary(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportScenePly(const char*,IOSystem*, const aiScene*, const ExportProperties*); -void ExportScenePlyBinary(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportScene3DS(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneGLTF(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneGLB(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneGLTF2(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneGLB2(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneAssbin(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneAssxml(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneX3D(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneFBX(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneFBXA(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportScene3MF( const char*, IOSystem*, const aiScene*, const ExportProperties* ); -void ExportSceneM3D(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportSceneA3D(const char*, IOSystem*, const aiScene*, const ExportProperties*); -void ExportAssimp2Json(const char* , IOSystem*, const aiScene* , const Assimp::ExportProperties*); - -// ------------------------------------------------------------------------------------------------ -// global array of all export formats which Assimp supports in its current build -Exporter::ExportFormatEntry gExporters[] = -{ -#ifndef ASSIMP_BUILD_NO_COLLADA_EXPORTER - Exporter::ExportFormatEntry( "collada", "COLLADA - Digital Asset Exchange Schema", "dae", &ExportSceneCollada ), -#endif - -#ifndef ASSIMP_BUILD_NO_X_EXPORTER - Exporter::ExportFormatEntry( "x", "X Files", "x", &ExportSceneXFile, - aiProcess_MakeLeftHanded | aiProcess_FlipWindingOrder | aiProcess_FlipUVs ), -#endif - -#ifndef ASSIMP_BUILD_NO_STEP_EXPORTER - Exporter::ExportFormatEntry( "stp", "Step Files", "stp", &ExportSceneStep, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_OBJ_EXPORTER - Exporter::ExportFormatEntry( "obj", "Wavefront OBJ format", "obj", &ExportSceneObj, - aiProcess_GenSmoothNormals /*| aiProcess_PreTransformVertices */ ), - Exporter::ExportFormatEntry( "objnomtl", "Wavefront OBJ format without material file", "obj", &ExportSceneObjNoMtl, - aiProcess_GenSmoothNormals /*| aiProcess_PreTransformVertices */ ), -#endif - -#ifndef ASSIMP_BUILD_NO_STL_EXPORTER - Exporter::ExportFormatEntry( "stl", "Stereolithography", "stl" , &ExportSceneSTL, - aiProcess_Triangulate | aiProcess_GenNormals | aiProcess_PreTransformVertices - ), - Exporter::ExportFormatEntry( "stlb", 
"Stereolithography (binary)", "stl" , &ExportSceneSTLBinary, - aiProcess_Triangulate | aiProcess_GenNormals | aiProcess_PreTransformVertices - ), -#endif - -#ifndef ASSIMP_BUILD_NO_PLY_EXPORTER - Exporter::ExportFormatEntry( "ply", "Stanford Polygon Library", "ply" , &ExportScenePly, - aiProcess_PreTransformVertices - ), - Exporter::ExportFormatEntry( "plyb", "Stanford Polygon Library (binary)", "ply", &ExportScenePlyBinary, - aiProcess_PreTransformVertices - ), -#endif - -#ifndef ASSIMP_BUILD_NO_3DS_EXPORTER - Exporter::ExportFormatEntry( "3ds", "Autodesk 3DS (legacy)", "3ds" , &ExportScene3DS, - aiProcess_Triangulate | aiProcess_SortByPType | aiProcess_JoinIdenticalVertices ), -#endif - -#ifndef ASSIMP_BUILD_NO_GLTF_EXPORTER - Exporter::ExportFormatEntry( "gltf2", "GL Transmission Format v. 2", "gltf", &ExportSceneGLTF2, - aiProcess_JoinIdenticalVertices | aiProcess_Triangulate | aiProcess_SortByPType ), - Exporter::ExportFormatEntry( "glb2", "GL Transmission Format v. 2 (binary)", "glb", &ExportSceneGLB2, - aiProcess_JoinIdenticalVertices | aiProcess_Triangulate | aiProcess_SortByPType ), - Exporter::ExportFormatEntry( "gltf", "GL Transmission Format", "gltf", &ExportSceneGLTF, - aiProcess_JoinIdenticalVertices | aiProcess_Triangulate | aiProcess_SortByPType ), - Exporter::ExportFormatEntry( "glb", "GL Transmission Format (binary)", "glb", &ExportSceneGLB, - aiProcess_JoinIdenticalVertices | aiProcess_Triangulate | aiProcess_SortByPType ), -#endif - -#ifndef ASSIMP_BUILD_NO_ASSBIN_EXPORTER - Exporter::ExportFormatEntry( "assbin", "Assimp Binary File", "assbin" , &ExportSceneAssbin, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_ASSXML_EXPORTER - Exporter::ExportFormatEntry( "assxml", "Assimp XML Document", "assxml" , &ExportSceneAssxml, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_X3D_EXPORTER - Exporter::ExportFormatEntry( "x3d", "Extensible 3D", "x3d" , &ExportSceneX3D, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - Exporter::ExportFormatEntry( "fbx", "Autodesk FBX (binary)", "fbx", &ExportSceneFBX, 0 ), - Exporter::ExportFormatEntry( "fbxa", "Autodesk FBX (ascii)", "fbx", &ExportSceneFBXA, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_M3D_EXPORTER - Exporter::ExportFormatEntry( "m3d", "Model 3D (binary)", "m3d", &ExportSceneM3D, 0 ), - Exporter::ExportFormatEntry( "a3d", "Model 3D (ascii)", "m3d", &ExportSceneA3D, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_3MF_EXPORTER - Exporter::ExportFormatEntry( "3mf", "The 3MF-File-Format", "3mf", &ExportScene3MF, 0 ), -#endif - -#ifndef ASSIMP_BUILD_NO_ASSJSON_EXPORTER - Exporter::ExportFormatEntry( "assjson", "Assimp JSON Document", "json", &ExportAssimp2Json, 0) -#endif -}; - -#define ASSIMP_NUM_EXPORTERS (sizeof(gExporters)/sizeof(gExporters[0])) - - -class ExporterPimpl { -public: - ExporterPimpl() - : blob() - , mIOSystem(new Assimp::DefaultIOSystem()) - , mIsDefaultIOHandler(true) - , mProgressHandler( nullptr ) - , mIsDefaultProgressHandler( true ) - , mPostProcessingSteps() - , mError() - , mExporters() { - GetPostProcessingStepInstanceList(mPostProcessingSteps); - - // grab all built-in exporters - if ( 0 != ( ASSIMP_NUM_EXPORTERS ) ) { - mExporters.resize( ASSIMP_NUM_EXPORTERS ); - std::copy( gExporters, gExporters + ASSIMP_NUM_EXPORTERS, mExporters.begin() ); - } - } - - ~ExporterPimpl() { - delete blob; - - // Delete all post-processing plug-ins - for( unsigned int a = 0; a < mPostProcessingSteps.size(); a++) { - delete mPostProcessingSteps[a]; - } - delete mProgressHandler; - } - -public: - aiExportDataBlob* blob; - std::shared_ptr< 
Assimp::IOSystem > mIOSystem; - bool mIsDefaultIOHandler; - - /** The progress handler */ - ProgressHandler *mProgressHandler; - bool mIsDefaultProgressHandler; - - /** Post processing steps we can apply at the imported data. */ - std::vector< BaseProcess* > mPostProcessingSteps; - - /** Last fatal export error */ - std::string mError; - - /** Exporters, this includes those registered using #Assimp::Exporter::RegisterExporter */ - std::vector<Exporter::ExportFormatEntry> mExporters; -}; - -} // end of namespace Assimp - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -Exporter :: Exporter() -: pimpl(new ExporterPimpl()) { - pimpl->mProgressHandler = new DefaultProgressHandler(); -} - -// ------------------------------------------------------------------------------------------------ -Exporter::~Exporter() { - FreeBlob(); - delete pimpl; -} - -// ------------------------------------------------------------------------------------------------ -void Exporter::SetIOHandler( IOSystem* pIOHandler) { - pimpl->mIsDefaultIOHandler = !pIOHandler; - pimpl->mIOSystem.reset(pIOHandler); -} - -// ------------------------------------------------------------------------------------------------ -IOSystem* Exporter::GetIOHandler() const { - return pimpl->mIOSystem.get(); -} - -// ------------------------------------------------------------------------------------------------ -bool Exporter::IsDefaultIOHandler() const { - return pimpl->mIsDefaultIOHandler; -} - -// ------------------------------------------------------------------------------------------------ -void Exporter::SetProgressHandler(ProgressHandler* pHandler) { - ai_assert(nullptr != pimpl); - - if ( nullptr == pHandler) { - // Release pointer in the possession of the caller - pimpl->mProgressHandler = new DefaultProgressHandler(); - pimpl->mIsDefaultProgressHandler = true; - return; - } - - if (pimpl->mProgressHandler == pHandler) { - return; - } - - delete pimpl->mProgressHandler; - pimpl->mProgressHandler = pHandler; - pimpl->mIsDefaultProgressHandler = false; -} - -// ------------------------------------------------------------------------------------------------ -const aiExportDataBlob* Exporter::ExportToBlob( const aiScene* pScene, const char* pFormatId, - unsigned int pPreprocessing, const ExportProperties* pProperties) { - if (pimpl->blob) { - delete pimpl->blob; - pimpl->blob = nullptr; - } - - std::shared_ptr<IOSystem> old = pimpl->mIOSystem; - BlobIOSystem* blobio = new BlobIOSystem(); - pimpl->mIOSystem = std::shared_ptr<IOSystem>( blobio ); - - if (AI_SUCCESS != Export(pScene,pFormatId,blobio->GetMagicFileName(), pPreprocessing, pProperties)) { - pimpl->mIOSystem = old; - return nullptr; - } - - pimpl->blob = blobio->GetBlobChain(); - pimpl->mIOSystem = old; - - return pimpl->blob; -} - -// ------------------------------------------------------------------------------------------------ -aiReturn Exporter::Export( const aiScene* pScene, const char* pFormatId, const char* pPath, - unsigned int pPreprocessing, const ExportProperties* pProperties) { - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // when they create scenes from scratch, users will likely create them not in verbose - // format. They will likely not be aware that there is a flag in the scene to indicate - // this, however. To avoid surprises and bug reports, we check for duplicates in - // meshes upfront. 
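// [Editor's note - illustrative addition, not in the removed file] "Verbose" layout
// means no vertex is referenced by more than one face, i.e. the inverse of what
// aiProcess_JoinIdenticalVertices produces. A quad split into two triangles stores
// six vertices instead of four:
//
//   indexed (non-verbose): vertices {A,B,C,D}     faces (0,1,2) (0,2,3)
//   verbose:               vertices {A,B,C,A,C,D} faces (0,1,2) (3,4,5)
//
// MakeVerboseFormatProcess::IsVerboseFormat() checks exactly this property, which is
// why the flag test below also consults it for hand-built scenes.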
- const bool is_verbose_format = !(pScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) || MakeVerboseFormatProcess::IsVerboseFormat(pScene); - - pimpl->mProgressHandler->UpdateFileWrite(0, 4); - - pimpl->mError = ""; - for (size_t i = 0; i < pimpl->mExporters.size(); ++i) { - const Exporter::ExportFormatEntry& exp = pimpl->mExporters[i]; - if (!strcmp(exp.mDescription.id,pFormatId)) { - try { - // Always create a full copy of the scene. We might optimize this one day, - // but for now it is the most pragmatic way. - aiScene* scenecopy_tmp = nullptr; - SceneCombiner::CopyScene(&scenecopy_tmp,pScene); - - pimpl->mProgressHandler->UpdateFileWrite(1, 4); - - std::unique_ptr<aiScene> scenecopy(scenecopy_tmp); - const ScenePrivateData* const priv = ScenePriv(pScene); - - // steps that are not idempotent, i.e. we might need to run them again, usually to get back to the - // original state before the step was applied first. When checking which steps we don't need - // to run, those are excluded. - const unsigned int nonIdempotentSteps = aiProcess_FlipWindingOrder | aiProcess_FlipUVs | aiProcess_MakeLeftHanded; - - // Erase all pp steps that were already applied to this scene - const unsigned int pp = (exp.mEnforcePP | pPreprocessing) & ~(priv && !priv->mIsCopy - ? (priv->mPPStepsApplied & ~nonIdempotentSteps) - : 0u); - - // If no extra post-processing was specified, and we obtained this scene from an - // Assimp importer, apply the reverse steps automatically. - // TODO: either drop this, or document it. Otherwise it is just a bad surprise. - //if (!pPreprocessing && priv) { - // pp |= (nonIdempotentSteps & priv->mPPStepsApplied); - //} - - // If the input scene is not in verbose format, but there is at least post-processing step that relies on it, - // we need to run the MakeVerboseFormat step first. 
- bool must_join_again = false; - if (!is_verbose_format) { - bool verbosify = false; - for( unsigned int a = 0; a < pimpl->mPostProcessingSteps.size(); a++) { - BaseProcess* const p = pimpl->mPostProcessingSteps[a]; - - if (p->IsActive(pp) && p->RequireVerboseFormat()) { - verbosify = true; - break; - } - } - - if (verbosify || (exp.mEnforcePP & aiProcess_JoinIdenticalVertices)) { - ASSIMP_LOG_DEBUG("export: Scene data not in verbose format, applying MakeVerboseFormat step first"); - - MakeVerboseFormatProcess proc; - proc.Execute(scenecopy.get()); - - if(!(exp.mEnforcePP & aiProcess_JoinIdenticalVertices)) { - must_join_again = true; - } - } - } - - pimpl->mProgressHandler->UpdateFileWrite(2, 4); - - if (pp) { - // the three 'conversion' steps need to be executed first because all other steps rely on the standard data layout - { - FlipWindingOrderProcess step; - if (step.IsActive(pp)) { - step.Execute(scenecopy.get()); - } - } - - { - FlipUVsProcess step; - if (step.IsActive(pp)) { - step.Execute(scenecopy.get()); - } - } - - { - MakeLeftHandedProcess step; - if (step.IsActive(pp)) { - step.Execute(scenecopy.get()); - } - } - - bool exportPointCloud(false); - if (nullptr != pProperties) { - exportPointCloud = pProperties->GetPropertyBool(AI_CONFIG_EXPORT_POINT_CLOUDS); - } - - // dispatch other processes - for( unsigned int a = 0; a < pimpl->mPostProcessingSteps.size(); a++) { - BaseProcess* const p = pimpl->mPostProcessingSteps[a]; - - if (p->IsActive(pp) - && !dynamic_cast<FlipUVsProcess*>(p) - && !dynamic_cast<FlipWindingOrderProcess*>(p) - && !dynamic_cast<MakeLeftHandedProcess*>(p)) { - if (dynamic_cast<PretransformVertices*>(p) && exportPointCloud) { - continue; - } - p->Execute(scenecopy.get()); - } - } - ScenePrivateData* const privOut = ScenePriv(scenecopy.get()); - ai_assert(nullptr != privOut); - - privOut->mPPStepsApplied |= pp; - } - - pimpl->mProgressHandler->UpdateFileWrite(3, 4); - - if(must_join_again) { - JoinVerticesProcess proc; - proc.Execute(scenecopy.get()); - } - - ExportProperties emptyProperties; // Never pass NULL ExportProperties so Exporters don't have to worry. - ExportProperties* pProp = pProperties ? 
(ExportProperties*)pProperties : &emptyProperties; - pProp->SetPropertyBool("bJoinIdenticalVertices", must_join_again); - exp.mExportFunction(pPath,pimpl->mIOSystem.get(),scenecopy.get(), pProp); - exp.mExportFunction(pPath,pimpl->mIOSystem.get(),scenecopy.get(), pProp); - - pimpl->mProgressHandler->UpdateFileWrite(4, 4); - } catch (DeadlyExportError& err) { - pimpl->mError = err.what(); - return AI_FAILURE; - } - return AI_SUCCESS; - } - } - - pimpl->mError = std::string("Found no exporter to handle this file format: ") + pFormatId; - ASSIMP_END_EXCEPTION_REGION(aiReturn); - - return AI_FAILURE; -} - -// ------------------------------------------------------------------------------------------------ -const char* Exporter::GetErrorString() const { - return pimpl->mError.c_str(); -} - -// ------------------------------------------------------------------------------------------------ -void Exporter::FreeBlob() { - delete pimpl->blob; - pimpl->blob = nullptr; - - pimpl->mError = ""; -} - -// ------------------------------------------------------------------------------------------------ -const aiExportDataBlob* Exporter::GetBlob() const { - return pimpl->blob; -} - -// ------------------------------------------------------------------------------------------------ -const aiExportDataBlob* Exporter::GetOrphanedBlob() const { - const aiExportDataBlob* tmp = pimpl->blob; - pimpl->blob = nullptr; - return tmp; -} - -// ------------------------------------------------------------------------------------------------ -size_t Exporter::GetExportFormatCount() const { - return pimpl->mExporters.size(); -} - -// ------------------------------------------------------------------------------------------------ -const aiExportFormatDesc* Exporter::GetExportFormatDescription( size_t index ) const { - if (index >= GetExportFormatCount()) { - return nullptr; - } - - // Return from static storage if the requested index is built-in. 
- if (index < sizeof(gExporters) / sizeof(gExporters[0])) { - return &gExporters[index].mDescription; - } - - return &pimpl->mExporters[index].mDescription; -} - -// ------------------------------------------------------------------------------------------------ -aiReturn Exporter::RegisterExporter(const ExportFormatEntry& desc) { - for(const ExportFormatEntry& e : pimpl->mExporters) { - if (!strcmp(e.mDescription.id,desc.mDescription.id)) { - return aiReturn_FAILURE; - } - } - - pimpl->mExporters.push_back(desc); - return aiReturn_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -void Exporter::UnregisterExporter(const char* id) { - for(std::vector<ExportFormatEntry>::iterator it = pimpl->mExporters.begin(); - it != pimpl->mExporters.end(); ++it) { - if (!strcmp((*it).mDescription.id,id)) { - pimpl->mExporters.erase(it); - break; - } - } -} - -// ------------------------------------------------------------------------------------------------ -ExportProperties::ExportProperties() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -ExportProperties::ExportProperties(const ExportProperties &other) -: mIntProperties(other.mIntProperties) -, mFloatProperties(other.mFloatProperties) -, mStringProperties(other.mStringProperties) -, mMatrixProperties(other.mMatrixProperties) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool ExportProperties::SetPropertyInteger(const char* szName, int iValue) { - return SetGenericProperty<int>(mIntProperties, szName,iValue); -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool ExportProperties::SetPropertyFloat(const char* szName, ai_real iValue) { - return SetGenericProperty<ai_real>(mFloatProperties, szName,iValue); -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool ExportProperties::SetPropertyString(const char* szName, const std::string& value) { - return SetGenericProperty<std::string>(mStringProperties, szName,value); -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool ExportProperties::SetPropertyMatrix(const char* szName, const aiMatrix4x4& value) { - return SetGenericProperty<aiMatrix4x4>(mMatrixProperties, szName,value); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -int ExportProperties::GetPropertyInteger(const char* szName, int iErrorReturn /*= 0xffffffff*/) const { - return GetGenericProperty<int>(mIntProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -ai_real ExportProperties::GetPropertyFloat(const char* szName, ai_real iErrorReturn /*= 10e10*/) const { - return GetGenericProperty<ai_real>(mFloatProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -const std::string ExportProperties::GetPropertyString(const char* szName, - const std::string& iErrorReturn /*= ""*/) const { - return 
GetGenericProperty<std::string>(mStringProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -const aiMatrix4x4 ExportProperties::GetPropertyMatrix(const char* szName, - const aiMatrix4x4& iErrorReturn /*= aiMatrix4x4()*/) const { - return GetGenericProperty<aiMatrix4x4>(mMatrixProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -bool ExportProperties::HasPropertyInteger(const char* szName) const { - return HasGenericProperty<int>(mIntProperties, szName); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -bool ExportProperties::HasPropertyBool(const char* szName) const { - return HasGenericProperty<int>(mIntProperties, szName); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -bool ExportProperties::HasPropertyFloat(const char* szName) const { - return HasGenericProperty<ai_real>(mFloatProperties, szName); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -bool ExportProperties::HasPropertyString(const char* szName) const { - return HasGenericProperty<std::string>(mStringProperties, szName); -} - -// ------------------------------------------------------------------------------------------------ -// Has a configuration property -bool ExportProperties::HasPropertyMatrix(const char* szName) const { - return HasGenericProperty<aiMatrix4x4>(mMatrixProperties, szName); -} - - -#endif // !ASSIMP_BUILD_NO_EXPORT diff --git a/thirdparty/assimp/code/Common/FileLogStream.h b/thirdparty/assimp/code/Common/FileLogStream.h deleted file mode 100644 index 740c503192..0000000000 --- a/thirdparty/assimp/code/Common/FileLogStream.h +++ /dev/null @@ -1,107 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -/** @file FileLofStream.h -*/ -#ifndef ASSIMP_FILELOGSTREAM_H_INC -#define ASSIMP_FILELOGSTREAM_H_INC - -#include <assimp/LogStream.hpp> -#include <assimp/IOStream.hpp> -#include <assimp/DefaultIOSystem.h> - -namespace Assimp { - -// ---------------------------------------------------------------------------------- -/** @class FileLogStream - * @brief Logstream to write into a file. - */ -class FileLogStream : - public LogStream -{ -public: - FileLogStream( const char* file, IOSystem* io = NULL ); - ~FileLogStream(); - void write( const char* message ); - -private: - IOStream *m_pStream; -}; - -// ---------------------------------------------------------------------------------- -// Constructor -inline FileLogStream::FileLogStream( const char* file, IOSystem* io ) : - m_pStream(NULL) -{ - if ( !file || 0 == *file ) - return; - - // If no IOSystem is specified: take a default one - if (!io) - { - DefaultIOSystem FileSystem; - m_pStream = FileSystem.Open( file, "wt"); - } - else m_pStream = io->Open( file, "wt" ); -} - -// ---------------------------------------------------------------------------------- -// Destructor -inline FileLogStream::~FileLogStream() -{ - // The virtual d'tor should destroy the underlying file - delete m_pStream; -} - -// ---------------------------------------------------------------------------------- -// Write method -inline void FileLogStream::write( const char* message ) -{ - if (m_pStream != NULL) - { - m_pStream->Write(message, sizeof(char), ::strlen(message)); - m_pStream->Flush(); - } -} - -// ---------------------------------------------------------------------------------- -} // !Namespace Assimp - -#endif // !! ASSIMP_FILELOGSTREAM_H_INC diff --git a/thirdparty/assimp/code/Common/FileSystemFilter.h b/thirdparty/assimp/code/Common/FileSystemFilter.h deleted file mode 100644 index 9923cdbdd3..0000000000 --- a/thirdparty/assimp/code/Common/FileSystemFilter.h +++ /dev/null @@ -1,345 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2008, assimp team -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FileSystemFilter.h - * Implements a filter system to filter calls to Exists() and Open() - * in order to improve the success rate of file opening ... - */ -#pragma once -#ifndef AI_FILESYSTEMFILTER_H_INC -#define AI_FILESYSTEMFILTER_H_INC - -#include <assimp/IOSystem.hpp> -#include <assimp/DefaultLogger.hpp> -#include <assimp/fast_atof.h> -#include <assimp/ParsingUtils.h> - -namespace Assimp { - -inline bool IsHex(char s) { - return (s>='0' && s<='9') || (s>='a' && s<='f') || (s>='A' && s<='F'); -} - -// --------------------------------------------------------------------------- -/** File system filter - */ -class FileSystemFilter : public IOSystem -{ -public: - /** Constructor. */ - FileSystemFilter(const std::string& file, IOSystem* old) - : mWrapped (old) - , mSrc_file(file) - , mSep(mWrapped->getOsSeparator()) { - ai_assert(nullptr != mWrapped); - - // Determine base directory - mBase = mSrc_file; - std::string::size_type ss2; - if (std::string::npos != (ss2 = mBase.find_last_of("\\/"))) { - mBase.erase(ss2,mBase.length()-ss2); - } else { - mBase = ""; - } - - // make sure the directory is terminated properly - char s; - - if ( mBase.empty() ) { - mBase = "."; - mBase += getOsSeparator(); - } else if ((s = *(mBase.end()-1)) != '\\' && s != '/') { - mBase += getOsSeparator(); - } - - DefaultLogger::get()->info("Import root directory is \'" + mBase + "\'"); - } - - /** Destructor. */ - ~FileSystemFilter() { - // empty - } - - // ------------------------------------------------------------------- - /** Tests for the existence of a file at the given path. */ - bool Exists( const char* pFile) const { - ai_assert( nullptr != mWrapped ); - - std::string tmp = pFile; - - // Currently this IOSystem is also used to open THE ONE FILE. - if (tmp != mSrc_file) { - BuildPath(tmp); - Cleanup(tmp); - } - - return mWrapped->Exists(tmp); - } - - // ------------------------------------------------------------------- - /** Returns the directory separator. */ - char getOsSeparator() const { - return mSep; - } - - // ------------------------------------------------------------------- - /** Open a new file with a given path. 
*/ - IOStream* Open( const char* pFile, const char* pMode = "rb") { - ai_assert( nullptr != mWrapped ); - if ( nullptr == pFile || nullptr == pMode ) { - return nullptr; - } - - ai_assert( nullptr != pFile ); - ai_assert( nullptr != pMode ); - - // First try the unchanged path - IOStream* s = mWrapped->Open(pFile,pMode); - - if (nullptr == s) { - std::string tmp = pFile; - - // Try to convert between absolute and relative paths - BuildPath(tmp); - s = mWrapped->Open(tmp,pMode); - - if (nullptr == s) { - // Finally, look for typical issues with paths - // and try to correct them. This is our last - // resort. - tmp = pFile; - Cleanup(tmp); - BuildPath(tmp); - s = mWrapped->Open(tmp,pMode); - } - } - - return s; - } - - // ------------------------------------------------------------------- - /** Closes the given file and releases all resources associated with it. */ - void Close( IOStream* pFile) { - ai_assert( nullptr != mWrapped ); - return mWrapped->Close(pFile); - } - - // ------------------------------------------------------------------- - /** Compare two paths */ - bool ComparePaths (const char* one, const char* second) const { - ai_assert( nullptr != mWrapped ); - return mWrapped->ComparePaths (one,second); - } - - // ------------------------------------------------------------------- - /** Pushes a new directory onto the directory stack. */ - bool PushDirectory(const std::string &path ) { - ai_assert( nullptr != mWrapped ); - return mWrapped->PushDirectory(path); - } - - // ------------------------------------------------------------------- - /** Returns the top directory from the stack. */ - const std::string &CurrentDirectory() const { - ai_assert( nullptr != mWrapped ); - return mWrapped->CurrentDirectory(); - } - - // ------------------------------------------------------------------- - /** Returns the number of directories stored on the stack. */ - size_t StackSize() const { - ai_assert( nullptr != mWrapped ); - return mWrapped->StackSize(); - } - - // ------------------------------------------------------------------- - /** Pops the top directory from the stack. */ - bool PopDirectory() { - ai_assert( nullptr != mWrapped ); - return mWrapped->PopDirectory(); - } - - // ------------------------------------------------------------------- - /** Creates an new directory at the given path. */ - bool CreateDirectory(const std::string &path) { - ai_assert( nullptr != mWrapped ); - return mWrapped->CreateDirectory(path); - } - - // ------------------------------------------------------------------- - /** Will change the current directory to the given path. */ - bool ChangeDirectory(const std::string &path) { - ai_assert( nullptr != mWrapped ); - return mWrapped->ChangeDirectory(path); - } - - // ------------------------------------------------------------------- - /** Delete file. */ - bool DeleteFile(const std::string &file) { - ai_assert( nullptr != mWrapped ); - return mWrapped->DeleteFile(file); - } - -private: - // ------------------------------------------------------------------- - /** Build a valid path from a given relative or absolute path. - */ - void BuildPath (std::string& in) const { - ai_assert( nullptr != mWrapped ); - // if we can already access the file, great. - if (in.length() < 3 || mWrapped->Exists(in)) { - return; - } - - // Determine whether this is a relative path (Windows-specific - most assets are packaged on Windows). 
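// [Editor's note - added comment, not in the removed file] The check below only
// recognizes DOS-style drive-absolute paths ("C:\textures\foo.png"): when the second
// character is not a drive-letter colon, the path is treated as relative, the import
// base directory is prepended, and Exists() is retried on the combined path.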
- if (in[1] != ':') { - - // append base path and try - const std::string tmp = mBase + in; - if (mWrapped->Exists(tmp)) { - in = tmp; - return; - } - } - - // Chop of the file name and look in the model directory, if - // this fails try all sub paths of the given path, i.e. - // if the given path is foo/bar/something.lwo, try - // <base>/something.lwo - // <base>/bar/something.lwo - // <base>/foo/bar/something.lwo - std::string::size_type pos = in.rfind('/'); - if (std::string::npos == pos) { - pos = in.rfind('\\'); - } - - if (std::string::npos != pos) { - std::string tmp; - std::string::size_type last_dirsep = std::string::npos; - - while(true) { - tmp = mBase; - tmp += mSep; - - std::string::size_type dirsep = in.rfind('/', last_dirsep); - if (std::string::npos == dirsep) { - dirsep = in.rfind('\\', last_dirsep); - } - - if (std::string::npos == dirsep || dirsep == 0) { - // we did try this already. - break; - } - - last_dirsep = dirsep-1; - - tmp += in.substr(dirsep+1, in.length()-pos); - if (mWrapped->Exists(tmp)) { - in = tmp; - return; - } - } - } - - // hopefully the underlying file system has another few tricks to access this file ... - } - - // ------------------------------------------------------------------- - /** Cleanup the given path - */ - void Cleanup (std::string& in) const { - if(in.empty()) { - return; - } - - // Remove a very common issue when we're parsing file names: spaces at the - // beginning of the path. - char last = 0; - std::string::iterator it = in.begin(); - while (IsSpaceOrNewLine( *it ))++it; - if (it != in.begin()) { - in.erase(in.begin(),it+1); - } - - const char separator = getOsSeparator(); - for (it = in.begin(); it != in.end(); ++it) { - // Exclude :// and \\, which remain untouched. - // https://sourceforge.net/tracker/?func=detail&aid=3031725&group_id=226462&atid=1067632 - if ( !strncmp(&*it, "://", 3 )) { - it += 3; - continue; - } - if (it == in.begin() && !strncmp(&*it, "\\\\", 2)) { - it += 2; - continue; - } - - // Cleanup path delimiters - if (*it == '/' || (*it) == '\\') { - *it = separator; - - // And we're removing double delimiters, frequent issue with - // incorrectly composited paths ... - if (last == *it) { - it = in.erase(it); - --it; - } - } else if (*it == '%' && in.end() - it > 2) { - // Hex sequence in URIs - if( IsHex((&*it)[0]) && IsHex((&*it)[1]) ) { - *it = HexOctetToDecimal(&*it); - it = in.erase(it+1,it+2); - --it; - } - } - - last = *it; - } - } - -private: - IOSystem *mWrapped; - std::string mSrc_file, mBase; - char mSep; -}; - -} //!ns Assimp - -#endif //AI_DEFAULTIOSYSTEM_H_INC diff --git a/thirdparty/assimp/code/Common/IFF.h b/thirdparty/assimp/code/Common/IFF.h deleted file mode 100644 index 91d7d48289..0000000000 --- a/thirdparty/assimp/code/Common/IFF.h +++ /dev/null @@ -1,102 +0,0 @@ -// Definitions for the Interchange File Format (IFF) -// Alexander Gessler, 2006 -// Adapted to Assimp August 2008 - -#ifndef AI_IFF_H_INCLUDED -#define AI_IFF_H_INCLUDED - -#include <assimp/ByteSwapper.h> - -namespace Assimp { -namespace IFF { - -///////////////////////////////////////////////////////////////////////////////// -//! Describes an IFF chunk header -///////////////////////////////////////////////////////////////////////////////// -struct ChunkHeader -{ - //! Type of the chunk header - FourCC - uint32_t type; - - //! Length of the chunk data, in bytes - uint32_t length; -}; - - -///////////////////////////////////////////////////////////////////////////////// -//! 
Describes an IFF sub chunk header -///////////////////////////////////////////////////////////////////////////////// -struct SubChunkHeader -{ - //! Type of the chunk header - FourCC - uint32_t type; - - //! Length of the chunk data, in bytes - uint16_t length; -}; - - -#define AI_IFF_FOURCC(a,b,c,d) ((uint32_t) (((uint8_t)a << 24u) | \ - ((uint8_t)b << 16u) | ((uint8_t)c << 8u) | ((uint8_t)d))) - - -#define AI_IFF_FOURCC_FORM AI_IFF_FOURCC('F','O','R','M') - - -///////////////////////////////////////////////////////////////////////////////// -//! Load a chunk header -//! @param outFile Pointer to the file data - points to the chunk data afterwards -//! @return Copy of the chunk header -///////////////////////////////////////////////////////////////////////////////// -inline ChunkHeader LoadChunk(uint8_t*& outFile) -{ - ChunkHeader head; - ::memcpy(&head.type, outFile, 4); - outFile += 4; - ::memcpy(&head.length, outFile, 4); - outFile += 4; - AI_LSWAP4(head.length); - AI_LSWAP4(head.type); - return head; -} - -///////////////////////////////////////////////////////////////////////////////// -//! Load a sub chunk header -//! @param outFile Pointer to the file data - points to the chunk data afterwards -//! @return Copy of the sub chunk header -///////////////////////////////////////////////////////////////////////////////// -inline SubChunkHeader LoadSubChunk(uint8_t*& outFile) -{ - SubChunkHeader head; - ::memcpy(&head.type, outFile, 4); - outFile += 4; - ::memcpy(&head.length, outFile, 2); - outFile += 2; - AI_LSWAP2(head.length); - AI_LSWAP4(head.type); - return head; -} - -///////////////////////////////////////////////////////////////////////////////// -//! Read the file header and return the type of the file and its size -//! @param outFile Pointer to the file data. The buffer must at -//! least be 12 bytes large. -//! @param fileType Receives the type of the file -//! @return 0 if everything was OK, otherwise an error message -///////////////////////////////////////////////////////////////////////////////// -inline const char* ReadHeader(uint8_t* outFile, uint32_t& fileType) -{ - ChunkHeader head = LoadChunk(outFile); - if(AI_IFF_FOURCC_FORM != head.type) - { - return "The file is not an IFF file: FORM chunk is missing"; - } - ::memcpy(&fileType, outFile, 4); - AI_LSWAP4(fileType); - return 0; -} - - -}} - -#endif // !! AI_IFF_H_INCLUDED diff --git a/thirdparty/assimp/code/Common/Importer.cpp b/thirdparty/assimp/code/Common/Importer.cpp deleted file mode 100644 index 91b50859a0..0000000000 --- a/thirdparty/assimp/code/Common/Importer.cpp +++ /dev/null @@ -1,1174 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Importer.cpp - * @brief Implementation of the CPP-API class #Importer - */ - -#include <assimp/version.h> -#include <assimp/config.h> -#include <assimp/importerdesc.h> - -// ------------------------------------------------------------------------------------------------ -/* Uncomment this line to prevent Assimp from catching unknown exceptions. - * - * Note that any Exception except DeadlyImportError may lead to - * undefined behaviour -> loaders could remain in an unusable state and - * further imports with the same Importer instance could fail/crash/burn ... - */ -// ------------------------------------------------------------------------------------------------ -#ifndef ASSIMP_BUILD_DEBUG -# define ASSIMP_CATCH_GLOBAL_EXCEPTIONS -#endif - -// ------------------------------------------------------------------------------------------------ -// Internal headers -// ------------------------------------------------------------------------------------------------ -#include "Common/Importer.h" -#include "Common/BaseProcess.h" -#include "Common/DefaultProgressHandler.h" -#include "PostProcessing/ProcessHelper.h" -#include "Common/ScenePreprocessor.h" -#include "Common/ScenePrivate.h" - -#include <assimp/BaseImporter.h> -#include <assimp/GenericProperty.h> -#include <assimp/MemoryIOWrapper.h> -#include <assimp/Profiler.h> -#include <assimp/TinyFormatter.h> -#include <assimp/Exceptional.h> -#include <assimp/Profiler.h> -#include <set> -#include <memory> -#include <cctype> - -#include <assimp/DefaultIOStream.h> -#include <assimp/DefaultIOSystem.h> - -#ifndef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS -# include "PostProcessing/ValidateDataStructure.h" -#endif - -using namespace Assimp::Profiling; -using namespace Assimp::Formatter; - -namespace Assimp { - // ImporterRegistry.cpp - void GetImporterInstanceList(std::vector< BaseImporter* >& out); - void DeleteImporterInstanceList(std::vector< BaseImporter* >& out); - - // PostStepRegistry.cpp - void GetPostProcessingStepInstanceList(std::vector< BaseProcess* >& out); -} - -using namespace Assimp; -using namespace Assimp::Intern; - -// ------------------------------------------------------------------------------------------------ -// Intern::AllocateFromAssimpHeap serves as abstract base class. It overrides -// new and delete (and their array counterparts) of public API classes (e.g. Logger) to -// utilize our DLL heap. 
-// See http://www.gotw.ca/publications/mill15.htm -// ------------------------------------------------------------------------------------------------ -void* AllocateFromAssimpHeap::operator new ( size_t num_bytes) { - return ::operator new(num_bytes); -} - -void* AllocateFromAssimpHeap::operator new ( size_t num_bytes, const std::nothrow_t& ) throw() { - try { - return AllocateFromAssimpHeap::operator new( num_bytes ); - } - catch( ... ) { - return NULL; - } -} - -void AllocateFromAssimpHeap::operator delete ( void* data) { - return ::operator delete(data); -} - -void* AllocateFromAssimpHeap::operator new[] ( size_t num_bytes) { - return ::operator new[](num_bytes); -} - -void* AllocateFromAssimpHeap::operator new[] ( size_t num_bytes, const std::nothrow_t& ) throw() { - try { - return AllocateFromAssimpHeap::operator new[]( num_bytes ); - } - catch( ... ) { - return NULL; - } -} - -void AllocateFromAssimpHeap::operator delete[] ( void* data) { - return ::operator delete[](data); -} - -// ------------------------------------------------------------------------------------------------ -// Importer constructor. -Importer::Importer() - : pimpl( new ImporterPimpl ) { - pimpl->mScene = NULL; - pimpl->mErrorString = ""; - - // Allocate a default IO handler - pimpl->mIOHandler = new DefaultIOSystem; - pimpl->mIsDefaultHandler = true; - pimpl->bExtraVerbose = false; // disable extra verbose mode by default - - pimpl->mProgressHandler = new DefaultProgressHandler(); - pimpl->mIsDefaultProgressHandler = true; - - GetImporterInstanceList(pimpl->mImporter); - GetPostProcessingStepInstanceList(pimpl->mPostProcessingSteps); - - // Allocate a SharedPostProcessInfo object and store pointers to it in all post-process steps in the list. - pimpl->mPPShared = new SharedPostProcessInfo(); - for (std::vector<BaseProcess*>::iterator it = pimpl->mPostProcessingSteps.begin(); - it != pimpl->mPostProcessingSteps.end(); - ++it) { - - (*it)->SetSharedData(pimpl->mPPShared); - } -} - -// ------------------------------------------------------------------------------------------------ -// Destructor of Importer -Importer::~Importer() -{ - // Delete all import plugins - DeleteImporterInstanceList(pimpl->mImporter); - - // Delete all post-processing plug-ins - for( unsigned int a = 0; a < pimpl->mPostProcessingSteps.size(); a++) - delete pimpl->mPostProcessingSteps[a]; - - // Delete the assigned IO and progress handler - delete pimpl->mIOHandler; - delete pimpl->mProgressHandler; - - // Kill imported scene. 
Destructor's should do that recursively - delete pimpl->mScene; - - // Delete shared post-processing data - delete pimpl->mPPShared; - - // and finally the pimpl itself - delete pimpl; -} - -// ------------------------------------------------------------------------------------------------ -// Register a custom post-processing step -aiReturn Importer::RegisterPPStep(BaseProcess* pImp) -{ - ai_assert(NULL != pImp); - ASSIMP_BEGIN_EXCEPTION_REGION(); - - pimpl->mPostProcessingSteps.push_back(pImp); - ASSIMP_LOG_INFO("Registering custom post-processing step"); - - ASSIMP_END_EXCEPTION_REGION(aiReturn); - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -// Register a custom loader plugin -aiReturn Importer::RegisterLoader(BaseImporter* pImp) -{ - ai_assert(NULL != pImp); - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // -------------------------------------------------------------------- - // Check whether we would have two loaders for the same file extension - // This is absolutely OK, but we should warn the developer of the new - // loader that his code will probably never be called if the first - // loader is a bit too lazy in his file checking. - // -------------------------------------------------------------------- - std::set<std::string> st; - std::string baked; - pImp->GetExtensionList(st); - - for(std::set<std::string>::const_iterator it = st.begin(); it != st.end(); ++it) { - -#ifdef ASSIMP_BUILD_DEBUG - if (IsExtensionSupported(*it)) { - ASSIMP_LOG_WARN_F("The file extension ", *it, " is already in use"); - } -#endif - baked += *it; - } - - // add the loader - pimpl->mImporter.push_back(pImp); - ASSIMP_LOG_INFO_F("Registering custom importer for these file extensions: ", baked); - ASSIMP_END_EXCEPTION_REGION(aiReturn); - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -// Unregister a custom loader plugin -aiReturn Importer::UnregisterLoader(BaseImporter* pImp) -{ - if(!pImp) { - // unregistering a NULL importer is no problem for us ... really! - return AI_SUCCESS; - } - - ASSIMP_BEGIN_EXCEPTION_REGION(); - std::vector<BaseImporter*>::iterator it = std::find(pimpl->mImporter.begin(), - pimpl->mImporter.end(),pImp); - - if (it != pimpl->mImporter.end()) { - pimpl->mImporter.erase(it); - ASSIMP_LOG_INFO("Unregistering custom importer: "); - return AI_SUCCESS; - } - ASSIMP_LOG_WARN("Unable to remove custom importer: I can't find you ..."); - ASSIMP_END_EXCEPTION_REGION(aiReturn); - return AI_FAILURE; -} - -// ------------------------------------------------------------------------------------------------ -// Unregister a custom loader plugin -aiReturn Importer::UnregisterPPStep(BaseProcess* pImp) -{ - if(!pImp) { - // unregistering a NULL ppstep is no problem for us ... really! 
- return AI_SUCCESS; - } - - ASSIMP_BEGIN_EXCEPTION_REGION(); - std::vector<BaseProcess*>::iterator it = std::find(pimpl->mPostProcessingSteps.begin(), - pimpl->mPostProcessingSteps.end(),pImp); - - if (it != pimpl->mPostProcessingSteps.end()) { - pimpl->mPostProcessingSteps.erase(it); - ASSIMP_LOG_INFO("Unregistering custom post-processing step"); - return AI_SUCCESS; - } - ASSIMP_LOG_WARN("Unable to remove custom post-processing step: I can't find you .."); - ASSIMP_END_EXCEPTION_REGION(aiReturn); - return AI_FAILURE; -} - -// ------------------------------------------------------------------------------------------------ -// Supplies a custom IO handler to the importer to open and access files. -void Importer::SetIOHandler( IOSystem* pIOHandler) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - // If the new handler is zero, allocate a default IO implementation. - if (!pIOHandler) - { - // Release pointer in the possession of the caller - pimpl->mIOHandler = new DefaultIOSystem(); - pimpl->mIsDefaultHandler = true; - } - // Otherwise register the custom handler - else if (pimpl->mIOHandler != pIOHandler) - { - delete pimpl->mIOHandler; - pimpl->mIOHandler = pIOHandler; - pimpl->mIsDefaultHandler = false; - } - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Get the currently set IO handler -IOSystem* Importer::GetIOHandler() const { - return pimpl->mIOHandler; -} - -// ------------------------------------------------------------------------------------------------ -// Check whether a custom IO handler is currently set -bool Importer::IsDefaultIOHandler() const { - return pimpl->mIsDefaultHandler; -} - -// ------------------------------------------------------------------------------------------------ -// Supplies a custom progress handler to get regular callbacks during importing -void Importer::SetProgressHandler ( ProgressHandler* pHandler ) { - ASSIMP_BEGIN_EXCEPTION_REGION(); - // If the new handler is zero, allocate a default implementation. 
- if (!pHandler) - { - // Release pointer in the possession of the caller - pimpl->mProgressHandler = new DefaultProgressHandler(); - pimpl->mIsDefaultProgressHandler = true; - } - // Otherwise register the custom handler - else if (pimpl->mProgressHandler != pHandler) - { - delete pimpl->mProgressHandler; - pimpl->mProgressHandler = pHandler; - pimpl->mIsDefaultProgressHandler = false; - } - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Get the currently set progress handler -ProgressHandler* Importer::GetProgressHandler() const { - return pimpl->mProgressHandler; -} - -// ------------------------------------------------------------------------------------------------ -// Check whether a custom progress handler is currently set -bool Importer::IsDefaultProgressHandler() const { - return pimpl->mIsDefaultProgressHandler; -} - -// ------------------------------------------------------------------------------------------------ -// Validate post process step flags -bool _ValidateFlags(unsigned int pFlags) -{ - if (pFlags & aiProcess_GenSmoothNormals && pFlags & aiProcess_GenNormals) { - ASSIMP_LOG_ERROR("#aiProcess_GenSmoothNormals and #aiProcess_GenNormals are incompatible"); - return false; - } - if (pFlags & aiProcess_OptimizeGraph && pFlags & aiProcess_PreTransformVertices) { - ASSIMP_LOG_ERROR("#aiProcess_OptimizeGraph and #aiProcess_PreTransformVertices are incompatible"); - return false; - } - return true; -} - -// ------------------------------------------------------------------------------------------------ -// Free the current scene -void Importer::FreeScene( ) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - - delete pimpl->mScene; - pimpl->mScene = NULL; - - pimpl->mErrorString = ""; - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Get the current error string, if any -const char* Importer::GetErrorString() const -{ - /* Must remain valid as long as ReadFile() or FreeFile() are not called */ - return pimpl->mErrorString.c_str(); -} - -// ------------------------------------------------------------------------------------------------ -// Enable extra-verbose mode -void Importer::SetExtraVerbose(bool bDo) -{ - pimpl->bExtraVerbose = bDo; -} - -// ------------------------------------------------------------------------------------------------ -// Get the current scene -const aiScene* Importer::GetScene() const -{ - return pimpl->mScene; -} - -// ------------------------------------------------------------------------------------------------ -// Orphan the current scene and return it. -aiScene* Importer::GetOrphanedScene() -{ - aiScene* s = pimpl->mScene; - - ASSIMP_BEGIN_EXCEPTION_REGION(); - pimpl->mScene = NULL; - - pimpl->mErrorString = ""; /* reset error string */ - ASSIMP_END_EXCEPTION_REGION(aiScene*); - return s; -} - -// ------------------------------------------------------------------------------------------------ -// Validate post-processing flags -bool Importer::ValidateFlags(unsigned int pFlags) const -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - // run basic checks for mutually exclusive flags - if(!_ValidateFlags(pFlags)) { - return false; - } - - // ValidateDS does not anymore occur in the pp list, it plays an awesome extra role ... 
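// [Editor's note - added for clarity, not in the removed file] aiProcess_ValidateDataStructure
// is handled out-of-band: builds compiled with ASSIMP_BUILD_NO_VALIDATEDS_PROCESS reject the
// flag outright below, and otherwise it is masked out of pFlags because ReadFile() dispatches
// the validation step explicitly instead of through the registered post-processing list.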
-#ifdef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - if (pFlags & aiProcess_ValidateDataStructure) { - return false; - } -#endif - pFlags &= ~aiProcess_ValidateDataStructure; - - // Now iterate through all bits which are set in the flags and check whether we find at least - // one pp plugin which handles it. - for (unsigned int mask = 1; mask < (1u << (sizeof(unsigned int)*8-1));mask <<= 1) { - - if (pFlags & mask) { - - bool have = false; - for( unsigned int a = 0; a < pimpl->mPostProcessingSteps.size(); a++) { - if (pimpl->mPostProcessingSteps[a]-> IsActive(mask) ) { - - have = true; - break; - } - } - if (!have) { - return false; - } - } - } - ASSIMP_END_EXCEPTION_REGION(bool); - return true; -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* Importer::ReadFileFromMemory( const void* pBuffer, - size_t pLength, - unsigned int pFlags, - const char* pHint /*= ""*/) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - if (!pHint) { - pHint = ""; - } - - if (!pBuffer || !pLength || strlen(pHint) > MaxLenHint ) { - pimpl->mErrorString = "Invalid parameters passed to ReadFileFromMemory()"; - return NULL; - } - - // prevent deletion of the previous IOHandler - IOSystem* io = pimpl->mIOHandler; - pimpl->mIOHandler = NULL; - - SetIOHandler(new MemoryIOSystem((const uint8_t*)pBuffer,pLength,io)); - - // read the file and recover the previous IOSystem - static const size_t BufSize(Importer::MaxLenHint + 28); - char fbuff[BufSize]; - ai_snprintf(fbuff, BufSize, "%s.%s",AI_MEMORYIO_MAGIC_FILENAME,pHint); - - ReadFile(fbuff,pFlags); - SetIOHandler(io); - - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - return pimpl->mScene; -} - -// ------------------------------------------------------------------------------------------------ -void WriteLogOpening(const std::string& file) -{ - ASSIMP_LOG_INFO_F("Load ", file); - - // print a full version dump. This is nice because we don't - // need to ask the authors of incoming bug reports for - // the library version they're using - a log dump is - // sufficient. - const unsigned int flags( aiGetCompileFlags() ); - std::stringstream stream; - stream << "Assimp " << aiGetVersionMajor() << "." << aiGetVersionMinor() << "." << aiGetVersionRevision() << " " -#if defined(ASSIMP_BUILD_ARCHITECTURE) - << ASSIMP_BUILD_ARCHITECTURE -#elif defined(_M_IX86) || defined(__x86_32__) || defined(__i386__) - << "x86" -#elif defined(_M_X64) || defined(__x86_64__) - << "amd64" -#elif defined(_M_IA64) || defined(__ia64__) - << "itanium" -#elif defined(__ppc__) || defined(__powerpc__) - << "ppc32" -#elif defined(__powerpc64__) - << "ppc64" -#elif defined(__arm__) - << "arm" -#else - << "<unknown architecture>" -#endif - << " " -#if defined(ASSIMP_BUILD_COMPILER) - << ( ASSIMP_BUILD_COMPILER ) -#elif defined(_MSC_VER) - << "msvc" -#elif defined(__GNUC__) - << "gcc" -#else - << "<unknown compiler>" -#endif - -#ifdef ASSIMP_BUILD_DEBUG - << " debug" -#endif - - << (flags & ASSIMP_CFLAGS_NOBOOST ? " noboost" : "") - << (flags & ASSIMP_CFLAGS_SHARED ? " shared" : "") - << (flags & ASSIMP_CFLAGS_SINGLETHREADED ? " singlethreaded" : ""); - - ASSIMP_LOG_DEBUG(stream.str()); -} - -// ------------------------------------------------------------------------------------------------ -// Reads the given file and returns its contents if successful. 
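ReadFileFromMemory() above works by temporarily swapping in a MemoryIOSystem so that the regular ReadFile() path can parse an in-memory buffer, with the hint string standing in for the missing file extension. A short usage sketch; the buffer, the "obj" hint and the aiProcess_Triangulate flag are illustrative choices, not anything prescribed by this file:

    #include <assimp/Importer.hpp>
    #include <assimp/postprocess.h>
    #include <assimp/scene.h>
    #include <cstddef>
    #include <cstdio>

    bool load_from_buffer(const void *data, size_t size) {
        Assimp::Importer importer;
        // The hint helps importer selection when no file extension is available.
        const aiScene *scene = importer.ReadFileFromMemory(data, size,
                                                           aiProcess_Triangulate, "obj");
        if (!scene) {
            // The error string stays valid until the next ReadFile()/FreeScene() call.
            std::fprintf(stderr, "Import failed: %s\n", importer.GetErrorString());
            return false;
        }
        std::printf("Imported %u meshes from memory\n", scene->mNumMeshes);
        return true; // the scene is freed when `importer` goes out of scope
    }

ReadFile() proper, introduced by the comment above, follows.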
-const aiScene* Importer::ReadFile( const char* _pFile, unsigned int pFlags) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - const std::string pFile(_pFile); - - // ---------------------------------------------------------------------- - // Put a large try block around everything to catch all std::exception's - // that might be thrown by STL containers or by new(). - // ImportErrorException's are throw by ourselves and caught elsewhere. - //----------------------------------------------------------------------- - - WriteLogOpening(pFile); - -#ifdef ASSIMP_CATCH_GLOBAL_EXCEPTIONS - try -#endif // ! ASSIMP_CATCH_GLOBAL_EXCEPTIONS - { - // Check whether this Importer instance has already loaded - // a scene. In this case we need to delete the old one - if (pimpl->mScene) { - - ASSIMP_LOG_DEBUG("(Deleting previous scene)"); - FreeScene(); - } - - // First check if the file is accessible at all - if( !pimpl->mIOHandler->Exists( pFile)) { - - pimpl->mErrorString = "Unable to open file \"" + pFile + "\"."; - ASSIMP_LOG_ERROR(pimpl->mErrorString); - return NULL; - } - - std::unique_ptr<Profiler> profiler(GetPropertyInteger(AI_CONFIG_GLOB_MEASURE_TIME,0)?new Profiler():NULL); - if (profiler) { - profiler->BeginRegion("total"); - } - - // Find an worker class which can handle the file - BaseImporter* imp = NULL; - SetPropertyInteger("importerIndex", -1); - for( unsigned int a = 0; a < pimpl->mImporter.size(); a++) { - - if( pimpl->mImporter[a]->CanRead( pFile, pimpl->mIOHandler, false)) { - imp = pimpl->mImporter[a]; - SetPropertyInteger("importerIndex", a); - break; - } - } - - if (!imp) { - // not so bad yet ... try format auto detection. - const std::string::size_type s = pFile.find_last_of('.'); - if (s != std::string::npos) { - ASSIMP_LOG_INFO("File extension not known, trying signature-based detection"); - for( unsigned int a = 0; a < pimpl->mImporter.size(); a++) { - if( pimpl->mImporter[a]->CanRead( pFile, pimpl->mIOHandler, true)) { - imp = pimpl->mImporter[a]; - SetPropertyInteger("importerIndex", a); - break; - } - } - } - // Put a proper error message if no suitable importer was found - if( !imp) { - pimpl->mErrorString = "No suitable reader found for the file format of file \"" + pFile + "\"."; - ASSIMP_LOG_ERROR(pimpl->mErrorString); - return NULL; - } - } - - // Get file size for progress handler - IOStream * fileIO = pimpl->mIOHandler->Open( pFile ); - uint32_t fileSize = 0; - if (fileIO) - { - fileSize = static_cast<uint32_t>(fileIO->FileSize()); - pimpl->mIOHandler->Close( fileIO ); - } - - // Dispatch the reading to the worker class for this format - const aiImporterDesc *desc( imp->GetInfo() ); - std::string ext( "unknown" ); - if ( NULL != desc ) { - ext = desc->mName; - } - ASSIMP_LOG_INFO("Found a matching importer for this file format: " + ext + "." ); - pimpl->mProgressHandler->UpdateFileRead( 0, fileSize ); - - if (profiler) { - profiler->BeginRegion("import"); - } - - pimpl->mScene = imp->ReadFile( this, pFile, pimpl->mIOHandler); - pimpl->mProgressHandler->UpdateFileRead( fileSize, fileSize ); - - if (profiler) { - profiler->EndRegion("import"); - } - - SetPropertyString("sourceFilePath", pFile); - - // If successful, apply all active post processing steps to the imported data - if( pimpl->mScene) { - -#ifndef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - // The ValidateDS process is an exception. It is executed first, even before ScenePreprocessor is called. 
- if (pFlags & aiProcess_ValidateDataStructure) - { - ValidateDSProcess ds; - ds.ExecuteOnScene (this); - if (!pimpl->mScene) { - return NULL; - } - } -#endif // no validation - - // Preprocess the scene and prepare it for post-processing - if (profiler) { - profiler->BeginRegion("preprocess"); - } - - ScenePreprocessor pre(pimpl->mScene); - pre.ProcessScene(); - - if (profiler) { - profiler->EndRegion("preprocess"); - } - - // Ensure that the validation process won't be called twice - ApplyPostProcessing(pFlags & (~aiProcess_ValidateDataStructure)); - } - // if failed, extract the error string - else if( !pimpl->mScene) { - pimpl->mErrorString = imp->GetErrorText(); - } - - // clear any data allocated by post-process steps - pimpl->mPPShared->Clean(); - - if (profiler) { - profiler->EndRegion("total"); - } - } -#ifdef ASSIMP_CATCH_GLOBAL_EXCEPTIONS - catch (std::exception &e) - { -#if (defined _MSC_VER) && (defined _CPPRTTI) - // if we have RTTI get the full name of the exception that occurred - pimpl->mErrorString = std::string(typeid( e ).name()) + ": " + e.what(); -#else - pimpl->mErrorString = std::string("std::exception: ") + e.what(); -#endif - - ASSIMP_LOG_ERROR(pimpl->mErrorString); - delete pimpl->mScene; pimpl->mScene = NULL; - } -#endif // ! ASSIMP_CATCH_GLOBAL_EXCEPTIONS - - // either successful or failure - the pointer expresses it anyways - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - return pimpl->mScene; -} - - -// ------------------------------------------------------------------------------------------------ -// Apply post-processing to the currently bound scene -const aiScene* Importer::ApplyPostProcessing(unsigned int pFlags) -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - // Return immediately if no scene is active - if (!pimpl->mScene) { - return NULL; - } - - // If no flags are given, return the current scene with no further action - if (!pFlags) { - return pimpl->mScene; - } - - // In debug builds: run basic flag validation - ai_assert(_ValidateFlags(pFlags)); - ASSIMP_LOG_INFO("Entering post processing pipeline"); - -#ifndef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - // The ValidateDS process plays an exceptional role. It isn't contained in the global - // list of post-processing steps, so we need to call it manually. - if (pFlags & aiProcess_ValidateDataStructure) - { - ValidateDSProcess ds; - ds.ExecuteOnScene (this); - if (!pimpl->mScene) { - return NULL; - } - } -#endif // no validation -#ifdef ASSIMP_BUILD_DEBUG - if (pimpl->bExtraVerbose) - { -#ifdef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - ASSIMP_LOG_ERROR("Verbose Import is not available due to build settings"); -#endif // no validation - pFlags |= aiProcess_ValidateDataStructure; - } -#else - if (pimpl->bExtraVerbose) { - ASSIMP_LOG_WARN("Not a debug build, ignoring extra verbose setting"); - } -#endif // ! 
DEBUG - - std::unique_ptr<Profiler> profiler(GetPropertyInteger(AI_CONFIG_GLOB_MEASURE_TIME,0)?new Profiler():NULL); - for( unsigned int a = 0; a < pimpl->mPostProcessingSteps.size(); a++) { - - BaseProcess* process = pimpl->mPostProcessingSteps[a]; - pimpl->mProgressHandler->UpdatePostProcess(static_cast<int>(a), static_cast<int>(pimpl->mPostProcessingSteps.size()) ); - if( process->IsActive( pFlags)) { - - if (profiler) { - profiler->BeginRegion("postprocess"); - } - - process->ExecuteOnScene ( this ); - - if (profiler) { - profiler->EndRegion("postprocess"); - } - } - if( !pimpl->mScene) { - break; - } -#ifdef ASSIMP_BUILD_DEBUG - -#ifdef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - continue; -#endif // no validation - - // If the extra verbose mode is active, execute the ValidateDataStructureStep again - after each step - if (pimpl->bExtraVerbose) { - ASSIMP_LOG_DEBUG("Verbose Import: re-validating data structures"); - - ValidateDSProcess ds; - ds.ExecuteOnScene (this); - if( !pimpl->mScene) { - ASSIMP_LOG_ERROR("Verbose Import: failed to re-validate data structures"); - break; - } - } -#endif // ! DEBUG - } - pimpl->mProgressHandler->UpdatePostProcess( static_cast<int>(pimpl->mPostProcessingSteps.size()), - static_cast<int>(pimpl->mPostProcessingSteps.size()) ); - - // update private scene flags - if( pimpl->mScene ) - ScenePriv(pimpl->mScene)->mPPStepsApplied |= pFlags; - - // clear any data allocated by post-process steps - pimpl->mPPShared->Clean(); - ASSIMP_LOG_INFO("Leaving post processing pipeline"); - - ASSIMP_END_EXCEPTION_REGION(const aiScene*); - return pimpl->mScene; -} - -// ------------------------------------------------------------------------------------------------ -const aiScene* Importer::ApplyCustomizedPostProcessing( BaseProcess *rootProcess, bool requestValidation ) { - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // Return immediately if no scene is active - if ( NULL == pimpl->mScene ) { - return NULL; - } - - // If no flags are given, return the current scene with no further action - if ( NULL == rootProcess ) { - return pimpl->mScene; - } - - // In debug builds: run basic flag validation - ASSIMP_LOG_INFO( "Entering customized post processing pipeline" ); - -#ifndef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - // The ValidateDS process plays an exceptional role. It isn't contained in the global - // list of post-processing steps, so we need to call it manually. - if ( requestValidation ) - { - ValidateDSProcess ds; - ds.ExecuteOnScene( this ); - if ( !pimpl->mScene ) { - return NULL; - } - } -#endif // no validation -#ifdef ASSIMP_BUILD_DEBUG - if ( pimpl->bExtraVerbose ) - { -#ifdef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS - ASSIMP_LOG_ERROR( "Verbose Import is not available due to build settings" ); -#endif // no validation - } -#else - if ( pimpl->bExtraVerbose ) { - ASSIMP_LOG_WARN( "Not a debug build, ignoring extra verbose setting" ); - } -#endif // ! DEBUG - - std::unique_ptr<Profiler> profiler( GetPropertyInteger( AI_CONFIG_GLOB_MEASURE_TIME, 0 ) ? 
new Profiler() : NULL ); - - if ( profiler ) { - profiler->BeginRegion( "postprocess" ); - } - - rootProcess->ExecuteOnScene( this ); - - if ( profiler ) { - profiler->EndRegion( "postprocess" ); - } - - // If the extra verbose mode is active, execute the ValidateDataStructureStep again - after each step - if ( pimpl->bExtraVerbose || requestValidation ) { - ASSIMP_LOG_DEBUG( "Verbose Import: revalidating data structures" ); - - ValidateDSProcess ds; - ds.ExecuteOnScene( this ); - if ( !pimpl->mScene ) { - ASSIMP_LOG_ERROR( "Verbose Import: failed to revalidate data structures" ); - } - } - - // clear any data allocated by post-process steps - pimpl->mPPShared->Clean(); - ASSIMP_LOG_INFO( "Leaving customized post processing pipeline" ); - - ASSIMP_END_EXCEPTION_REGION( const aiScene* ); - - return pimpl->mScene; -} - -// ------------------------------------------------------------------------------------------------ -// Helper function to check whether an extension is supported by ASSIMP -bool Importer::IsExtensionSupported(const char* szExtension) const -{ - return nullptr != GetImporter(szExtension); -} - -// ------------------------------------------------------------------------------------------------ -size_t Importer::GetImporterCount() const -{ - return pimpl->mImporter.size(); -} - -// ------------------------------------------------------------------------------------------------ -const aiImporterDesc* Importer::GetImporterInfo(size_t index) const -{ - if (index >= pimpl->mImporter.size()) { - return NULL; - } - return pimpl->mImporter[index]->GetInfo(); -} - - -// ------------------------------------------------------------------------------------------------ -BaseImporter* Importer::GetImporter (size_t index) const -{ - if (index >= pimpl->mImporter.size()) { - return NULL; - } - return pimpl->mImporter[index]; -} - -// ------------------------------------------------------------------------------------------------ -// Find a loader plugin for a given file extension -BaseImporter* Importer::GetImporter (const char* szExtension) const -{ - return GetImporter(GetImporterIndex(szExtension)); -} - -// ------------------------------------------------------------------------------------------------ -// Find a loader plugin for a given file extension -size_t Importer::GetImporterIndex (const char* szExtension) const { - ai_assert(nullptr != szExtension); - - ASSIMP_BEGIN_EXCEPTION_REGION(); - - // skip over wildcard and dot characters at string head -- - for ( ; *szExtension == '*' || *szExtension == '.'; ++szExtension ); - - std::string ext(szExtension); - if (ext.empty()) { - return static_cast<size_t>(-1); - } - std::transform( ext.begin(), ext.end(), ext.begin(), ToLower<char> ); - - std::set<std::string> str; - for (std::vector<BaseImporter*>::const_iterator i = pimpl->mImporter.begin();i != pimpl->mImporter.end();++i) { - str.clear(); - - (*i)->GetExtensionList(str); - for (std::set<std::string>::const_iterator it = str.begin(); it != str.end(); ++it) { - if (ext == *it) { - return std::distance(static_cast< std::vector<BaseImporter*>::const_iterator >(pimpl->mImporter.begin()), i); - } - } - } - ASSIMP_END_EXCEPTION_REGION(size_t); - return static_cast<size_t>(-1); -} - -// ------------------------------------------------------------------------------------------------ -// Helper function to build a list of all file extensions supported by ASSIMP -void Importer::GetExtensionList(aiString& szOut) const -{ - ASSIMP_BEGIN_EXCEPTION_REGION(); - std::set<std::string> str; - for 
(std::vector<BaseImporter*>::const_iterator i = pimpl->mImporter.begin();i != pimpl->mImporter.end();++i) { - (*i)->GetExtensionList(str); - } - - // List can be empty - if( !str.empty() ) { - for (std::set<std::string>::const_iterator it = str.begin();; ) { - szOut.Append("*."); - szOut.Append((*it).c_str()); - - if (++it == str.end()) { - break; - } - szOut.Append(";"); - } - } - ASSIMP_END_EXCEPTION_REGION(void); -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool Importer::SetPropertyInteger(const char* szName, int iValue) -{ - bool existing; - ASSIMP_BEGIN_EXCEPTION_REGION(); - existing = SetGenericProperty<int>(pimpl->mIntProperties, szName,iValue); - ASSIMP_END_EXCEPTION_REGION(bool); - return existing; -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool Importer::SetPropertyFloat(const char* szName, ai_real iValue) -{ - bool existing; - ASSIMP_BEGIN_EXCEPTION_REGION(); - existing = SetGenericProperty<ai_real>(pimpl->mFloatProperties, szName,iValue); - ASSIMP_END_EXCEPTION_REGION(bool); - return existing; -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool Importer::SetPropertyString(const char* szName, const std::string& value) -{ - bool existing; - ASSIMP_BEGIN_EXCEPTION_REGION(); - existing = SetGenericProperty<std::string>(pimpl->mStringProperties, szName,value); - ASSIMP_END_EXCEPTION_REGION(bool); - return existing; -} - -// ------------------------------------------------------------------------------------------------ -// Set a configuration property -bool Importer::SetPropertyMatrix(const char* szName, const aiMatrix4x4& value) -{ - bool existing; - ASSIMP_BEGIN_EXCEPTION_REGION(); - existing = SetGenericProperty<aiMatrix4x4>(pimpl->mMatrixProperties, szName,value); - ASSIMP_END_EXCEPTION_REGION(bool); - return existing; -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -int Importer::GetPropertyInteger(const char* szName, - int iErrorReturn /*= 0xffffffff*/) const -{ - return GetGenericProperty<int>(pimpl->mIntProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -ai_real Importer::GetPropertyFloat(const char* szName, - ai_real iErrorReturn /*= 10e10*/) const -{ - return GetGenericProperty<ai_real>(pimpl->mFloatProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -const std::string Importer::GetPropertyString(const char* szName, - const std::string& iErrorReturn /*= ""*/) const -{ - return GetGenericProperty<std::string>(pimpl->mStringProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get a configuration property -const aiMatrix4x4 Importer::GetPropertyMatrix(const char* szName, - const aiMatrix4x4& iErrorReturn /*= aiMatrix4x4()*/) const -{ - return GetGenericProperty<aiMatrix4x4>(pimpl->mMatrixProperties,szName,iErrorReturn); -} - -// ------------------------------------------------------------------------------------------------ -// Get the memory requirements of a single node -inline 
void AddNodeWeight(unsigned int& iScene,const aiNode* pcNode) -{ - iScene += sizeof(aiNode); - iScene += sizeof(unsigned int) * pcNode->mNumMeshes; - iScene += sizeof(void*) * pcNode->mNumChildren; - - for (unsigned int i = 0; i < pcNode->mNumChildren;++i) { - AddNodeWeight(iScene,pcNode->mChildren[i]); - } -} - -// ------------------------------------------------------------------------------------------------ -// Get the memory requirements of the scene -void Importer::GetMemoryRequirements(aiMemoryInfo& in) const -{ - in = aiMemoryInfo(); - aiScene* mScene = pimpl->mScene; - - // return if we have no scene loaded - if (!pimpl->mScene) - return; - - - in.total = sizeof(aiScene); - - // add all meshes - for (unsigned int i = 0; i < mScene->mNumMeshes;++i) - { - in.meshes += sizeof(aiMesh); - if (mScene->mMeshes[i]->HasPositions()) { - in.meshes += sizeof(aiVector3D) * mScene->mMeshes[i]->mNumVertices; - } - - if (mScene->mMeshes[i]->HasNormals()) { - in.meshes += sizeof(aiVector3D) * mScene->mMeshes[i]->mNumVertices; - } - - if (mScene->mMeshes[i]->HasTangentsAndBitangents()) { - in.meshes += sizeof(aiVector3D) * mScene->mMeshes[i]->mNumVertices * 2; - } - - for (unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS;++a) { - if (mScene->mMeshes[i]->HasVertexColors(a)) { - in.meshes += sizeof(aiColor4D) * mScene->mMeshes[i]->mNumVertices; - } - else break; - } - for (unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS;++a) { - if (mScene->mMeshes[i]->HasTextureCoords(a)) { - in.meshes += sizeof(aiVector3D) * mScene->mMeshes[i]->mNumVertices; - } - else break; - } - if (mScene->mMeshes[i]->HasBones()) { - in.meshes += sizeof(void*) * mScene->mMeshes[i]->mNumBones; - for (unsigned int p = 0; p < mScene->mMeshes[i]->mNumBones;++p) { - in.meshes += sizeof(aiBone); - in.meshes += mScene->mMeshes[i]->mBones[p]->mNumWeights * sizeof(aiVertexWeight); - } - } - in.meshes += (sizeof(aiFace) + 3 * sizeof(unsigned int))*mScene->mMeshes[i]->mNumFaces; - } - in.total += in.meshes; - - // add all embedded textures - for (unsigned int i = 0; i < mScene->mNumTextures;++i) { - const aiTexture* pc = mScene->mTextures[i]; - in.textures += sizeof(aiTexture); - if (pc->mHeight) { - in.textures += 4 * pc->mHeight * pc->mWidth; - } - else in.textures += pc->mWidth; - } - in.total += in.textures; - - // add all animations - for (unsigned int i = 0; i < mScene->mNumAnimations;++i) { - const aiAnimation* pc = mScene->mAnimations[i]; - in.animations += sizeof(aiAnimation); - - // add all bone anims - for (unsigned int a = 0; a < pc->mNumChannels; ++a) { - const aiNodeAnim* pc2 = pc->mChannels[i]; - in.animations += sizeof(aiNodeAnim); - in.animations += pc2->mNumPositionKeys * sizeof(aiVectorKey); - in.animations += pc2->mNumScalingKeys * sizeof(aiVectorKey); - in.animations += pc2->mNumRotationKeys * sizeof(aiQuatKey); - } - } - in.total += in.animations; - - // add all cameras and all lights - in.total += in.cameras = sizeof(aiCamera) * mScene->mNumCameras; - in.total += in.lights = sizeof(aiLight) * mScene->mNumLights; - - // add all nodes - AddNodeWeight(in.nodes,mScene->mRootNode); - in.total += in.nodes; - - // add all materials - for (unsigned int i = 0; i < mScene->mNumMaterials;++i) { - const aiMaterial* pc = mScene->mMaterials[i]; - in.materials += sizeof(aiMaterial); - in.materials += pc->mNumAllocated * sizeof(void*); - - for (unsigned int a = 0; a < pc->mNumProperties;++a) { - in.materials += pc->mProperties[a]->mDataLength; - } - } - in.total += in.materials; -} diff --git 
a/thirdparty/assimp/code/Common/Importer.h b/thirdparty/assimp/code/Common/Importer.h deleted file mode 100644 index a439d99c2f..0000000000 --- a/thirdparty/assimp/code/Common/Importer.h +++ /dev/null @@ -1,247 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Importer.h mostly internal stuff for use by #Assimp::Importer */ -#pragma once -#ifndef INCLUDED_AI_IMPORTER_H -#define INCLUDED_AI_IMPORTER_H - -#include <map> -#include <vector> -#include <string> -#include <assimp/matrix4x4.h> - -struct aiScene; - -namespace Assimp { - class ProgressHandler; - class IOSystem; - class BaseImporter; - class BaseProcess; - class SharedPostProcessInfo; - - -//! @cond never -// --------------------------------------------------------------------------- -/** @brief Internal PIMPL implementation for Assimp::Importer - * - * Using this idiom here allows us to drop the dependency from - * std::vector and std::map in the public headers. Furthermore we are dropping - * any STL interface problems caused by mismatching STL settings. All - * size calculation are now done by us, not the app heap. */ -class ImporterPimpl { -public: - // Data type to store the key hash - typedef unsigned int KeyType; - - // typedefs for our four configuration maps. - // We don't need more, so there is no need for a generic solution - typedef std::map<KeyType, int> IntPropertyMap; - typedef std::map<KeyType, ai_real> FloatPropertyMap; - typedef std::map<KeyType, std::string> StringPropertyMap; - typedef std::map<KeyType, aiMatrix4x4> MatrixPropertyMap; - - /** IO handler to use for all file accesses. */ - IOSystem* mIOHandler; - bool mIsDefaultHandler; - - /** Progress handler for feedback. 
*/ - ProgressHandler* mProgressHandler; - bool mIsDefaultProgressHandler; - - /** Format-specific importer worker objects - one for each format we can read.*/ - std::vector< BaseImporter* > mImporter; - - /** Post processing steps we can apply at the imported data. */ - std::vector< BaseProcess* > mPostProcessingSteps; - - /** The imported data, if ReadFile() was successful, NULL otherwise. */ - aiScene* mScene; - - /** The error description, if there was one. */ - std::string mErrorString; - - /** List of integer properties */ - IntPropertyMap mIntProperties; - - /** List of floating-point properties */ - FloatPropertyMap mFloatProperties; - - /** List of string properties */ - StringPropertyMap mStringProperties; - - /** List of Matrix properties */ - MatrixPropertyMap mMatrixProperties; - - /** Used for testing - extra verbose mode causes the ValidateDataStructure-Step - * to be executed before and after every single post-process step */ - bool bExtraVerbose; - - /** Used by post-process steps to share data */ - SharedPostProcessInfo* mPPShared; - - /// The default class constructor. - ImporterPimpl() AI_NO_EXCEPT; -}; - -inline -ImporterPimpl::ImporterPimpl() AI_NO_EXCEPT -: mIOHandler( nullptr ) -, mIsDefaultHandler( false ) -, mProgressHandler( nullptr ) -, mIsDefaultProgressHandler( false ) -, mImporter() -, mPostProcessingSteps() -, mScene( nullptr ) -, mErrorString() -, mIntProperties() -, mFloatProperties() -, mStringProperties() -, mMatrixProperties() -, bExtraVerbose( false ) -, mPPShared( nullptr ) { - // empty -} -//! @endcond - - -struct BatchData; - -// --------------------------------------------------------------------------- -/** FOR IMPORTER PLUGINS ONLY: A helper class to the pleasure of importers - * that need to load many external meshes recursively. - * - * The class uses several threads to load these meshes (or at least it - * could, this has not yet been implemented at the moment). - * - * @note The class may not be used by more than one thread*/ -class ASSIMP_API BatchLoader -{ - // friend of Importer - -public: - //! @cond never - // ------------------------------------------------------------------- - /** Wraps a full list of configuration properties for an importer. - * Properties can be set using SetGenericProperty */ - struct PropertyMap - { - ImporterPimpl::IntPropertyMap ints; - ImporterPimpl::FloatPropertyMap floats; - ImporterPimpl::StringPropertyMap strings; - ImporterPimpl::MatrixPropertyMap matrices; - - bool operator == (const PropertyMap& prop) const { - // fixme: really isocpp? gcc complains - return ints == prop.ints && floats == prop.floats && strings == prop.strings && matrices == prop.matrices; - } - - bool empty () const { - return ints.empty() && floats.empty() && strings.empty() && matrices.empty(); - } - }; - //! @endcond - -public: - // ------------------------------------------------------------------- - /** Construct a batch loader from a given IO system to be used - * to access external files - */ - explicit BatchLoader(IOSystem* pIO, bool validate = false ); - - // ------------------------------------------------------------------- - /** The class destructor. - */ - ~BatchLoader(); - - // ------------------------------------------------------------------- - /** Sets the validation step. True for enable validation during postprocess. - * @param enable True for validation. - */ - void setValidation( bool enabled ); - - // ------------------------------------------------------------------- - /** Returns the current validation step. 
- * @return The current validation step. - */ - bool getValidation() const; - - // ------------------------------------------------------------------- - /** Add a new file to the list of files to be loaded. - * @param file File to be loaded - * @param steps Post-processing steps to be executed on the file - * @param map Optional configuration properties - * @return 'Load request channel' - an unique ID that can later - * be used to access the imported file data. - * @see GetImport */ - unsigned int AddLoadRequest ( - const std::string& file, - unsigned int steps = 0, - const PropertyMap* map = NULL - ); - - // ------------------------------------------------------------------- - /** Get an imported scene. - * This polls the import from the internal request list. - * If an import is requested several times, this function - * can be called several times, too. - * - * @param which LRWC returned by AddLoadRequest(). - * @return NULL if there is no scene with this file name - * in the queue of the scene hasn't been loaded yet. */ - aiScene* GetImport( - unsigned int which - ); - - // ------------------------------------------------------------------- - /** Waits until all scenes have been loaded. This returns - * immediately if no scenes are queued.*/ - void LoadAll(); - -private: - // No need to have that in the public API ... - BatchData *m_data; -}; - -} // Namespace Assimp - -#endif // INCLUDED_AI_IMPORTER_H diff --git a/thirdparty/assimp/code/Common/ImporterRegistry.cpp b/thirdparty/assimp/code/Common/ImporterRegistry.cpp deleted file mode 100644 index b9f28f0356..0000000000 --- a/thirdparty/assimp/code/Common/ImporterRegistry.cpp +++ /dev/null @@ -1,377 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
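The BatchLoader declared in the removed Importer.h above is the helper importer plugins use to pull in external sub-scenes recursively. A usage sketch built only from the methods declared there; the include paths and the DefaultIOSystem type are assumptions about the surrounding Assimp tree, and the file names are placeholders:

    #include "Common/Importer.h"          // internal header (the deleted file above)
    #include <assimp/DefaultIOSystem.h>   // assumed location of the default IO backend

    void load_parts() {
        Assimp::DefaultIOSystem io;
        Assimp::BatchLoader batch(&io);

        // Each request returns a 'load request channel' id.
        unsigned int a = batch.AddLoadRequest("part_a.obj");
        unsigned int b = batch.AddLoadRequest("part_b.obj");

        batch.LoadAll(); // returns once every queued scene has been processed

        aiScene *sceneA = batch.GetImport(a); // NULL if the request could not be satisfied
        aiScene *sceneB = batch.GetImport(b);
        (void)sceneA;
        (void)sceneB;
    }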
---------------------------------------------------------------------------- -*/ - -/** @file ImporterRegistry.cpp - -Central registry for all importers available. Do not edit this file -directly (unless you are adding new loaders), instead use the -corresponding preprocessor flag to selectively disable formats. -*/ - -#include <vector> -#include <assimp/BaseImporter.h> - -// ------------------------------------------------------------------------------------------------ -// Importers -// (include_new_importers_here) -// ------------------------------------------------------------------------------------------------ -#ifndef ASSIMP_BUILD_NO_X_IMPORTER -# include "X/XFileImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_AMF_IMPORTER -# include "AMF/AMFImporter.hpp" -#endif -#ifndef ASSIMP_BUILD_NO_3DS_IMPORTER -# include "3DS/3DSLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_MD3_IMPORTER -# include "MD3/MD3Loader.h" -#endif -#ifndef ASSIMP_BUILD_NO_MDL_IMPORTER -# include "MDL/MDLLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_MD2_IMPORTER -# include "MD2/MD2Loader.h" -#endif -#ifndef ASSIMP_BUILD_NO_PLY_IMPORTER -# include "Ply/PlyLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_ASE_IMPORTER -# include "ASE/ASELoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_OBJ_IMPORTER -# include "Obj/ObjFileImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_HMP_IMPORTER -# include "HMP/HMPLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_SMD_IMPORTER -# include "SMD/SMDLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_MDC_IMPORTER -# include "MDC/MDCLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_MD5_IMPORTER -# include "MD5/MD5Loader.h" -#endif -#ifndef ASSIMP_BUILD_NO_STL_IMPORTER -# include "STL/STLLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_LWO_IMPORTER -# include "LWO/LWOLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_DXF_IMPORTER -# include "DXF/DXFLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_NFF_IMPORTER -# include "NFF/NFFLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_RAW_IMPORTER -# include "Raw/RawLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_SIB_IMPORTER -# include "SIB/SIBImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_OFF_IMPORTER -# include "OFF/OFFLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_AC_IMPORTER -# include "AC/ACLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_BVH_IMPORTER -# include "BVH/BVHLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_IRRMESH_IMPORTER -# include "Irr/IRRMeshLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_IRR_IMPORTER -# include "Irr/IRRLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_Q3D_IMPORTER -# include "Q3D/Q3DLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_B3D_IMPORTER -# include "B3D/B3DImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_COLLADA_IMPORTER -# include "Collada/ColladaLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_TERRAGEN_IMPORTER -# include "Terragen/TerragenLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_CSM_IMPORTER -# include "CSM/CSMLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_3D_IMPORTER -# include "Unreal/UnrealLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_LWS_IMPORTER -# include "LWS/LWSLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_OGRE_IMPORTER -# include "Ogre/OgreImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_OPENGEX_IMPORTER -# include "OpenGEX/OpenGEXImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_MS3D_IMPORTER -# include "MS3D/MS3DLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_COB_IMPORTER -# include "COB/COBLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_BLEND_IMPORTER -# include "Blender/BlenderLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_Q3BSP_IMPORTER -# include "Q3BSP/Q3BSPFileImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_NDO_IMPORTER -# include 
"NDO/NDOLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_IFC_IMPORTER -# include "Importer/IFC/IFCLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_XGL_IMPORTER -# include "XGL/XGLLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER -# include "FBX/FBXImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_ASSBIN_IMPORTER -# include "Assbin/AssbinLoader.h" -#endif -#ifndef ASSIMP_BUILD_NO_GLTF_IMPORTER -# include "glTF/glTFImporter.h" -# include "glTF2/glTF2Importer.h" -#endif -#ifndef ASSIMP_BUILD_NO_C4D_IMPORTER -# include "C4D/C4DImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_3MF_IMPORTER -# include "3MF/D3MFImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_X3D_IMPORTER -# include "X3D/X3DImporter.hpp" -#endif -#ifndef ASSIMP_BUILD_NO_MMD_IMPORTER -# include "MMD/MMDImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_M3D_IMPORTER -# include "M3D/M3DImporter.h" -#endif -#ifndef ASSIMP_BUILD_NO_STEP_IMPORTER -# include "Importer/StepFile/StepFileImporter.h" -#endif - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -void GetImporterInstanceList(std::vector< BaseImporter* >& out) -{ - // ---------------------------------------------------------------------------- - // Add an instance of each worker class here - // (register_new_importers_here) - // ---------------------------------------------------------------------------- - out.reserve(64); -#if (!defined ASSIMP_BUILD_NO_X_IMPORTER) - out.push_back( new XFileImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_OBJ_IMPORTER) - out.push_back( new ObjFileImporter()); -#endif -#ifndef ASSIMP_BUILD_NO_AMF_IMPORTER - out.push_back( new AMFImporter() ); -#endif -#if (!defined ASSIMP_BUILD_NO_3DS_IMPORTER) - out.push_back( new Discreet3DSImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_M3D_IMPORTER) - out.push_back( new M3DImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_MD3_IMPORTER) - out.push_back( new MD3Importer()); -#endif -#if (!defined ASSIMP_BUILD_NO_MD2_IMPORTER) - out.push_back( new MD2Importer()); -#endif -#if (!defined ASSIMP_BUILD_NO_PLY_IMPORTER) - out.push_back( new PLYImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_MDL_IMPORTER) - out.push_back( new MDLImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_ASE_IMPORTER) - #if (!defined ASSIMP_BUILD_NO_3DS_IMPORTER) - out.push_back( new ASEImporter()); -# endif -#endif -#if (!defined ASSIMP_BUILD_NO_HMP_IMPORTER) - out.push_back( new HMPImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_SMD_IMPORTER) - out.push_back( new SMDImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_MDC_IMPORTER) - out.push_back( new MDCImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_MD5_IMPORTER) - out.push_back( new MD5Importer()); -#endif -#if (!defined ASSIMP_BUILD_NO_STL_IMPORTER) - out.push_back( new STLImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_LWO_IMPORTER) - out.push_back( new LWOImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_DXF_IMPORTER) - out.push_back( new DXFImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_NFF_IMPORTER) - out.push_back( new NFFImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_RAW_IMPORTER) - out.push_back( new RAWImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_SIB_IMPORTER) - out.push_back( new SIBImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_OFF_IMPORTER) - out.push_back( new OFFImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_AC_IMPORTER) - out.push_back( new AC3DImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_BVH_IMPORTER) - out.push_back( new BVHLoader()); -#endif -#if (!defined 
ASSIMP_BUILD_NO_IRRMESH_IMPORTER) - out.push_back( new IRRMeshImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_IRR_IMPORTER) - out.push_back( new IRRImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_Q3D_IMPORTER) - out.push_back( new Q3DImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_B3D_IMPORTER) - out.push_back( new B3DImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_COLLADA_IMPORTER) - out.push_back( new ColladaLoader()); -#endif -#if (!defined ASSIMP_BUILD_NO_TERRAGEN_IMPORTER) - out.push_back( new TerragenImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_CSM_IMPORTER) - out.push_back( new CSMImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_3D_IMPORTER) - out.push_back( new UnrealImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_LWS_IMPORTER) - out.push_back( new LWSImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_OGRE_IMPORTER) - out.push_back( new Ogre::OgreImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_OPENGEX_IMPORTER ) - out.push_back( new OpenGEX::OpenGEXImporter() ); -#endif -#if (!defined ASSIMP_BUILD_NO_MS3D_IMPORTER) - out.push_back( new MS3DImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_COB_IMPORTER) - out.push_back( new COBImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_BLEND_IMPORTER) - out.push_back( new BlenderImporter()); -#endif -#if (!defined ASSIMP_BUILD_NO_Q3BSP_IMPORTER) - out.push_back( new Q3BSPFileImporter() ); -#endif -#if (!defined ASSIMP_BUILD_NO_NDO_IMPORTER) - out.push_back( new NDOImporter() ); -#endif -#if (!defined ASSIMP_BUILD_NO_IFC_IMPORTER) - out.push_back( new IFCImporter() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_XGL_IMPORTER ) - out.push_back( new XGLImporter() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_FBX_IMPORTER ) - out.push_back( new FBXImporter() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_ASSBIN_IMPORTER ) - out.push_back( new AssbinImporter() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_GLTF_IMPORTER ) - out.push_back( new glTFImporter() ); - out.push_back( new glTF2Importer() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_C4D_IMPORTER ) - out.push_back( new C4DImporter() ); -#endif -#if ( !defined ASSIMP_BUILD_NO_3MF_IMPORTER ) - out.push_back( new D3MFImporter() ); -#endif -#ifndef ASSIMP_BUILD_NO_X3D_IMPORTER - out.push_back( new X3DImporter() ); -#endif -#ifndef ASSIMP_BUILD_NO_MMD_IMPORTER - out.push_back( new MMDImporter() ); -#endif -#ifndef ASSIMP_BUILD_NO_STEP_IMPORTER - out.push_back(new StepFile::StepFileImporter()); -#endif -} - -/** will delete all registered importers. */ -void DeleteImporterInstanceList(std::vector< BaseImporter* >& deleteList){ - for(size_t i= 0; i<deleteList.size();++i){ - delete deleteList[i]; - deleteList[i]=nullptr; - }//for -} - -} // namespace Assimp diff --git a/thirdparty/assimp/code/Common/PolyTools.h b/thirdparty/assimp/code/Common/PolyTools.h deleted file mode 100644 index fbbda0e7d1..0000000000 --- a/thirdparty/assimp/code/Common/PolyTools.h +++ /dev/null @@ -1,229 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file PolyTools.h, various utilities for our dealings with arbitrary polygons */ - -#ifndef AI_POLYTOOLS_H_INCLUDED -#define AI_POLYTOOLS_H_INCLUDED - -#include <assimp/material.h> -#include <assimp/ai_assert.h> - -namespace Assimp { - -// ------------------------------------------------------------------------------- -/** Compute the signed area of a triangle. - * The function accepts an unconstrained template parameter for use with - * both aiVector3D and aiVector2D, but generally ignores the third coordinate.*/ -template <typename T> -inline double GetArea2D(const T& v1, const T& v2, const T& v3) -{ - return 0.5 * (v1.x * ((double)v3.y - v2.y) + v2.x * ((double)v1.y - v3.y) + v3.x * ((double)v2.y - v1.y)); -} - -// ------------------------------------------------------------------------------- -/** Test if a given point p2 is on the left side of the line formed by p0-p1. - * The function accepts an unconstrained template parameter for use with - * both aiVector3D and aiVector2D, but generally ignores the third coordinate.*/ -template <typename T> -inline bool OnLeftSideOfLine2D(const T& p0, const T& p1,const T& p2) -{ - return GetArea2D(p0,p2,p1) > 0; -} - -// ------------------------------------------------------------------------------- -/** Test if a given point is inside a given triangle in R2. 
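A quick hand-check of the 2D helpers above clarifies their sign convention: GetArea2D() as written is the negation of the usual signed-area (shoelace) formula, so it returns a negative value for counter-clockwise vertex order, and OnLeftSideOfLine2D() compensates by swapping its last two arguments. A small illustration; the include path is a guess at the in-tree layout:

    #include "Common/PolyTools.h"   // the deleted header above
    #include <assimp/vector2.h>
    #include <cassert>

    void polytools_sanity_check() {
        const aiVector2D a(0.0f, 0.0f), b(1.0f, 0.0f), c(0.0f, 1.0f);

        // Counter-clockwise order (a, b, c) yields -0.5 under this formula,
        // i.e. a positive result here actually indicates clockwise winding.
        assert(Assimp::GetArea2D(a, b, c) < 0);

        // (0,1) lies to the left of the directed line (0,0) -> (1,0).
        assert(Assimp::OnLeftSideOfLine2D(a, b, c));
    }

PointInTriangle2D(), whose doc comment begins above, builds on the same primitives using barycentric coordinates.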
- * The function accepts an unconstrained template parameter for use with - * both aiVector3D and aiVector2D, but generally ignores the third coordinate.*/ -template <typename T> -inline bool PointInTriangle2D(const T& p0, const T& p1,const T& p2, const T& pp) -{ - // Point in triangle test using baryzentric coordinates - const aiVector2D v0 = p1 - p0; - const aiVector2D v1 = p2 - p0; - const aiVector2D v2 = pp - p0; - - double dot00 = v0 * v0; - double dot01 = v0 * v1; - double dot02 = v0 * v2; - double dot11 = v1 * v1; - double dot12 = v1 * v2; - - const double invDenom = 1 / (dot00 * dot11 - dot01 * dot01); - dot11 = (dot11 * dot02 - dot01 * dot12) * invDenom; - dot00 = (dot00 * dot12 - dot01 * dot02) * invDenom; - - return (dot11 > 0) && (dot00 > 0) && (dot11 + dot00 < 1); -} - - -// ------------------------------------------------------------------------------- -/** Check whether the winding order of a given polygon is counter-clockwise. - * The function accepts an unconstrained template parameter, but is intended - * to be used only with aiVector2D and aiVector3D (z axis is ignored, only - * x and y are taken into account). - * @note Code taken from http://cgm.cs.mcgill.ca/~godfried/teaching/cg-projects/97/Ian/applet1.html and translated to C++ - */ -template <typename T> -inline bool IsCCW(T* in, size_t npoints) { - double aa, bb, cc, b, c, theta; - double convex_turn; - double convex_sum = 0; - - ai_assert(npoints >= 3); - - for (size_t i = 0; i < npoints - 2; i++) { - aa = ((in[i+2].x - in[i].x) * (in[i+2].x - in[i].x)) + - ((-in[i+2].y + in[i].y) * (-in[i+2].y + in[i].y)); - - bb = ((in[i+1].x - in[i].x) * (in[i+1].x - in[i].x)) + - ((-in[i+1].y + in[i].y) * (-in[i+1].y + in[i].y)); - - cc = ((in[i+2].x - in[i+1].x) * - (in[i+2].x - in[i+1].x)) + - ((-in[i+2].y + in[i+1].y) * - (-in[i+2].y + in[i+1].y)); - - b = std::sqrt(bb); - c = std::sqrt(cc); - theta = std::acos((bb + cc - aa) / (2 * b * c)); - - if (OnLeftSideOfLine2D(in[i],in[i+2],in[i+1])) { - // if (convex(in[i].x, in[i].y, - // in[i+1].x, in[i+1].y, - // in[i+2].x, in[i+2].y)) { - convex_turn = AI_MATH_PI_F - theta; - convex_sum += convex_turn; - } - else { - convex_sum -= AI_MATH_PI_F - theta; - } - } - aa = ((in[1].x - in[npoints-2].x) * - (in[1].x - in[npoints-2].x)) + - ((-in[1].y + in[npoints-2].y) * - (-in[1].y + in[npoints-2].y)); - - bb = ((in[0].x - in[npoints-2].x) * - (in[0].x - in[npoints-2].x)) + - ((-in[0].y + in[npoints-2].y) * - (-in[0].y + in[npoints-2].y)); - - cc = ((in[1].x - in[0].x) * (in[1].x - in[0].x)) + - ((-in[1].y + in[0].y) * (-in[1].y + in[0].y)); - - b = std::sqrt(bb); - c = std::sqrt(cc); - theta = std::acos((bb + cc - aa) / (2 * b * c)); - - //if (convex(in[npoints-2].x, in[npoints-2].y, - // in[0].x, in[0].y, - // in[1].x, in[1].y)) { - if (OnLeftSideOfLine2D(in[npoints-2],in[1],in[0])) { - convex_turn = AI_MATH_PI_F - theta; - convex_sum += convex_turn; - } - else { - convex_sum -= AI_MATH_PI_F - theta; - } - - return convex_sum >= (2 * AI_MATH_PI_F); -} - - -// ------------------------------------------------------------------------------- -/** Compute the normal of an arbitrary polygon in R3. - * - * The code is based on Newell's formula, that is a polygons normal is the ratio - * of its area when projected onto the three coordinate axes. - * - * @param out Receives the output normal - * @param num Number of input vertices - * @param x X data source. x[ofs_x*n] is the n'th element. - * @param y Y data source. y[ofs_y*n] is the y'th element - * @param z Z data source. 
z[ofs_z*n] is the z'th element - * - * @note The data arrays must have storage for at least num+2 elements. Using - * this method is much faster than the 'other' NewellNormal() - */ -template <int ofs_x, int ofs_y, int ofs_z, typename TReal> -inline void NewellNormal (aiVector3t<TReal>& out, int num, TReal* x, TReal* y, TReal* z) -{ - // Duplicate the first two vertices at the end - x[(num+0)*ofs_x] = x[0]; - x[(num+1)*ofs_x] = x[ofs_x]; - - y[(num+0)*ofs_y] = y[0]; - y[(num+1)*ofs_y] = y[ofs_y]; - - z[(num+0)*ofs_z] = z[0]; - z[(num+1)*ofs_z] = z[ofs_z]; - - TReal sum_xy = 0.0, sum_yz = 0.0, sum_zx = 0.0; - - TReal *xptr = x +ofs_x, *xlow = x, *xhigh = x + ofs_x*2; - TReal *yptr = y +ofs_y, *ylow = y, *yhigh = y + ofs_y*2; - TReal *zptr = z +ofs_z, *zlow = z, *zhigh = z + ofs_z*2; - - for (int tmp=0; tmp < num; tmp++) { - sum_xy += (*xptr) * ( (*yhigh) - (*ylow) ); - sum_yz += (*yptr) * ( (*zhigh) - (*zlow) ); - sum_zx += (*zptr) * ( (*xhigh) - (*xlow) ); - - xptr += ofs_x; - xlow += ofs_x; - xhigh += ofs_x; - - yptr += ofs_y; - ylow += ofs_y; - yhigh += ofs_y; - - zptr += ofs_z; - zlow += ofs_z; - zhigh += ofs_z; - } - out = aiVector3t<TReal>(sum_yz,sum_zx,sum_xy); -} - -} // ! Assimp - -#endif diff --git a/thirdparty/assimp/code/Common/PostStepRegistry.cpp b/thirdparty/assimp/code/Common/PostStepRegistry.cpp deleted file mode 100644 index 8ff4af0400..0000000000 --- a/thirdparty/assimp/code/Common/PostStepRegistry.cpp +++ /dev/null @@ -1,265 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file ImporterRegistry.cpp - -Central registry for all postprocessing steps available. Do not edit this file -directly (unless you are adding new steps), instead use the -corresponding preprocessor flag to selectively disable steps. 
-*/ - -#include "PostProcessing/ProcessHelper.h" - -#ifndef ASSIMP_BUILD_NO_CALCTANGENTS_PROCESS -# include "PostProcessing/CalcTangentsProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_JOINVERTICES_PROCESS -# include "PostProcessing/JoinVerticesProcess.h" -#endif -#if !(defined ASSIMP_BUILD_NO_MAKELEFTHANDED_PROCESS && defined ASSIMP_BUILD_NO_FLIPUVS_PROCESS && defined ASSIMP_BUILD_NO_FLIPWINDINGORDER_PROCESS) -# include "PostProcessing/ConvertToLHProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_TRIANGULATE_PROCESS -# include "PostProcessing/TriangulateProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_DROPFACENORMALS_PROCESS -# include "PostProcessing/DropFaceNormalsProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_GENFACENORMALS_PROCESS -# include "PostProcessing/GenFaceNormalsProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_GENVERTEXNORMALS_PROCESS -# include "PostProcessing/GenVertexNormalsProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_REMOVEVC_PROCESS -# include "PostProcessing/RemoveVCProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_SPLITLARGEMESHES_PROCESS -# include "PostProcessing/SplitLargeMeshes.h" -#endif -#ifndef ASSIMP_BUILD_NO_PRETRANSFORMVERTICES_PROCESS -# include "PostProcessing/PretransformVertices.h" -#endif -#ifndef ASSIMP_BUILD_NO_LIMITBONEWEIGHTS_PROCESS -# include "PostProcessing/LimitBoneWeightsProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_VALIDATEDS_PROCESS -# include "PostProcessing/ValidateDataStructure.h" -#endif -#ifndef ASSIMP_BUILD_NO_IMPROVECACHELOCALITY_PROCESS -# include "PostProcessing/ImproveCacheLocality.h" -#endif -#ifndef ASSIMP_BUILD_NO_FIXINFACINGNORMALS_PROCESS -# include "PostProcessing/FixNormalsStep.h" -#endif -#ifndef ASSIMP_BUILD_NO_REMOVE_REDUNDANTMATERIALS_PROCESS -# include "PostProcessing/RemoveRedundantMaterials.h" -#endif -#if (!defined ASSIMP_BUILD_NO_EMBEDTEXTURES_PROCESS) -# include "PostProcessing/EmbedTexturesProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_FINDINVALIDDATA_PROCESS -# include "PostProcessing/FindInvalidDataProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_FINDDEGENERATES_PROCESS -# include "PostProcessing/FindDegenerates.h" -#endif -#ifndef ASSIMP_BUILD_NO_SORTBYPTYPE_PROCESS -# include "PostProcessing/SortByPTypeProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_GENUVCOORDS_PROCESS -# include "PostProcessing/ComputeUVMappingProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_TRANSFORMTEXCOORDS_PROCESS -# include "PostProcessing/TextureTransform.h" -#endif -#ifndef ASSIMP_BUILD_NO_FINDINSTANCES_PROCESS -# include "PostProcessing/FindInstancesProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_OPTIMIZEMESHES_PROCESS -# include "PostProcessing/OptimizeMeshes.h" -#endif -#ifndef ASSIMP_BUILD_NO_OPTIMIZEGRAPH_PROCESS -# include "PostProcessing/OptimizeGraph.h" -#endif -#ifndef ASSIMP_BUILD_NO_SPLITBYBONECOUNT_PROCESS -# include "Common/SplitByBoneCountProcess.h" -#endif -#ifndef ASSIMP_BUILD_NO_DEBONE_PROCESS -# include "PostProcessing/DeboneProcess.h" -#endif -#if (!defined ASSIMP_BUILD_NO_GLOBALSCALE_PROCESS) -# include "PostProcessing/ScaleProcess.h" -#endif -#if (!defined ASSIMP_BUILD_NO_ARMATUREPOPULATE_PROCESS) -# include "PostProcessing/ArmaturePopulate.h" -#endif -#if (!defined ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS) -# include "PostProcessing/GenBoundingBoxesProcess.h" -#endif - - - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -void GetPostProcessingStepInstanceList(std::vector< BaseProcess* >& out) -{ - // ---------------------------------------------------------------------------- - // Add an instance 
of each post processing step here in the order - // of sequence it is executed. Steps that are added here are not - // validated - as RegisterPPStep() does - all dependencies must be given. - // ---------------------------------------------------------------------------- - out.reserve(31); -#if (!defined ASSIMP_BUILD_NO_MAKELEFTHANDED_PROCESS) - out.push_back( new MakeLeftHandedProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FLIPUVS_PROCESS) - out.push_back( new FlipUVsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FLIPWINDINGORDER_PROCESS) - out.push_back( new FlipWindingOrderProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_REMOVEVC_PROCESS) - out.push_back( new RemoveVCProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_REMOVE_REDUNDANTMATERIALS_PROCESS) - out.push_back( new RemoveRedundantMatsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_EMBEDTEXTURES_PROCESS) - out.push_back( new EmbedTexturesProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FINDINSTANCES_PROCESS) - out.push_back( new FindInstancesProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_OPTIMIZEGRAPH_PROCESS) - out.push_back( new OptimizeGraphProcess()); -#endif -#ifndef ASSIMP_BUILD_NO_GENUVCOORDS_PROCESS - out.push_back( new ComputeUVMappingProcess()); -#endif -#ifndef ASSIMP_BUILD_NO_TRANSFORMTEXCOORDS_PROCESS - out.push_back( new TextureTransformStep()); -#endif -#if (!defined ASSIMP_BUILD_NO_GLOBALSCALE_PROCESS) - out.push_back( new ScaleProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_ARMATUREPOPULATE_PROCESS) - out.push_back( new ArmaturePopulate()); -#endif -#if (!defined ASSIMP_BUILD_NO_PRETRANSFORMVERTICES_PROCESS) - out.push_back( new PretransformVertices()); -#endif -#if (!defined ASSIMP_BUILD_NO_TRIANGULATE_PROCESS) - out.push_back( new TriangulateProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FINDDEGENERATES_PROCESS) - //find degenerates should run after triangulation (to sort out small - //generated triangles) but before sort by p types (in case there are lines - //and points generated and inserted into a mesh) - out.push_back( new FindDegeneratesProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_SORTBYPTYPE_PROCESS) - out.push_back( new SortByPTypeProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FINDINVALIDDATA_PROCESS) - out.push_back( new FindInvalidDataProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_OPTIMIZEMESHES_PROCESS) - out.push_back( new OptimizeMeshesProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_FIXINFACINGNORMALS_PROCESS) - out.push_back( new FixInfacingNormalsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_SPLITBYBONECOUNT_PROCESS) - out.push_back( new SplitByBoneCountProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_SPLITLARGEMESHES_PROCESS) - out.push_back( new SplitLargeMeshesProcess_Triangle()); -#endif -#if (!defined ASSIMP_BUILD_NO_GENFACENORMALS_PROCESS) - out.push_back( new DropFaceNormalsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_GENFACENORMALS_PROCESS) - out.push_back( new GenFaceNormalsProcess()); -#endif - // ......................................................................... - // DON'T change the order of these five .. - // XXX this is actually a design weakness that dates back to the time - // when Importer would maintain the postprocessing step list exclusively. - // Now that others access it too, we need a better solution. - out.push_back( new ComputeSpatialSortProcess()); - // ......................................................................... 
- -#if (!defined ASSIMP_BUILD_NO_GENVERTEXNORMALS_PROCESS) - out.push_back( new GenVertexNormalsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_CALCTANGENTS_PROCESS) - out.push_back( new CalcTangentsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_JOINVERTICES_PROCESS) - out.push_back( new JoinVerticesProcess()); -#endif - - // ......................................................................... - out.push_back( new DestroySpatialSortProcess()); - // ......................................................................... - -#if (!defined ASSIMP_BUILD_NO_SPLITLARGEMESHES_PROCESS) - out.push_back( new SplitLargeMeshesProcess_Vertex()); -#endif -#if (!defined ASSIMP_BUILD_NO_DEBONE_PROCESS) - out.push_back( new DeboneProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_LIMITBONEWEIGHTS_PROCESS) - out.push_back( new LimitBoneWeightsProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_IMPROVECACHELOCALITY_PROCESS) - out.push_back( new ImproveCacheLocalityProcess()); -#endif -#if (!defined ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS) - out.push_back(new GenBoundingBoxesProcess); -#endif -} - -} diff --git a/thirdparty/assimp/code/Common/RemoveComments.cpp b/thirdparty/assimp/code/Common/RemoveComments.cpp deleted file mode 100644 index 91700a7699..0000000000 --- a/thirdparty/assimp/code/Common/RemoveComments.cpp +++ /dev/null @@ -1,113 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file RemoveComments.cpp - * @brief Defines the CommentRemover utility class - */ - -#include <assimp/RemoveComments.h> -#include <assimp/ParsingUtils.h> - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -// Remove line comments from a file -void CommentRemover::RemoveLineComments(const char* szComment, - char* szBuffer, char chReplacement /* = ' ' */) -{ - // validate parameters - ai_assert(NULL != szComment && NULL != szBuffer && *szComment); - - const size_t len = strlen(szComment); - while (*szBuffer) { - - // skip over quotes - if (*szBuffer == '\"' || *szBuffer == '\'') - while (*szBuffer++ && *szBuffer != '\"' && *szBuffer != '\''); - - if (!strncmp(szBuffer,szComment,len)) { - while (!IsLineEnd(*szBuffer)) - *szBuffer++ = chReplacement; - - if (!*szBuffer) { - break; - } - } - ++szBuffer; - } -} - -// ------------------------------------------------------------------------------------------------ -// Remove multi-line comments from a file -void CommentRemover::RemoveMultiLineComments(const char* szCommentStart, - const char* szCommentEnd,char* szBuffer, - char chReplacement) -{ - // validate parameters - ai_assert(NULL != szCommentStart && NULL != szCommentEnd && - NULL != szBuffer && *szCommentStart && *szCommentEnd); - - const size_t len = strlen(szCommentEnd); - const size_t len2 = strlen(szCommentStart); - - while (*szBuffer) { - // skip over quotes - if (*szBuffer == '\"' || *szBuffer == '\'') - while (*szBuffer++ && *szBuffer != '\"' && *szBuffer != '\''); - - if (!strncmp(szBuffer,szCommentStart,len2)) { - while (*szBuffer) { - if (!::strncmp(szBuffer,szCommentEnd,len)) { - for (unsigned int i = 0; i < len;++i) - *szBuffer++ = chReplacement; - - break; - } - *szBuffer++ = chReplacement; - } - continue; - } - ++szBuffer; - } -} - -} // !! Assimp diff --git a/thirdparty/assimp/code/Common/SGSpatialSort.cpp b/thirdparty/assimp/code/Common/SGSpatialSort.cpp deleted file mode 100644 index 120070b0aa..0000000000 --- a/thirdparty/assimp/code/Common/SGSpatialSort.cpp +++ /dev/null @@ -1,168 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the helper class to quickly find -vertices close to a given position. Special implementation for -the 3ds loader handling smooth groups correctly */ - -#include <assimp/SGSpatialSort.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -SGSpatialSort::SGSpatialSort() -{ - // define the reference plane. We choose some arbitrary vector away from all basic axises - // in the hope that no model spreads all its vertices along this plane. - mPlaneNormal.Set( 0.8523f, 0.34321f, 0.5736f); - mPlaneNormal.Normalize(); -} -// ------------------------------------------------------------------------------------------------ -// Destructor -SGSpatialSort::~SGSpatialSort() -{ - // nothing to do here, everything destructs automatically -} -// ------------------------------------------------------------------------------------------------ -void SGSpatialSort::Add(const aiVector3D& vPosition, unsigned int index, - unsigned int smoothingGroup) -{ - // store position by index and distance - float distance = vPosition * mPlaneNormal; - mPositions.push_back( Entry( index, vPosition, - distance, smoothingGroup)); -} -// ------------------------------------------------------------------------------------------------ -void SGSpatialSort::Prepare() -{ - // now sort the array ascending by distance. - std::sort( this->mPositions.begin(), this->mPositions.end()); -} -// ------------------------------------------------------------------------------------------------ -// Returns an iterator for all positions close to the given position. 
-void SGSpatialSort::FindPositions( const aiVector3D& pPosition, - uint32_t pSG, - float pRadius, - std::vector<unsigned int>& poResults, - bool exactMatch /*= false*/) const -{ - float dist = pPosition * mPlaneNormal; - float minDist = dist - pRadius, maxDist = dist + pRadius; - - // clear the array - poResults.clear(); - - // quick check for positions outside the range - if( mPositions.empty() ) - return; - if( maxDist < mPositions.front().mDistance) - return; - if( minDist > mPositions.back().mDistance) - return; - - // do a binary search for the minimal distance to start the iteration there - unsigned int index = (unsigned int)mPositions.size() / 2; - unsigned int binaryStepSize = (unsigned int)mPositions.size() / 4; - while( binaryStepSize > 1) - { - if( mPositions[index].mDistance < minDist) - index += binaryStepSize; - else - index -= binaryStepSize; - - binaryStepSize /= 2; - } - - // depending on the direction of the last step we need to single step a bit back or forth - // to find the actual beginning element of the range - while( index > 0 && mPositions[index].mDistance > minDist) - index--; - while( index < (mPositions.size() - 1) && mPositions[index].mDistance < minDist) - index++; - - // Mow start iterating from there until the first position lays outside of the distance range. - // Add all positions inside the distance range within the given radius to the result aray - - float squareEpsilon = pRadius * pRadius; - std::vector<Entry>::const_iterator it = mPositions.begin() + index; - std::vector<Entry>::const_iterator end = mPositions.end(); - - if (exactMatch) - { - while( it->mDistance < maxDist) - { - if((it->mPosition - pPosition).SquareLength() < squareEpsilon && it->mSmoothGroups == pSG) - { - poResults.push_back( it->mIndex); - } - ++it; - if( end == it )break; - } - } - else - { - // if the given smoothing group is 0, we'll return all surrounding vertices - if (!pSG) - { - while( it->mDistance < maxDist) - { - if((it->mPosition - pPosition).SquareLength() < squareEpsilon) - poResults.push_back( it->mIndex); - ++it; - if( end == it)break; - } - } - else while( it->mDistance < maxDist) - { - if((it->mPosition - pPosition).SquareLength() < squareEpsilon && - (it->mSmoothGroups & pSG || !it->mSmoothGroups)) - { - poResults.push_back( it->mIndex); - } - ++it; - if( end == it)break; - } - } -} - - diff --git a/thirdparty/assimp/code/Common/SceneCombiner.cpp b/thirdparty/assimp/code/Common/SceneCombiner.cpp deleted file mode 100644 index f7b13cc951..0000000000 --- a/thirdparty/assimp/code/Common/SceneCombiner.cpp +++ /dev/null @@ -1,1350 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -// TODO: refactor entire file to get rid of the "flat-copy" first approach -// to copying structures. This easily breaks in the most unintuitive way -// possible as new fields are added to assimp structures. - -// ---------------------------------------------------------------------------- -/** - * @file Implements Assimp::SceneCombiner. This is a smart utility - * class that combines multiple scenes, meshes, ... into one. Currently - * these utilities are used by the IRR and LWS loaders and the - * OptimizeGraph step. - */ -// ---------------------------------------------------------------------------- -#include <assimp/SceneCombiner.h> -#include <assimp/StringUtils.h> -#include <assimp/fast_atof.h> -#include <assimp/metadata.h> -#include <assimp/Hash.h> -#include "time.h" -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> -#include <assimp/mesh.h> -#include <stdio.h> -#include "ScenePrivate.h" - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -// Add a prefix to a string -inline -void PrefixString(aiString& string,const char* prefix, unsigned int len) { - // If the string is already prefixed, we won't prefix it a second time - if (string.length >= 1 && string.data[0] == '$') - return; - - if (len+string.length>=MAXLEN-1) { - ASSIMP_LOG_DEBUG("Can't add an unique prefix because the string is too long"); - ai_assert(false); - return; - } - - // Add the prefix - ::memmove(string.data+len,string.data,string.length+1); - ::memcpy (string.data, prefix, len); - - // And update the string's length - string.length += len; -} - -// ------------------------------------------------------------------------------------------------ -// Add node identifiers to a hashing set -void SceneCombiner::AddNodeHashes(aiNode* node, std::set<unsigned int>& hashes) { - // Add node name to hashing set if it is non-empty - empty nodes are allowed - // and they can't have any anims assigned so its absolutely safe to duplicate them. 
- if (node->mName.length) { - hashes.insert( SuperFastHash(node->mName.data, static_cast<uint32_t>(node->mName.length)) ); - } - - // Process all children recursively - for (unsigned int i = 0; i < node->mNumChildren;++i) - AddNodeHashes(node->mChildren[i],hashes); -} - -// ------------------------------------------------------------------------------------------------ -// Add a name prefix to all nodes in a hierarchy -void SceneCombiner::AddNodePrefixes(aiNode* node, const char* prefix, unsigned int len) { - ai_assert(NULL != prefix); - PrefixString(node->mName,prefix,len); - - // Process all children recursively - for ( unsigned int i = 0; i < node->mNumChildren; ++i ) { - AddNodePrefixes( node->mChildren[ i ], prefix, len ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Search for matching names -bool SceneCombiner::FindNameMatch(const aiString& name, std::vector<SceneHelper>& input, unsigned int cur) { - const unsigned int hash = SuperFastHash(name.data, static_cast<uint32_t>(name.length)); - - // Check whether we find a positive match in one of the given sets - for (unsigned int i = 0; i < input.size(); ++i) { - if (cur != i && input[i].hashes.find(hash) != input[i].hashes.end()) { - return true; - } - } - return false; -} - -// ------------------------------------------------------------------------------------------------ -// Add a name prefix to all nodes in a hierarchy if a hash match is found -void SceneCombiner::AddNodePrefixesChecked(aiNode* node, const char* prefix, unsigned int len, - std::vector<SceneHelper>& input, unsigned int cur) { - ai_assert(NULL != prefix); - const unsigned int hash = SuperFastHash(node->mName.data, static_cast<uint32_t>(node->mName.length)); - - // Check whether we find a positive match in one of the given sets - for (unsigned int i = 0; i < input.size(); ++i) { - if (cur != i && input[i].hashes.find(hash) != input[i].hashes.end()) { - PrefixString(node->mName,prefix,len); - break; - } - } - - // Process all children recursively - for (unsigned int i = 0; i < node->mNumChildren;++i) - AddNodePrefixesChecked(node->mChildren[i],prefix,len,input,cur); -} - -// ------------------------------------------------------------------------------------------------ -// Add an offset to all mesh indices in a node graph -void SceneCombiner::OffsetNodeMeshIndices (aiNode* node, unsigned int offset) { - for (unsigned int i = 0; i < node->mNumMeshes;++i) - node->mMeshes[i] += offset; - - for ( unsigned int i = 0; i < node->mNumChildren; ++i ) { - OffsetNodeMeshIndices( node->mChildren[ i ], offset ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Merges two scenes. Currently only used by the LWS loader. -void SceneCombiner::MergeScenes(aiScene** _dest,std::vector<aiScene*>& src, unsigned int flags) { - if ( nullptr == _dest ) { - return; - } - - // if _dest points to NULL allocate a new scene. 
Otherwise clear the old and reuse it - if (src.empty()) { - if (*_dest) { - (*_dest)->~aiScene(); - SceneCombiner::CopySceneFlat(_dest,src[0]); - } - else *_dest = src[0]; - return; - } - if (*_dest)(*_dest)->~aiScene(); - else *_dest = new aiScene(); - - // Create a dummy scene to serve as master for the others - aiScene* master = new aiScene(); - master->mRootNode = new aiNode(); - master->mRootNode->mName.Set("<MergeRoot>"); - - std::vector<AttachmentInfo> srcList (src.size()); - for (unsigned int i = 0; i < srcList.size();++i) { - srcList[i] = AttachmentInfo(src[i],master->mRootNode); - } - - // 'master' will be deleted afterwards - MergeScenes (_dest, master, srcList, flags); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::AttachToGraph (aiNode* attach, std::vector<NodeAttachmentInfo>& srcList) { - unsigned int cnt; - for ( cnt = 0; cnt < attach->mNumChildren; ++cnt ) { - AttachToGraph( attach->mChildren[ cnt ], srcList ); - } - - cnt = 0; - for (std::vector<NodeAttachmentInfo>::iterator it = srcList.begin(); - it != srcList.end(); ++it) - { - if ((*it).attachToNode == attach && !(*it).resolved) - ++cnt; - } - - if (cnt) { - aiNode** n = new aiNode*[cnt+attach->mNumChildren]; - if (attach->mNumChildren) { - ::memcpy(n,attach->mChildren,sizeof(void*)*attach->mNumChildren); - delete[] attach->mChildren; - } - attach->mChildren = n; - - n += attach->mNumChildren; - attach->mNumChildren += cnt; - - for (unsigned int i = 0; i < srcList.size();++i) { - NodeAttachmentInfo& att = srcList[i]; - if (att.attachToNode == attach && !att.resolved) { - *n = att.node; - (**n).mParent = attach; - ++n; - - // mark this attachment as resolved - att.resolved = true; - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::AttachToGraph ( aiScene* master, std::vector<NodeAttachmentInfo>& src) { - ai_assert(NULL != master); - AttachToGraph(master->mRootNode,src); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::MergeScenes(aiScene** _dest, aiScene* master, std::vector<AttachmentInfo>& srcList, unsigned int flags) { - if ( nullptr == _dest ) { - return; - } - - // if _dest points to NULL allocate a new scene. Otherwise clear the old and reuse it - if (srcList.empty()) { - if (*_dest) { - SceneCombiner::CopySceneFlat(_dest,master); - } - else *_dest = master; - return; - } - if (*_dest) { - (*_dest)->~aiScene(); - new (*_dest) aiScene(); - } - else *_dest = new aiScene(); - - aiScene* dest = *_dest; - - std::vector<SceneHelper> src (srcList.size()+1); - src[0].scene = master; - for (unsigned int i = 0; i < srcList.size();++i) { - src[i+1] = SceneHelper( srcList[i].scene ); - } - - // this helper array specifies which scenes are duplicates of others - std::vector<unsigned int> duplicates(src.size(),UINT_MAX); - - // this helper array is used as lookup table several times - std::vector<unsigned int> offset(src.size()); - - // Find duplicate scenes - for (unsigned int i = 0; i < src.size();++i) { - if (duplicates[i] != i && duplicates[i] != UINT_MAX) { - continue; - } - - duplicates[i] = i; - for ( unsigned int a = i+1; a < src.size(); ++a) { - if (src[i].scene == src[a].scene) { - duplicates[a] = i; - } - } - } - - // Generate unique names for all named stuff? 
- if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES) - { -#if 0 - // Construct a proper random number generator - boost::mt19937 rng( ); - boost::uniform_int<> dist(1u,1 << 24u); - boost::variate_generator<boost::mt19937&, boost::uniform_int<> > rndGen(rng, dist); -#endif - for (unsigned int i = 1; i < src.size();++i) - { - //if (i != duplicates[i]) - //{ - // // duplicate scenes share the same UID - // ::strcpy( src[i].id, src[duplicates[i]].id ); - // src[i].idlen = src[duplicates[i]].idlen; - - // continue; - //} - - src[i].idlen = ai_snprintf(src[i].id, 32, "$%.6X$_",i); - - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - - // Compute hashes for all identifiers in this scene and store them - // in a sorted table (for convenience I'm using std::set). We hash - // just the node and animation channel names, all identifiers except - // the material names should be caught by doing this. - AddNodeHashes(src[i]->mRootNode,src[i].hashes); - - for (unsigned int a = 0; a < src[i]->mNumAnimations;++a) { - aiAnimation* anim = src[i]->mAnimations[a]; - src[i].hashes.insert(SuperFastHash(anim->mName.data,static_cast<uint32_t>(anim->mName.length))); - } - } - } - } - - unsigned int cnt; - - // First find out how large the respective output arrays must be - for ( unsigned int n = 0; n < src.size();++n ) - { - SceneHelper* cur = &src[n]; - - if (n == duplicates[n] || flags & AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY) { - dest->mNumTextures += (*cur)->mNumTextures; - dest->mNumMaterials += (*cur)->mNumMaterials; - dest->mNumMeshes += (*cur)->mNumMeshes; - } - - dest->mNumLights += (*cur)->mNumLights; - dest->mNumCameras += (*cur)->mNumCameras; - dest->mNumAnimations += (*cur)->mNumAnimations; - - // Combine the flags of all scenes - // We need to process them flag-by-flag here to get correct results - // dest->mFlags ; //|= (*cur)->mFlags; - if ((*cur)->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) { - dest->mFlags |= AI_SCENE_FLAGS_NON_VERBOSE_FORMAT; - } - } - - // generate the output texture list + an offset table for all texture indices - if (dest->mNumTextures) - { - aiTexture** pip = dest->mTextures = new aiTexture*[dest->mNumMaterials]; - cnt = 0; - for ( unsigned int n = 0; n < src.size();++n ) - { - SceneHelper* cur = &src[n]; - for (unsigned int i = 0; i < (*cur)->mNumTextures;++i) - { - if (n != duplicates[n]) - { - if ( flags & AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY) - Copy(pip,(*cur)->mTextures[i]); - - else continue; - } - else *pip = (*cur)->mTextures[i]; - ++pip; - } - - offset[n] = cnt; - cnt = (unsigned int)(pip - dest->mTextures); - } - } - - // generate the output material list + an offset table for all material indices - if (dest->mNumMaterials) - { - aiMaterial** pip = dest->mMaterials = new aiMaterial*[dest->mNumMaterials]; - cnt = 0; - for ( unsigned int n = 0; n < src.size();++n ) { - SceneHelper* cur = &src[n]; - for (unsigned int i = 0; i < (*cur)->mNumMaterials;++i) - { - if (n != duplicates[n]) - { - if ( flags & AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY) - Copy(pip,(*cur)->mMaterials[i]); - - else continue; - } - else *pip = (*cur)->mMaterials[i]; - - if ((*cur)->mNumTextures != dest->mNumTextures) { - // We need to update all texture indices of the mesh. So we need to search for - // a material property called '$tex.file' - - for (unsigned int a = 0; a < (*pip)->mNumProperties;++a) - { - aiMaterialProperty* prop = (*pip)->mProperties[a]; - if (!strncmp(prop->mKey.data,"$tex.file",9)) - { - // Check whether this texture is an embedded texture. 
- // In this case the property looks like this: *<n>, - // where n is the index of the texture. - aiString& s = *((aiString*)prop->mData); - if ('*' == s.data[0]) { - // Offset the index and write it back .. - const unsigned int idx = strtoul10(&s.data[1]) + offset[n]; - ASSIMP_itoa10(&s.data[1],sizeof(s.data)-1,idx); - } - } - - // Need to generate new, unique material names? - else if (!::strcmp( prop->mKey.data,"$mat.name" ) && flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_MATNAMES) - { - aiString* pcSrc = (aiString*) prop->mData; - PrefixString(*pcSrc, (*cur).id, (*cur).idlen); - } - } - } - ++pip; - } - - offset[n] = cnt; - cnt = (unsigned int)(pip - dest->mMaterials); - } - } - - // generate the output mesh list + again an offset table for all mesh indices - if (dest->mNumMeshes) - { - aiMesh** pip = dest->mMeshes = new aiMesh*[dest->mNumMeshes]; - cnt = 0; - for ( unsigned int n = 0; n < src.size();++n ) - { - SceneHelper* cur = &src[n]; - for (unsigned int i = 0; i < (*cur)->mNumMeshes;++i) - { - if (n != duplicates[n]) { - if ( flags & AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY) - Copy(pip, (*cur)->mMeshes[i]); - - else continue; - } - else *pip = (*cur)->mMeshes[i]; - - // update the material index of the mesh - (*pip)->mMaterialIndex += offset[n]; - ++pip; - } - - // reuse the offset array - store now the mesh offset in it - offset[n] = cnt; - cnt = (unsigned int)(pip - dest->mMeshes); - } - } - - std::vector <NodeAttachmentInfo> nodes; - nodes.reserve(srcList.size()); - - // ---------------------------------------------------------------------------- - // Now generate the output node graph. We need to make those - // names in the graph that are referenced by anims or lights - // or cameras unique. So we add a prefix to them ... $<rand>_ - // We could also use a counter, but using a random value allows us to - // use just one prefix if we are joining multiple scene hierarchies recursively. - // Chances are quite good we don't collide, so we try that ... - // ---------------------------------------------------------------------------- - - // Allocate space for light sources, cameras and animations - aiLight** ppLights = dest->mLights = (dest->mNumLights - ? new aiLight*[dest->mNumLights] : NULL); - - aiCamera** ppCameras = dest->mCameras = (dest->mNumCameras - ? new aiCamera*[dest->mNumCameras] : NULL); - - aiAnimation** ppAnims = dest->mAnimations = (dest->mNumAnimations - ? new aiAnimation*[dest->mNumAnimations] : NULL); - - for ( int n = static_cast<int>(src.size()-1); n >= 0 ;--n ) /* !!! important !!! */ - { - SceneHelper* cur = &src[n]; - aiNode* node; - - // To offset or not to offset, this is the question - if (n != (int)duplicates[n]) - { - // Get full scene-graph copy - Copy( &node, (*cur)->mRootNode ); - OffsetNodeMeshIndices(node,offset[duplicates[n]]); - - if (flags & AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY) { - // (note:) they are already 'offseted' by offset[duplicates[n]] - OffsetNodeMeshIndices(node,offset[n] - offset[duplicates[n]]); - } - } - else // if (n == duplicates[n]) - { - node = (*cur)->mRootNode; - OffsetNodeMeshIndices(node,offset[n]); - } - if (n) // src[0] is the master node - nodes.push_back(NodeAttachmentInfo( node,srcList[n-1].attachToNode,n )); - - // add name prefixes? 
- if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES) { - - // or the whole scenegraph - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - AddNodePrefixesChecked(node,(*cur).id,(*cur).idlen,src,n); - } - else AddNodePrefixes(node,(*cur).id,(*cur).idlen); - - // meshes - for (unsigned int i = 0; i < (*cur)->mNumMeshes;++i) { - aiMesh* mesh = (*cur)->mMeshes[i]; - - // rename all bones - for (unsigned int a = 0; a < mesh->mNumBones;++a) { - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - if (!FindNameMatch(mesh->mBones[a]->mName,src,n)) - continue; - } - PrefixString(mesh->mBones[a]->mName,(*cur).id,(*cur).idlen); - } - } - } - - // -------------------------------------------------------------------- - // Copy light sources - for (unsigned int i = 0; i < (*cur)->mNumLights;++i,++ppLights) - { - if (n != (int)duplicates[n]) // duplicate scene? - { - Copy(ppLights, (*cur)->mLights[i]); - } - else *ppLights = (*cur)->mLights[i]; - - - // Add name prefixes? - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES) { - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - if (!FindNameMatch((*ppLights)->mName,src,n)) - continue; - } - - PrefixString((*ppLights)->mName,(*cur).id,(*cur).idlen); - } - } - - // -------------------------------------------------------------------- - // Copy cameras - for (unsigned int i = 0; i < (*cur)->mNumCameras;++i,++ppCameras) { - if (n != (int)duplicates[n]) // duplicate scene? - { - Copy(ppCameras, (*cur)->mCameras[i]); - } - else *ppCameras = (*cur)->mCameras[i]; - - // Add name prefixes? - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES) { - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - if (!FindNameMatch((*ppCameras)->mName,src,n)) - continue; - } - - PrefixString((*ppCameras)->mName,(*cur).id,(*cur).idlen); - } - } - - // -------------------------------------------------------------------- - // Copy animations - for (unsigned int i = 0; i < (*cur)->mNumAnimations;++i,++ppAnims) { - if (n != (int)duplicates[n]) // duplicate scene? - { - Copy(ppAnims, (*cur)->mAnimations[i]); - } - else *ppAnims = (*cur)->mAnimations[i]; - - // Add name prefixes? - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES) { - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - if (!FindNameMatch((*ppAnims)->mName,src,n)) - continue; - } - - PrefixString((*ppAnims)->mName,(*cur).id,(*cur).idlen); - - // don't forget to update all node animation channels - for (unsigned int a = 0; a < (*ppAnims)->mNumChannels;++a) { - if (flags & AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY) { - if (!FindNameMatch((*ppAnims)->mChannels[a]->mNodeName,src,n)) - continue; - } - - PrefixString((*ppAnims)->mChannels[a]->mNodeName,(*cur).id,(*cur).idlen); - } - } - } - } - - // Now build the output graph - AttachToGraph ( master, nodes); - dest->mRootNode = master->mRootNode; - - // Check whether we succeeded at building the output graph - for (std::vector <NodeAttachmentInfo> ::iterator it = nodes.begin(); - it != nodes.end(); ++it) - { - if (!(*it).resolved) { - if (flags & AI_INT_MERGE_SCENE_RESOLVE_CROSS_ATTACHMENTS) { - // search for this attachment point in all other imported scenes, too. 
- for ( unsigned int n = 0; n < src.size();++n ) { - if (n != (*it).src_idx) { - AttachToGraph(src[n].scene,nodes); - if ((*it).resolved) - break; - } - } - } - if (!(*it).resolved) { - ASSIMP_LOG_ERROR_F( "SceneCombiner: Failed to resolve attachment ", (*it).node->mName.data, - " ", (*it).attachToNode->mName.data ); - } - } - } - - // now delete all input scenes. Make sure duplicate scenes aren't - // deleted more than one time - for ( unsigned int n = 0; n < src.size();++n ) { - if (n != duplicates[n]) // duplicate scene? - continue; - - aiScene* deleteMe = src[n].scene; - - // We need to delete the arrays before the destructor is called - - // we are reusing the array members - delete[] deleteMe->mMeshes; deleteMe->mMeshes = NULL; - delete[] deleteMe->mCameras; deleteMe->mCameras = NULL; - delete[] deleteMe->mLights; deleteMe->mLights = NULL; - delete[] deleteMe->mMaterials; deleteMe->mMaterials = NULL; - delete[] deleteMe->mAnimations; deleteMe->mAnimations = NULL; - - deleteMe->mRootNode = NULL; - - // Now we can safely delete the scene - delete deleteMe; - } - - // Check flags - if (!dest->mNumMeshes || !dest->mNumMaterials) { - dest->mFlags |= AI_SCENE_FLAGS_INCOMPLETE; - } - - // We're finished -} - -// ------------------------------------------------------------------------------------------------ -// Build a list of unique bones -void SceneCombiner::BuildUniqueBoneList(std::list<BoneWithHash>& asBones, - std::vector<aiMesh*>::const_iterator it, - std::vector<aiMesh*>::const_iterator end) -{ - unsigned int iOffset = 0; - for (; it != end;++it) { - for (unsigned int l = 0; l < (*it)->mNumBones;++l) { - aiBone* p = (*it)->mBones[l]; - uint32_t itml = SuperFastHash(p->mName.data,(unsigned int)p->mName.length); - - std::list<BoneWithHash>::iterator it2 = asBones.begin(); - std::list<BoneWithHash>::iterator end2 = asBones.end(); - - for (;it2 != end2;++it2) { - if ((*it2).first == itml) { - (*it2).pSrcBones.push_back(BoneSrcIndex(p,iOffset)); - break; - } - } - if (end2 == it2) { - // need to begin a new bone entry - asBones.push_back(BoneWithHash()); - BoneWithHash& btz = asBones.back(); - - // setup members - btz.first = itml; - btz.second = &p->mName; - btz.pSrcBones.push_back(BoneSrcIndex(p,iOffset)); - } - } - iOffset += (*it)->mNumVertices; - } -} - -// ------------------------------------------------------------------------------------------------ -// Merge a list of bones -void SceneCombiner::MergeBones(aiMesh* out,std::vector<aiMesh*>::const_iterator it, - std::vector<aiMesh*>::const_iterator end) -{ - if ( nullptr == out || out->mNumBones == 0 ) { - return; - } - - // find we need to build an unique list of all bones. - // we work with hashes to make the comparisons MUCH faster, - // at least if we have many bones. 
- std::list<BoneWithHash> asBones; - BuildUniqueBoneList( asBones, it, end ); - - // now create the output bones - out->mNumBones = 0; - out->mBones = new aiBone*[asBones.size()]; - - for (std::list<BoneWithHash>::const_iterator boneIt = asBones.begin(),boneEnd = asBones.end(); boneIt != boneEnd; ++boneIt ) { - // Allocate a bone and setup it's name - aiBone* pc = out->mBones[out->mNumBones++] = new aiBone(); - pc->mName = aiString( *( boneIt->second )); - - std::vector< BoneSrcIndex >::const_iterator wend = boneIt->pSrcBones.end(); - - // Loop through all bones to be joined for this bone - for (std::vector< BoneSrcIndex >::const_iterator wmit = boneIt->pSrcBones.begin(); wmit != wend; ++wmit) { - pc->mNumWeights += (*wmit).first->mNumWeights; - - // NOTE: different offset matrices for bones with equal names - // are - at the moment - not handled correctly. - if (wmit != boneIt->pSrcBones.begin() && pc->mOffsetMatrix != wmit->first->mOffsetMatrix) { - ASSIMP_LOG_WARN("Bones with equal names but different offset matrices can't be joined at the moment"); - continue; - } - pc->mOffsetMatrix = wmit->first->mOffsetMatrix; - } - - // Allocate the vertex weight array - aiVertexWeight* avw = pc->mWeights = new aiVertexWeight[pc->mNumWeights]; - - // And copy the final weights - adjust the vertex IDs by the - // face index offset of the corresponding mesh. - for (std::vector< BoneSrcIndex >::const_iterator wmit = (*boneIt).pSrcBones.begin(); wmit != (*boneIt).pSrcBones.end(); ++wmit) { - if (wmit == wend) { - break; - } - - aiBone* pip = (*wmit).first; - for (unsigned int mp = 0; mp < pip->mNumWeights;++mp,++avw) { - const aiVertexWeight& vfi = pip->mWeights[mp]; - avw->mWeight = vfi.mWeight; - avw->mVertexId = vfi.mVertexId + (*wmit).second; - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Merge a list of meshes -void SceneCombiner::MergeMeshes(aiMesh** _out, unsigned int /*flags*/, - std::vector<aiMesh*>::const_iterator begin, - std::vector<aiMesh*>::const_iterator end) -{ - if ( nullptr == _out ) { - return; - } - - if (begin == end) { - *_out = NULL; // no meshes ... 
- return; - } - - // Allocate the output mesh - aiMesh* out = *_out = new aiMesh(); - out->mMaterialIndex = (*begin)->mMaterialIndex; - - std::string name; - // Find out how much output storage we'll need - for (std::vector<aiMesh*>::const_iterator it = begin; it != end; ++it) { - const char *meshName( (*it)->mName.C_Str() ); - name += std::string( meshName ); - if ( it != end - 1 ) { - name += "."; - } - out->mNumVertices += (*it)->mNumVertices; - out->mNumFaces += (*it)->mNumFaces; - out->mNumBones += (*it)->mNumBones; - - // combine primitive type flags - out->mPrimitiveTypes |= (*it)->mPrimitiveTypes; - } - out->mName.Set( name.c_str() ); - - if (out->mNumVertices) { - aiVector3D* pv2; - - // copy vertex positions - if ((**begin).HasPositions()) { - - pv2 = out->mVertices = new aiVector3D[out->mNumVertices]; - for (std::vector<aiMesh*>::const_iterator it = begin; it != end; ++it) { - if ((*it)->mVertices) { - ::memcpy(pv2,(*it)->mVertices,(*it)->mNumVertices*sizeof(aiVector3D)); - } - else ASSIMP_LOG_WARN("JoinMeshes: Positions expected but input mesh contains no positions"); - pv2 += (*it)->mNumVertices; - } - } - // copy normals - if ((**begin).HasNormals()) { - - pv2 = out->mNormals = new aiVector3D[out->mNumVertices]; - for (std::vector<aiMesh*>::const_iterator it = begin; it != end;++it) { - if ((*it)->mNormals) { - ::memcpy(pv2,(*it)->mNormals,(*it)->mNumVertices*sizeof(aiVector3D)); - } else { - ASSIMP_LOG_WARN( "JoinMeshes: Normals expected but input mesh contains no normals" ); - } - pv2 += (*it)->mNumVertices; - } - } - // copy tangents and bi-tangents - if ((**begin).HasTangentsAndBitangents()) { - - pv2 = out->mTangents = new aiVector3D[out->mNumVertices]; - aiVector3D* pv2b = out->mBitangents = new aiVector3D[out->mNumVertices]; - - for (std::vector<aiMesh*>::const_iterator it = begin; it != end;++it) { - if ((*it)->mTangents) { - ::memcpy(pv2, (*it)->mTangents, (*it)->mNumVertices*sizeof(aiVector3D)); - ::memcpy(pv2b,(*it)->mBitangents,(*it)->mNumVertices*sizeof(aiVector3D)); - } else { - ASSIMP_LOG_WARN( "JoinMeshes: Tangents expected but input mesh contains no tangents" ); - } - pv2 += (*it)->mNumVertices; - pv2b += (*it)->mNumVertices; - } - } - // copy texture coordinates - unsigned int n = 0; - while ((**begin).HasTextureCoords(n)) { - out->mNumUVComponents[n] = (*begin)->mNumUVComponents[n]; - - pv2 = out->mTextureCoords[n] = new aiVector3D[out->mNumVertices]; - for (std::vector<aiMesh*>::const_iterator it = begin; it != end;++it) { - if ((*it)->mTextureCoords[n]) { - ::memcpy(pv2,(*it)->mTextureCoords[n],(*it)->mNumVertices*sizeof(aiVector3D)); - } else { - ASSIMP_LOG_WARN( "JoinMeshes: UVs expected but input mesh contains no UVs" ); - } - pv2 += (*it)->mNumVertices; - } - ++n; - } - // copy vertex colors - n = 0; - while ((**begin).HasVertexColors(n)) { - aiColor4D *pVec2 = out->mColors[n] = new aiColor4D[out->mNumVertices]; - for ( std::vector<aiMesh*>::const_iterator it = begin; it != end; ++it ) { - if ((*it)->mColors[n]) { - ::memcpy( pVec2, (*it)->mColors[ n ], (*it)->mNumVertices * sizeof( aiColor4D ) ) ; - } else { - ASSIMP_LOG_WARN( "JoinMeshes: VCs expected but input mesh contains no VCs" ); - } - pVec2 += (*it)->mNumVertices; - } - ++n; - } - } - - if (out->mNumFaces) // just for safety - { - // copy faces - out->mFaces = new aiFace[out->mNumFaces]; - aiFace* pf2 = out->mFaces; - - unsigned int ofs = 0; - for (std::vector<aiMesh*>::const_iterator it = begin; it != end;++it) { - for (unsigned int m = 0; m < (*it)->mNumFaces;++m,++pf2) { - aiFace& face = 
(*it)->mFaces[m]; - pf2->mNumIndices = face.mNumIndices; - pf2->mIndices = face.mIndices; - - if (ofs) { - // add the offset to the vertex - for (unsigned int q = 0; q < face.mNumIndices; ++q) - face.mIndices[q] += ofs; - } - face.mIndices = NULL; - } - ofs += (*it)->mNumVertices; - } - } - - // bones - as this is quite lengthy, I moved the code to a separate function - if (out->mNumBones) - MergeBones(out,begin,end); - - // delete all source meshes - for (std::vector<aiMesh*>::const_iterator it = begin; it != end;++it) - delete *it; -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::MergeMaterials(aiMaterial** dest, - std::vector<aiMaterial*>::const_iterator begin, - std::vector<aiMaterial*>::const_iterator end) -{ - if ( nullptr == dest ) { - return; - } - - if (begin == end) { - *dest = NULL; // no materials ... - return; - } - - // Allocate the output material - aiMaterial* out = *dest = new aiMaterial(); - - // Get the maximal number of properties - unsigned int size = 0; - for (std::vector<aiMaterial*>::const_iterator it = begin; it != end; ++it) { - size += (*it)->mNumProperties; - } - - out->Clear(); - delete[] out->mProperties; - - out->mNumAllocated = size; - out->mNumProperties = 0; - out->mProperties = new aiMaterialProperty*[out->mNumAllocated]; - - for (std::vector<aiMaterial*>::const_iterator it = begin; it != end; ++it) { - for(unsigned int i = 0; i < (*it)->mNumProperties; ++i) { - aiMaterialProperty* sprop = (*it)->mProperties[i]; - - // Test if we already have a matching property - const aiMaterialProperty* prop_exist; - if(aiGetMaterialProperty(out, sprop->mKey.C_Str(), sprop->mSemantic, sprop->mIndex, &prop_exist) != AI_SUCCESS) { - // If not, we add it to the new material - aiMaterialProperty* prop = out->mProperties[out->mNumProperties] = new aiMaterialProperty(); - - prop->mDataLength = sprop->mDataLength; - prop->mData = new char[prop->mDataLength]; - ::memcpy(prop->mData, sprop->mData, prop->mDataLength); - - prop->mIndex = sprop->mIndex; - prop->mSemantic = sprop->mSemantic; - prop->mKey = sprop->mKey; - prop->mType = sprop->mType; - - out->mNumProperties++; - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -template <typename Type> -inline -void CopyPtrArray (Type**& dest, const Type* const * src, ai_uint num) { - if (!num) { - dest = NULL; - return; - } - dest = new Type*[num]; - for (ai_uint i = 0; i < num;++i) { - SceneCombiner::Copy(&dest[i],src[i]); - } -} - -// ------------------------------------------------------------------------------------------------ -template <typename Type> -inline -void GetArrayCopy(Type*& dest, ai_uint num ) { - if ( !dest ) { - return; - } - Type* old = dest; - - dest = new Type[num]; - ::memcpy(dest, old, sizeof(Type) * num); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::CopySceneFlat(aiScene** _dest,const aiScene* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - // reuse the old scene or allocate a new? 
- if (*_dest) { - (*_dest)->~aiScene(); - new (*_dest) aiScene(); - } else { - *_dest = new aiScene(); - } - - ::memcpy(*_dest,src,sizeof(aiScene)); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::CopyScene(aiScene** _dest,const aiScene* src,bool allocate) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - if (allocate) { - *_dest = new aiScene(); - } - aiScene* dest = *_dest; - ai_assert(nullptr != dest); - - // copy metadata - if ( nullptr != src->mMetaData ) { - dest->mMetaData = new aiMetadata( *src->mMetaData ); - } - - // copy animations - dest->mNumAnimations = src->mNumAnimations; - CopyPtrArray(dest->mAnimations,src->mAnimations, - dest->mNumAnimations); - - // copy textures - dest->mNumTextures = src->mNumTextures; - CopyPtrArray(dest->mTextures,src->mTextures, - dest->mNumTextures); - - // copy materials - dest->mNumMaterials = src->mNumMaterials; - CopyPtrArray(dest->mMaterials,src->mMaterials, - dest->mNumMaterials); - - // copy lights - dest->mNumLights = src->mNumLights; - CopyPtrArray(dest->mLights,src->mLights, - dest->mNumLights); - - // copy cameras - dest->mNumCameras = src->mNumCameras; - CopyPtrArray(dest->mCameras,src->mCameras, - dest->mNumCameras); - - // copy meshes - dest->mNumMeshes = src->mNumMeshes; - CopyPtrArray(dest->mMeshes,src->mMeshes, - dest->mNumMeshes); - - // now - copy the root node of the scene (deep copy, too) - Copy( &dest->mRootNode, src->mRootNode); - - // and keep the flags ... - dest->mFlags = src->mFlags; - - // source private data might be NULL if the scene is user-allocated (i.e. for use with the export API) - if (dest->mPrivate != NULL) { - ScenePriv(dest)->mPPStepsApplied = ScenePriv(src) ? ScenePriv(src)->mPPStepsApplied : 0; - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy( aiMesh** _dest, const aiMesh* src ) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiMesh* dest = *_dest = new aiMesh(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiMesh)); - - // and reallocate all arrays - GetArrayCopy( dest->mVertices, dest->mNumVertices ); - GetArrayCopy( dest->mNormals , dest->mNumVertices ); - GetArrayCopy( dest->mTangents, dest->mNumVertices ); - GetArrayCopy( dest->mBitangents, dest->mNumVertices ); - - unsigned int n = 0; - while (dest->HasTextureCoords(n)) - GetArrayCopy( dest->mTextureCoords[n++], dest->mNumVertices ); - - n = 0; - while (dest->HasVertexColors(n)) - GetArrayCopy( dest->mColors[n++], dest->mNumVertices ); - - // make a deep copy of all bones - CopyPtrArray(dest->mBones,dest->mBones,dest->mNumBones); - - // make a deep copy of all faces - GetArrayCopy(dest->mFaces,dest->mNumFaces); - for (unsigned int i = 0; i < dest->mNumFaces;++i) { - aiFace& f = dest->mFaces[i]; - GetArrayCopy(f.mIndices,f.mNumIndices); - } - - // make a deep copy of all blend shapes - CopyPtrArray(dest->mAnimMeshes, dest->mAnimMeshes, dest->mNumAnimMeshes); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiAnimMesh** _dest, const aiAnimMesh* src) { - if (nullptr == _dest || nullptr == src) { - return; - } - - aiAnimMesh* dest = *_dest = new aiAnimMesh(); - - // get a flat copy - ::memcpy(dest, src, sizeof(aiAnimMesh)); - - // and reallocate all arrays - GetArrayCopy(dest->mVertices, dest->mNumVertices); - GetArrayCopy(dest->mNormals, dest->mNumVertices); - 
GetArrayCopy(dest->mTangents, dest->mNumVertices); - GetArrayCopy(dest->mBitangents, dest->mNumVertices); - - unsigned int n = 0; - while (dest->HasTextureCoords(n)) - GetArrayCopy(dest->mTextureCoords[n++], dest->mNumVertices); - - n = 0; - while (dest->HasVertexColors(n)) - GetArrayCopy(dest->mColors[n++], dest->mNumVertices); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy (aiMaterial** _dest, const aiMaterial* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiMaterial* dest = (aiMaterial*) ( *_dest = new aiMaterial() ); - - dest->Clear(); - delete[] dest->mProperties; - - dest->mNumAllocated = src->mNumAllocated; - dest->mNumProperties = src->mNumProperties; - dest->mProperties = new aiMaterialProperty* [dest->mNumAllocated]; - - for (unsigned int i = 0; i < dest->mNumProperties;++i) - { - aiMaterialProperty* prop = dest->mProperties[i] = new aiMaterialProperty(); - aiMaterialProperty* sprop = src->mProperties[i]; - - prop->mDataLength = sprop->mDataLength; - prop->mData = new char[prop->mDataLength]; - ::memcpy(prop->mData,sprop->mData,prop->mDataLength); - - prop->mIndex = sprop->mIndex; - prop->mSemantic = sprop->mSemantic; - prop->mKey = sprop->mKey; - prop->mType = sprop->mType; - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiTexture** _dest, const aiTexture* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiTexture* dest = *_dest = new aiTexture(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiTexture)); - - // and reallocate all arrays. We must do it manually here - const char* old = (const char*)dest->pcData; - if (old) - { - unsigned int cpy; - if (!dest->mHeight)cpy = dest->mWidth; - else cpy = dest->mHeight * dest->mWidth * sizeof(aiTexel); - - if (!cpy) - { - dest->pcData = NULL; - return; - } - // the cast is legal, the aiTexel c'tor does nothing important - dest->pcData = (aiTexel*) new char[cpy]; - ::memcpy(dest->pcData, old, cpy); - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy( aiAnimation** _dest, const aiAnimation* src ) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiAnimation* dest = *_dest = new aiAnimation(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiAnimation)); - - // and reallocate all arrays - CopyPtrArray( dest->mChannels, src->mChannels, dest->mNumChannels ); - CopyPtrArray( dest->mMorphMeshChannels, src->mMorphMeshChannels, dest->mNumMorphMeshChannels ); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiNodeAnim** _dest, const aiNodeAnim* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiNodeAnim* dest = *_dest = new aiNodeAnim(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiNodeAnim)); - - // and reallocate all arrays - GetArrayCopy( dest->mPositionKeys, dest->mNumPositionKeys ); - GetArrayCopy( dest->mScalingKeys, dest->mNumScalingKeys ); - GetArrayCopy( dest->mRotationKeys, dest->mNumRotationKeys ); -} - -void SceneCombiner::Copy(aiMeshMorphAnim** _dest, const aiMeshMorphAnim* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiMeshMorphAnim* dest = *_dest = new aiMeshMorphAnim(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiMeshMorphAnim)); - - // and reallocate all arrays - 
GetArrayCopy( dest->mKeys, dest->mNumKeys ); - for (ai_uint i = 0; i < dest->mNumKeys;++i) { - dest->mKeys[i].mValues = new unsigned int[dest->mKeys[i].mNumValuesAndWeights]; - dest->mKeys[i].mWeights = new double[dest->mKeys[i].mNumValuesAndWeights]; - ::memcpy(dest->mKeys[i].mValues, src->mKeys[i].mValues, dest->mKeys[i].mNumValuesAndWeights * sizeof(unsigned int)); - ::memcpy(dest->mKeys[i].mWeights, src->mKeys[i].mWeights, dest->mKeys[i].mNumValuesAndWeights * sizeof(double)); - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy( aiCamera** _dest,const aiCamera* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiCamera* dest = *_dest = new aiCamera(); - - // get a flat copy, that's already OK - ::memcpy(dest,src,sizeof(aiCamera)); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiLight** _dest, const aiLight* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiLight* dest = *_dest = new aiLight(); - - // get a flat copy, that's already OK - ::memcpy(dest,src,sizeof(aiLight)); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiBone** _dest, const aiBone* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - aiBone* dest = *_dest = new aiBone(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiBone)); - - // and reallocate all arrays - GetArrayCopy( dest->mWeights, dest->mNumWeights ); -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy (aiNode** _dest, const aiNode* src) -{ - ai_assert(NULL != _dest && NULL != src); - - aiNode* dest = *_dest = new aiNode(); - - // get a flat copy - ::memcpy(dest,src,sizeof(aiNode)); - - if (src->mMetaData) { - Copy(&dest->mMetaData, src->mMetaData); - } - - // and reallocate all arrays - GetArrayCopy( dest->mMeshes, dest->mNumMeshes ); - CopyPtrArray( dest->mChildren, src->mChildren,dest->mNumChildren); - - // need to set the mParent fields to the created aiNode. 
- for( unsigned int i = 0; i < dest->mNumChildren; i ++ ) { - dest->mChildren[i]->mParent = dest; - } -} - -// ------------------------------------------------------------------------------------------------ -void SceneCombiner::Copy(aiMetadata** _dest, const aiMetadata* src) { - if ( nullptr == _dest || nullptr == src ) { - return; - } - - if ( 0 == src->mNumProperties ) { - return; - } - - aiMetadata* dest = *_dest = aiMetadata::Alloc( src->mNumProperties ); - std::copy(src->mKeys, src->mKeys + src->mNumProperties, dest->mKeys); - - dest->mValues = new aiMetadataEntry[src->mNumProperties]; - for (unsigned int i = 0; i < src->mNumProperties; ++i) { - aiMetadataEntry& in = src->mValues[i]; - aiMetadataEntry& out = dest->mValues[i]; - out.mType = in.mType; - switch (dest->mValues[i].mType) { - case AI_BOOL: - out.mData = new bool(*static_cast<bool*>(in.mData)); - break; - case AI_INT32: - out.mData = new int32_t(*static_cast<int32_t*>(in.mData)); - break; - case AI_UINT64: - out.mData = new uint64_t(*static_cast<uint64_t*>(in.mData)); - break; - case AI_FLOAT: - out.mData = new float(*static_cast<float*>(in.mData)); - break; - case AI_DOUBLE: - out.mData = new double(*static_cast<double*>(in.mData)); - break; - case AI_AISTRING: - out.mData = new aiString(*static_cast<aiString*>(in.mData)); - break; - case AI_AIVECTOR3D: - out.mData = new aiVector3D(*static_cast<aiVector3D*>(in.mData)); - break; - default: - ai_assert(false); - break; - } - } -} - -} // Namespace Assimp - diff --git a/thirdparty/assimp/code/Common/ScenePreprocessor.cpp b/thirdparty/assimp/code/Common/ScenePreprocessor.cpp deleted file mode 100644 index 432a3d7666..0000000000 --- a/thirdparty/assimp/code/Common/ScenePreprocessor.cpp +++ /dev/null @@ -1,261 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -#include "ScenePreprocessor.h" -#include <assimp/ai_assert.h> -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> - - -using namespace Assimp; - -// --------------------------------------------------------------------------------------------- -void ScenePreprocessor::ProcessScene () -{ - ai_assert(scene != NULL); - - // Process all meshes - for (unsigned int i = 0; i < scene->mNumMeshes;++i) - ProcessMesh(scene->mMeshes[i]); - - // - nothing to do for nodes for the moment - // - nothing to do for textures for the moment - // - nothing to do for lights for the moment - // - nothing to do for cameras for the moment - - // Process all animations - for (unsigned int i = 0; i < scene->mNumAnimations;++i) - ProcessAnimation(scene->mAnimations[i]); - - // Generate a default material if none was specified - if (!scene->mNumMaterials && scene->mNumMeshes) { - scene->mMaterials = new aiMaterial*[2]; - aiMaterial* helper; - - aiString name; - - scene->mMaterials[scene->mNumMaterials] = helper = new aiMaterial(); - aiColor3D clr(0.6f,0.6f,0.6f); - helper->AddProperty(&clr,1,AI_MATKEY_COLOR_DIFFUSE); - - // setup the default name to make this material identifiable - name.Set(AI_DEFAULT_MATERIAL_NAME); - helper->AddProperty(&name,AI_MATKEY_NAME); - - ASSIMP_LOG_DEBUG("ScenePreprocessor: Adding default material \'" AI_DEFAULT_MATERIAL_NAME "\'"); - - for (unsigned int i = 0; i < scene->mNumMeshes;++i) { - scene->mMeshes[i]->mMaterialIndex = scene->mNumMaterials; - } - - scene->mNumMaterials++; - } -} - -// --------------------------------------------------------------------------------------------- -void ScenePreprocessor::ProcessMesh (aiMesh* mesh) -{ - // If aiMesh::mNumUVComponents is *not* set assign the default value of 2 - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (!mesh->mTextureCoords[i]) { - mesh->mNumUVComponents[i] = 0; - } else { - if (!mesh->mNumUVComponents[i]) - mesh->mNumUVComponents[i] = 2; - - aiVector3D* p = mesh->mTextureCoords[i], *end = p+mesh->mNumVertices; - - // Ensure unused components are zeroed. This will make 1D texture channels work - // as if they were 2D channels .. just in case an application doesn't handle - // this case - if (2 == mesh->mNumUVComponents[i]) { - for (; p != end; ++p) - p->z = 0.f; - } - else if (1 == mesh->mNumUVComponents[i]) { - for (; p != end; ++p) - p->z = p->y = 0.f; - } - else if (3 == mesh->mNumUVComponents[i]) { - // Really 3D coordinates? Check whether the third coordinate is != 0 for at least one element - for (; p != end; ++p) { - if (p->z != 0) - break; - } - if (p == end) { - ASSIMP_LOG_WARN("ScenePreprocessor: UVs are declared to be 3D but they're obviously not. Reverting to 2D."); - mesh->mNumUVComponents[i] = 2; - } - } - } - } - - // If the information which primitive types are there in the - // mesh is currently not available, compute it. 
- if (!mesh->mPrimitiveTypes) { - for (unsigned int a = 0; a < mesh->mNumFaces; ++a) { - aiFace& face = mesh->mFaces[a]; - switch (face.mNumIndices) - { - case 3u: - mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - - case 2u: - mesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - - case 1u: - mesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - - default: - mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - break; - } - } - } - - // If tangents and normals are given but no bitangents compute them - if (mesh->mTangents && mesh->mNormals && !mesh->mBitangents) { - - mesh->mBitangents = new aiVector3D[mesh->mNumVertices]; - for (unsigned int i = 0; i < mesh->mNumVertices;++i) { - mesh->mBitangents[i] = mesh->mNormals[i] ^ mesh->mTangents[i]; - } - } -} - -// --------------------------------------------------------------------------------------------- -void ScenePreprocessor::ProcessAnimation (aiAnimation* anim) -{ - double first = 10e10, last = -10e10; - for (unsigned int i = 0; i < anim->mNumChannels;++i) { - aiNodeAnim* channel = anim->mChannels[i]; - - /* If the exact duration of the animation is not given - * compute it now. - */ - if (anim->mDuration == -1.) { - - // Position keys - for (unsigned int j = 0; j < channel->mNumPositionKeys;++j) { - aiVectorKey& key = channel->mPositionKeys[j]; - first = std::min (first, key.mTime); - last = std::max (last, key.mTime); - } - - // Scaling keys - for (unsigned int j = 0; j < channel->mNumScalingKeys;++j ) { - aiVectorKey& key = channel->mScalingKeys[j]; - first = std::min (first, key.mTime); - last = std::max (last, key.mTime); - } - - // Rotation keys - for (unsigned int j = 0; j < channel->mNumRotationKeys;++j ) { - aiQuatKey& key = channel->mRotationKeys[ j ]; - first = std::min (first, key.mTime); - last = std::max (last, key.mTime); - } - } - - /* Check whether the animation channel has no rotation - * or position tracks. In this case we generate a dummy - * track from the information we have in the transformation - * matrix of the corresponding node. - */ - if (!channel->mNumRotationKeys || !channel->mNumPositionKeys || !channel->mNumScalingKeys) { - // Find the node that belongs to this animation - aiNode* node = scene->mRootNode->FindNode(channel->mNodeName); - if (node) // ValidateDS will complain later if 'node' is NULL - { - // Decompose the transformation matrix of the node - aiVector3D scaling, position; - aiQuaternion rotation; - - node->mTransformation.Decompose(scaling, rotation,position); - - // No rotation keys? Generate a dummy track - if (!channel->mNumRotationKeys) { - channel->mNumRotationKeys = 1; - channel->mRotationKeys = new aiQuatKey[1]; - aiQuatKey& q = channel->mRotationKeys[0]; - - q.mTime = 0.; - q.mValue = rotation; - - ASSIMP_LOG_DEBUG("ScenePreprocessor: Dummy rotation track has been generated"); - } - - // No scaling keys? Generate a dummy track - if (!channel->mNumScalingKeys) { - channel->mNumScalingKeys = 1; - channel->mScalingKeys = new aiVectorKey[1]; - aiVectorKey& q = channel->mScalingKeys[0]; - - q.mTime = 0.; - q.mValue = scaling; - - ASSIMP_LOG_DEBUG("ScenePreprocessor: Dummy scaling track has been generated"); - } - - // No position keys? 
Generate a dummy track - if (!channel->mNumPositionKeys) { - channel->mNumPositionKeys = 1; - channel->mPositionKeys = new aiVectorKey[1]; - aiVectorKey& q = channel->mPositionKeys[0]; - - q.mTime = 0.; - q.mValue = position; - - ASSIMP_LOG_DEBUG("ScenePreprocessor: Dummy position track has been generated"); - } - } - } - } - - if (anim->mDuration == -1.) { - ASSIMP_LOG_DEBUG("ScenePreprocessor: Setting animation duration"); - anim->mDuration = last - std::min( first, 0. ); - } -} diff --git a/thirdparty/assimp/code/Common/ScenePreprocessor.h b/thirdparty/assimp/code/Common/ScenePreprocessor.h deleted file mode 100644 index 3f4c8d7c3f..0000000000 --- a/thirdparty/assimp/code/Common/ScenePreprocessor.h +++ /dev/null @@ -1,125 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to search all meshes for - degenerated faces */ -#ifndef AI_SCENE_PREPROCESSOR_H_INC -#define AI_SCENE_PREPROCESSOR_H_INC - -#include <assimp/defs.h> -#include <stddef.h> - -struct aiScene; -struct aiAnimation; -struct aiMesh; - -class ScenePreprocessorTest; -namespace Assimp { - -// ---------------------------------------------------------------------------------- -/** ScenePreprocessor: Preprocess a scene before any post-processing - * steps are executed. - * - * The step computes data that needn't necessarily be provided by the - * importer, such as aiMesh::mPrimitiveTypes. -*/ -// ---------------------------------------------------------------------------------- -class ASSIMP_API ScenePreprocessor -{ - // Make ourselves a friend of the corresponding test unit. - friend class ::ScenePreprocessorTest; -public: - - // ---------------------------------------------------------------- - /** Default c'tpr. Use SetScene() to assign a scene to the object. 
- */ - ScenePreprocessor() - : scene (NULL) - {} - - /** Constructs the object and assigns a specific scene to it - */ - ScenePreprocessor(aiScene* _scene) - : scene (_scene) - {} - - // ---------------------------------------------------------------- - /** Assign a (new) scene to the object. - * - * One 'SceneProcessor' can be used for multiple scenes. - * Call ProcessScene to have the scene preprocessed. - * @param sc Scene to be processed. - */ - void SetScene (aiScene* sc) { - scene = sc; - } - - // ---------------------------------------------------------------- - /** Preprocess the current scene - */ - void ProcessScene (); - -protected: - - // ---------------------------------------------------------------- - /** Preprocess an animation in the scene - * @param anim Anim to be preprocessed. - */ - void ProcessAnimation (aiAnimation* anim); - - - // ---------------------------------------------------------------- - /** Preprocess a mesh in the scene - * @param mesh Mesh to be preprocessed. - */ - void ProcessMesh (aiMesh* mesh); - -protected: - - //! Scene we're currently working on - aiScene* scene; -}; - - -} // ! end namespace Assimp - -#endif // include guard diff --git a/thirdparty/assimp/code/Common/ScenePrivate.h b/thirdparty/assimp/code/Common/ScenePrivate.h deleted file mode 100644 index f336aafc9a..0000000000 --- a/thirdparty/assimp/code/Common/ScenePrivate.h +++ /dev/null @@ -1,105 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Stuff to deal with aiScene::mPrivate - */ -#pragma once -#ifndef AI_SCENEPRIVATE_H_INCLUDED -#define AI_SCENEPRIVATE_H_INCLUDED - -#include <assimp/ai_assert.h> -#include <assimp/scene.h> - -namespace Assimp { - -// Forward declarations -class Importer; - -struct ScenePrivateData { - // The struct constructor. 
- ScenePrivateData() AI_NO_EXCEPT; - - // Importer that originally loaded the scene though the C-API - // If set, this object is owned by this private data instance. - Assimp::Importer* mOrigImporter; - - // List of post-processing steps already applied to the scene. - unsigned int mPPStepsApplied; - - // true if the scene is a copy made with aiCopyScene() - // or the corresponding C++ API. This means that user code - // may have made modifications to it, so mPPStepsApplied - // and mOrigImporter are no longer safe to rely on and only - // serve informative purposes. - bool mIsCopy; -}; - -inline -ScenePrivateData::ScenePrivateData() AI_NO_EXCEPT -: mOrigImporter( nullptr ) -, mPPStepsApplied( 0 ) -, mIsCopy( false ) { - // empty -} - -// Access private data stored in the scene -inline -ScenePrivateData* ScenePriv(aiScene* in) { - ai_assert( nullptr != in ); - if ( nullptr == in ) { - return nullptr; - } - return static_cast<ScenePrivateData*>(in->mPrivate); -} - -inline -const ScenePrivateData* ScenePriv(const aiScene* in) { - ai_assert( nullptr != in ); - if ( nullptr == in ) { - return nullptr; - } - return static_cast<const ScenePrivateData*>(in->mPrivate); -} - -} // Namespace Assimp - -#endif // AI_SCENEPRIVATE_H_INCLUDED diff --git a/thirdparty/assimp/code/Common/SkeletonMeshBuilder.cpp b/thirdparty/assimp/code/Common/SkeletonMeshBuilder.cpp deleted file mode 100644 index 06cfe034e9..0000000000 --- a/thirdparty/assimp/code/Common/SkeletonMeshBuilder.cpp +++ /dev/null @@ -1,270 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file SkeletonMeshBuilder.cpp - * @brief Implementation of a little class to construct a dummy mesh for a skeleton - */ - -#include <assimp/scene.h> -#include <assimp/SkeletonMeshBuilder.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// The constructor processes the given scene and adds a mesh there. -SkeletonMeshBuilder::SkeletonMeshBuilder( aiScene* pScene, aiNode* root, bool bKnobsOnly) -{ - // nothing to do if there's mesh data already present at the scene - if( pScene->mNumMeshes > 0 || pScene->mRootNode == NULL) - return; - - if (!root) - root = pScene->mRootNode; - - mKnobsOnly = bKnobsOnly; - - // build some faces around each node - CreateGeometry( root ); - - // create a mesh to hold all the generated faces - pScene->mNumMeshes = 1; - pScene->mMeshes = new aiMesh*[1]; - pScene->mMeshes[0] = CreateMesh(); - // and install it at the root node - root->mNumMeshes = 1; - root->mMeshes = new unsigned int[1]; - root->mMeshes[0] = 0; - - // create a dummy material for the mesh - if(pScene->mNumMaterials==0){ - pScene->mNumMaterials = 1; - pScene->mMaterials = new aiMaterial*[1]; - pScene->mMaterials[0] = CreateMaterial(); - } -} - -// ------------------------------------------------------------------------------------------------ -// Recursively builds a simple mesh representation for the given node -void SkeletonMeshBuilder::CreateGeometry( const aiNode* pNode) -{ - // add a joint entry for the node. - const unsigned int vertexStartIndex = static_cast<unsigned int>(mVertices.size()); - - // now build the geometry. - if( pNode->mNumChildren > 0 && !mKnobsOnly) - { - // If the node has children, we build little pointers to each of them - for( unsigned int a = 0; a < pNode->mNumChildren; a++) - { - // find a suitable coordinate system - const aiMatrix4x4& childTransform = pNode->mChildren[a]->mTransformation; - aiVector3D childpos( childTransform.a4, childTransform.b4, childTransform.c4); - ai_real distanceToChild = childpos.Length(); - if( distanceToChild < 0.0001) - continue; - aiVector3D up = aiVector3D( childpos).Normalize(); - - aiVector3D orth( 1.0, 0.0, 0.0); - if( std::fabs( orth * up) > 0.99) - orth.Set( 0.0, 1.0, 0.0); - - aiVector3D front = (up ^ orth).Normalize(); - aiVector3D side = (front ^ up).Normalize(); - - unsigned int localVertexStart = static_cast<unsigned int>(mVertices.size()); - mVertices.push_back( -front * distanceToChild * (ai_real)0.1); - mVertices.push_back( childpos); - mVertices.push_back( -side * distanceToChild * (ai_real)0.1); - mVertices.push_back( -side * distanceToChild * (ai_real)0.1); - mVertices.push_back( childpos); - mVertices.push_back( front * distanceToChild * (ai_real)0.1); - mVertices.push_back( front * distanceToChild * (ai_real)0.1); - mVertices.push_back( childpos); - mVertices.push_back( side * distanceToChild * (ai_real)0.1); - mVertices.push_back( side * distanceToChild * (ai_real)0.1); - mVertices.push_back( childpos); - mVertices.push_back( -front * distanceToChild * (ai_real)0.1); - - mFaces.push_back( Face( localVertexStart + 0, localVertexStart + 1, localVertexStart + 2)); - mFaces.push_back( Face( localVertexStart + 3, localVertexStart + 4, localVertexStart + 5)); - mFaces.push_back( Face( localVertexStart + 6, localVertexStart + 7, localVertexStart + 8)); - mFaces.push_back( Face( localVertexStart + 9, localVertexStart + 10, localVertexStart + 11)); - } - } - 
else - { - // if the node has no children, it's an end node. Put a little knob there instead - aiVector3D ownpos( pNode->mTransformation.a4, pNode->mTransformation.b4, pNode->mTransformation.c4); - ai_real sizeEstimate = ownpos.Length() * ai_real( 0.18 ); - - mVertices.push_back( aiVector3D( -sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, -sizeEstimate)); - mVertices.push_back( aiVector3D( 0.0, sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, -sizeEstimate)); - mVertices.push_back( aiVector3D( sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, -sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, -sizeEstimate)); - mVertices.push_back( aiVector3D( 0.0, -sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( -sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, -sizeEstimate)); - - mVertices.push_back( aiVector3D( -sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, sizeEstimate)); - mVertices.push_back( aiVector3D( 0.0, sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, sizeEstimate)); - mVertices.push_back( aiVector3D( sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( sizeEstimate, 0.0, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, sizeEstimate)); - mVertices.push_back( aiVector3D( 0.0, -sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, -sizeEstimate, 0.0)); - mVertices.push_back( aiVector3D( 0.0, 0.0, sizeEstimate)); - mVertices.push_back( aiVector3D( -sizeEstimate, 0.0, 0.0)); - - mFaces.push_back( Face( vertexStartIndex + 0, vertexStartIndex + 1, vertexStartIndex + 2)); - mFaces.push_back( Face( vertexStartIndex + 3, vertexStartIndex + 4, vertexStartIndex + 5)); - mFaces.push_back( Face( vertexStartIndex + 6, vertexStartIndex + 7, vertexStartIndex + 8)); - mFaces.push_back( Face( vertexStartIndex + 9, vertexStartIndex + 10, vertexStartIndex + 11)); - mFaces.push_back( Face( vertexStartIndex + 12, vertexStartIndex + 13, vertexStartIndex + 14)); - mFaces.push_back( Face( vertexStartIndex + 15, vertexStartIndex + 16, vertexStartIndex + 17)); - mFaces.push_back( Face( vertexStartIndex + 18, vertexStartIndex + 19, vertexStartIndex + 20)); - mFaces.push_back( Face( vertexStartIndex + 21, vertexStartIndex + 22, vertexStartIndex + 23)); - } - - unsigned int numVertices = static_cast<unsigned int>(mVertices.size() - vertexStartIndex); - if( numVertices > 0) - { - // create a bone affecting all the newly created vertices - aiBone* bone = new aiBone; - mBones.push_back( bone); - bone->mName = pNode->mName; - - // calculate the bone offset matrix by concatenating the inverse transformations of all parents - bone->mOffsetMatrix = aiMatrix4x4( pNode->mTransformation).Inverse(); - for( aiNode* parent = pNode->mParent; parent != NULL; parent = parent->mParent) - bone->mOffsetMatrix = aiMatrix4x4( parent->mTransformation).Inverse() * bone->mOffsetMatrix; - - // add all the vertices to the bone's influences - bone->mNumWeights = numVertices; - bone->mWeights = new aiVertexWeight[numVertices]; - for( unsigned int a = 0; a < numVertices; a++) - bone->mWeights[a] = aiVertexWeight( vertexStartIndex + a, 1.0); - - // HACK: (thom) transform all vertices to the bone's local space. Should be done before adding - // them to the array, but I'm tired now and I'm annoyed. 
- aiMatrix4x4 boneToMeshTransform = aiMatrix4x4( bone->mOffsetMatrix).Inverse(); - for( unsigned int a = vertexStartIndex; a < mVertices.size(); a++) - mVertices[a] = boneToMeshTransform * mVertices[a]; - } - - // and finally recurse into the children list - for( unsigned int a = 0; a < pNode->mNumChildren; a++) - CreateGeometry( pNode->mChildren[a]); -} - -// ------------------------------------------------------------------------------------------------ -// Creates the mesh from the internally accumulated stuff and returns it. -aiMesh* SkeletonMeshBuilder::CreateMesh() -{ - aiMesh* mesh = new aiMesh(); - - // add points - mesh->mNumVertices = static_cast<unsigned int>(mVertices.size()); - mesh->mVertices = new aiVector3D[mesh->mNumVertices]; - std::copy( mVertices.begin(), mVertices.end(), mesh->mVertices); - - mesh->mNormals = new aiVector3D[mesh->mNumVertices]; - - // add faces - mesh->mNumFaces = static_cast<unsigned int>(mFaces.size()); - mesh->mFaces = new aiFace[mesh->mNumFaces]; - for( unsigned int a = 0; a < mesh->mNumFaces; a++) - { - const Face& inface = mFaces[a]; - aiFace& outface = mesh->mFaces[a]; - outface.mNumIndices = 3; - outface.mIndices = new unsigned int[3]; - outface.mIndices[0] = inface.mIndices[0]; - outface.mIndices[1] = inface.mIndices[1]; - outface.mIndices[2] = inface.mIndices[2]; - - // Compute per-face normals ... we don't want the bones to be smoothed ... they're built to visualize - // the skeleton, so it's good if there's a visual difference to the rest of the geometry - aiVector3D nor = ((mVertices[inface.mIndices[2]] - mVertices[inface.mIndices[0]]) ^ - (mVertices[inface.mIndices[1]] - mVertices[inface.mIndices[0]])); - - if (nor.Length() < 1e-5) /* ensure that FindInvalidData won't remove us ...*/ - nor = aiVector3D(1.0,0.0,0.0); - - for (unsigned int n = 0; n < 3; ++n) - mesh->mNormals[inface.mIndices[n]] = nor; - } - - // add the bones - mesh->mNumBones = static_cast<unsigned int>(mBones.size()); - mesh->mBones = new aiBone*[mesh->mNumBones]; - std::copy( mBones.begin(), mBones.end(), mesh->mBones); - - // default - mesh->mMaterialIndex = 0; - - return mesh; -} - -// ------------------------------------------------------------------------------------------------ -// Creates a dummy material and returns it. -aiMaterial* SkeletonMeshBuilder::CreateMaterial() -{ - aiMaterial* matHelper = new aiMaterial; - - // Name - aiString matName( std::string( "SkeletonMaterial")); - matHelper->AddProperty( &matName, AI_MATKEY_NAME); - - // Prevent backface culling - const int no_cull = 1; - matHelper->AddProperty(&no_cull,1,AI_MATKEY_TWOSIDED); - - return matHelper; -} diff --git a/thirdparty/assimp/code/Common/SpatialSort.cpp b/thirdparty/assimp/code/Common/SpatialSort.cpp deleted file mode 100644 index a4f3a4e4b8..0000000000 --- a/thirdparty/assimp/code/Common/SpatialSort.cpp +++ /dev/null @@ -1,342 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the helper class to quickly find vertices close to a given position */ - -#include <assimp/SpatialSort.h> -#include <assimp/ai_assert.h> - -using namespace Assimp; - -// CHAR_BIT seems to be defined under MVSC, but not under GCC. Pray that the correct value is 8. -#ifndef CHAR_BIT -# define CHAR_BIT 8 -#endif - -// ------------------------------------------------------------------------------------------------ -// Constructs a spatially sorted representation from the given position array. -SpatialSort::SpatialSort( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset) - - // define the reference plane. We choose some arbitrary vector away from all basic axises - // in the hope that no model spreads all its vertices along this plane. 
- : mPlaneNormal(0.8523f, 0.34321f, 0.5736f) -{ - mPlaneNormal.Normalize(); - Fill(pPositions,pNumPositions,pElementOffset); -} - -// ------------------------------------------------------------------------------------------------ -SpatialSort :: SpatialSort() -: mPlaneNormal(0.8523f, 0.34321f, 0.5736f) -{ - mPlaneNormal.Normalize(); -} - -// ------------------------------------------------------------------------------------------------ -// Destructor -SpatialSort::~SpatialSort() -{ - // nothing to do here, everything destructs automatically -} - -// ------------------------------------------------------------------------------------------------ -void SpatialSort::Fill( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset, - bool pFinalize /*= true */) -{ - mPositions.clear(); - Append(pPositions,pNumPositions,pElementOffset,pFinalize); -} - -// ------------------------------------------------------------------------------------------------ -void SpatialSort :: Finalize() -{ - std::sort( mPositions.begin(), mPositions.end()); -} - -// ------------------------------------------------------------------------------------------------ -void SpatialSort::Append( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset, - bool pFinalize /*= true */) -{ - // store references to all given positions along with their distance to the reference plane - const size_t initial = mPositions.size(); - mPositions.reserve(initial + (pFinalize?pNumPositions:pNumPositions*2)); - for( unsigned int a = 0; a < pNumPositions; a++) - { - const char* tempPointer = reinterpret_cast<const char*> (pPositions); - const aiVector3D* vec = reinterpret_cast<const aiVector3D*> (tempPointer + a * pElementOffset); - - // store position by index and distance - ai_real distance = *vec * mPlaneNormal; - mPositions.push_back( Entry( static_cast<unsigned int>(a+initial), *vec, distance)); - } - - if (pFinalize) { - // now sort the array ascending by distance. - Finalize(); - } -} - -// ------------------------------------------------------------------------------------------------ -// Returns an iterator for all positions close to the given position. -void SpatialSort::FindPositions( const aiVector3D& pPosition, - ai_real pRadius, std::vector<unsigned int>& poResults) const -{ - const ai_real dist = pPosition * mPlaneNormal; - const ai_real minDist = dist - pRadius, maxDist = dist + pRadius; - - // clear the array - poResults.clear(); - - // quick check for positions outside the range - if( mPositions.size() == 0) - return; - if( maxDist < mPositions.front().mDistance) - return; - if( minDist > mPositions.back().mDistance) - return; - - // do a binary search for the minimal distance to start the iteration there - unsigned int index = (unsigned int)mPositions.size() / 2; - unsigned int binaryStepSize = (unsigned int)mPositions.size() / 4; - while( binaryStepSize > 1) - { - if( mPositions[index].mDistance < minDist) - index += binaryStepSize; - else - index -= binaryStepSize; - - binaryStepSize /= 2; - } - - // depending on the direction of the last step we need to single step a bit back or forth - // to find the actual beginning element of the range - while( index > 0 && mPositions[index].mDistance > minDist) - index--; - while( index < (mPositions.size() - 1) && mPositions[index].mDistance < minDist) - index++; - - // Mow start iterating from there until the first position lays outside of the distance range. 
- // Add all positions inside the distance range within the given radius to the result aray - std::vector<Entry>::const_iterator it = mPositions.begin() + index; - const ai_real pSquared = pRadius*pRadius; - while( it->mDistance < maxDist) - { - if( (it->mPosition - pPosition).SquareLength() < pSquared) - poResults.push_back( it->mIndex); - ++it; - if( it == mPositions.end()) - break; - } - - // that's it -} - -namespace { - - // Binary, signed-integer representation of a single-precision floating-point value. - // IEEE 754 says: "If two floating-point numbers in the same format are ordered then they are - // ordered the same way when their bits are reinterpreted as sign-magnitude integers." - // This allows us to convert all floating-point numbers to signed integers of arbitrary size - // and then use them to work with ULPs (Units in the Last Place, for high-precision - // computations) or to compare them (integer comparisons are faster than floating-point - // comparisons on many platforms). - typedef ai_int BinFloat; - - // -------------------------------------------------------------------------------------------- - // Converts the bit pattern of a floating-point number to its signed integer representation. - BinFloat ToBinary( const ai_real & pValue) { - - // If this assertion fails, signed int is not big enough to store a float on your platform. - // Please correct the declaration of BinFloat a few lines above - but do it in a portable, - // #ifdef'd manner! - static_assert( sizeof(BinFloat) >= sizeof(ai_real), "sizeof(BinFloat) >= sizeof(ai_real)"); - - #if defined( _MSC_VER) - // If this assertion fails, Visual C++ has finally moved to ILP64. This means that this - // code has just become legacy code! Find out the current value of _MSC_VER and modify - // the #if above so it evaluates false on the current and all upcoming VC versions (or - // on the current platform, if LP64 or LLP64 are still used on other platforms). - static_assert( sizeof(BinFloat) == sizeof(ai_real), "sizeof(BinFloat) == sizeof(ai_real)"); - - // This works best on Visual C++, but other compilers have their problems with it. - const BinFloat binValue = reinterpret_cast<BinFloat const &>(pValue); - #else - // On many compilers, reinterpreting a float address as an integer causes aliasing - // problems. This is an ugly but more or less safe way of doing it. - union { - ai_real asFloat; - BinFloat asBin; - } conversion; - conversion.asBin = 0; // zero empty space in case sizeof(BinFloat) > sizeof(float) - conversion.asFloat = pValue; - const BinFloat binValue = conversion.asBin; - #endif - - // floating-point numbers are of sign-magnitude format, so find out what signed number - // representation we must convert negative values to. - // See http://en.wikipedia.org/wiki/Signed_number_representations. - - // Two's complement? - if( (-42 == (~42 + 1)) && (binValue & 0x80000000)) - return BinFloat(1 << (CHAR_BIT * sizeof(BinFloat) - 1)) - binValue; - // One's complement? - else if ( (-42 == ~42) && (binValue & 0x80000000)) - return BinFloat(-0) - binValue; - // Sign-magnitude? - else if( (-42 == (42 | (-0))) && (binValue & 0x80000000)) // -0 = 1000... binary - return binValue; - else - return binValue; - } - -} // namespace - -// ------------------------------------------------------------------------------------------------ -// Fills an array with indices of all positions identical to the given position. 
In opposite to -// FindPositions(), not an epsilon is used but a (very low) tolerance of four floating-point units. -void SpatialSort::FindIdenticalPositions( const aiVector3D& pPosition, - std::vector<unsigned int>& poResults) const -{ - // Epsilons have a huge disadvantage: they are of constant precision, while floating-point - // values are of log2 precision. If you apply e=0.01 to 100, the epsilon is rather small, but - // if you apply it to 0.001, it is enormous. - - // The best way to overcome this is the unit in the last place (ULP). A precision of 2 ULPs - // tells us that a float does not differ more than 2 bits from the "real" value. ULPs are of - // logarithmic precision - around 1, they are 1*(2^24) and around 10000, they are 0.00125. - - // For standard C math, we can assume a precision of 0.5 ULPs according to IEEE 754. The - // incoming vertex positions might have already been transformed, probably using rather - // inaccurate SSE instructions, so we assume a tolerance of 4 ULPs to safely identify - // identical vertex positions. - static const int toleranceInULPs = 4; - // An interesting point is that the inaccuracy grows linear with the number of operations: - // multiplying to numbers, each inaccurate to four ULPs, results in an inaccuracy of four ULPs - // plus 0.5 ULPs for the multiplication. - // To compute the distance to the plane, a dot product is needed - that is a multiplication and - // an addition on each number. - static const int distanceToleranceInULPs = toleranceInULPs + 1; - // The squared distance between two 3D vectors is computed the same way, but with an additional - // subtraction. - static const int distance3DToleranceInULPs = distanceToleranceInULPs + 1; - - // Convert the plane distance to its signed integer representation so the ULPs tolerance can be - // applied. For some reason, VC won't optimize two calls of the bit pattern conversion. - const BinFloat minDistBinary = ToBinary( pPosition * mPlaneNormal) - distanceToleranceInULPs; - const BinFloat maxDistBinary = minDistBinary + 2 * distanceToleranceInULPs; - - // clear the array in this strange fashion because a simple clear() would also deallocate - // the array which we want to avoid - poResults.resize( 0 ); - - // do a binary search for the minimal distance to start the iteration there - unsigned int index = (unsigned int)mPositions.size() / 2; - unsigned int binaryStepSize = (unsigned int)mPositions.size() / 4; - while( binaryStepSize > 1) - { - // Ugly, but conditional jumps are faster with integers than with floats - if( minDistBinary > ToBinary(mPositions[index].mDistance)) - index += binaryStepSize; - else - index -= binaryStepSize; - - binaryStepSize /= 2; - } - - // depending on the direction of the last step we need to single step a bit back or forth - // to find the actual beginning element of the range - while( index > 0 && minDistBinary < ToBinary(mPositions[index].mDistance) ) - index--; - while( index < (mPositions.size() - 1) && minDistBinary > ToBinary(mPositions[index].mDistance)) - index++; - - // Now start iterating from there until the first position lays outside of the distance range. 
- // Add all positions inside the distance range within the tolerance to the result array - std::vector<Entry>::const_iterator it = mPositions.begin() + index; - while( ToBinary(it->mDistance) < maxDistBinary) - { - if( distance3DToleranceInULPs >= ToBinary((it->mPosition - pPosition).SquareLength())) - poResults.push_back(it->mIndex); - ++it; - if( it == mPositions.end()) - break; - } - - // that's it -} - -// ------------------------------------------------------------------------------------------------ -unsigned int SpatialSort::GenerateMappingTable(std::vector<unsigned int>& fill, ai_real pRadius) const -{ - fill.resize(mPositions.size(),UINT_MAX); - ai_real dist, maxDist; - - unsigned int t=0; - const ai_real pSquared = pRadius*pRadius; - for (size_t i = 0; i < mPositions.size();) { - dist = mPositions[i].mPosition * mPlaneNormal; - maxDist = dist + pRadius; - - fill[mPositions[i].mIndex] = t; - const aiVector3D& oldpos = mPositions[i].mPosition; - for (++i; i < fill.size() && mPositions[i].mDistance < maxDist - && (mPositions[i].mPosition - oldpos).SquareLength() < pSquared; ++i) - { - fill[mPositions[i].mIndex] = t; - } - ++t; - } - -#ifdef ASSIMP_BUILD_DEBUG - - // debug invariant: mPositions[i].mIndex values must range from 0 to mPositions.size()-1 - for (size_t i = 0; i < fill.size(); ++i) { - ai_assert(fill[i]<mPositions.size()); - } - -#endif - return t; -} diff --git a/thirdparty/assimp/code/Common/SplitByBoneCountProcess.cpp b/thirdparty/assimp/code/Common/SplitByBoneCountProcess.cpp deleted file mode 100644 index 2ef66a9afc..0000000000 --- a/thirdparty/assimp/code/Common/SplitByBoneCountProcess.cpp +++ /dev/null @@ -1,407 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - - -/// @file SplitByBoneCountProcess.cpp -/// Implementation of the SplitByBoneCount postprocessing step - -// internal headers of the post-processing framework -#include "SplitByBoneCountProcess.h" -#include <assimp/postprocess.h> -#include <assimp/DefaultLogger.hpp> - -#include <limits> -#include <assimp/TinyFormatter.h> - -using namespace Assimp; -using namespace Assimp::Formatter; - -// ------------------------------------------------------------------------------------------------ -// Constructor -SplitByBoneCountProcess::SplitByBoneCountProcess() -{ - // set default, might be overridden by importer config - mMaxBoneCount = AI_SBBC_DEFAULT_MAX_BONES; -} - -// ------------------------------------------------------------------------------------------------ -// Destructor -SplitByBoneCountProcess::~SplitByBoneCountProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag. -bool SplitByBoneCountProcess::IsActive( unsigned int pFlags) const -{ - return !!(pFlags & aiProcess_SplitByBoneCount); -} - -// ------------------------------------------------------------------------------------------------ -// Updates internal properties -void SplitByBoneCountProcess::SetupProperties(const Importer* pImp) -{ - mMaxBoneCount = pImp->GetPropertyInteger(AI_CONFIG_PP_SBBC_MAX_BONES,AI_SBBC_DEFAULT_MAX_BONES); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void SplitByBoneCountProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("SplitByBoneCountProcess begin"); - - // early out - bool isNecessary = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a) - if( pScene->mMeshes[a]->mNumBones > mMaxBoneCount ) - isNecessary = true; - - if( !isNecessary ) - { - ASSIMP_LOG_DEBUG( format() << "SplitByBoneCountProcess early-out: no meshes with more than " << mMaxBoneCount << " bones." ); - return; - } - - // we need to do something. Let's go. - mSubMeshIndices.clear(); - mSubMeshIndices.resize( pScene->mNumMeshes); - - // build a new array of meshes for the scene - std::vector<aiMesh*> meshes; - - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a) - { - aiMesh* srcMesh = pScene->mMeshes[a]; - - std::vector<aiMesh*> newMeshes; - SplitMesh( pScene->mMeshes[a], newMeshes); - - // mesh was split - if( !newMeshes.empty() ) - { - // store new meshes and indices of the new meshes - for( unsigned int b = 0; b < newMeshes.size(); ++b) - { - mSubMeshIndices[a].push_back( static_cast<unsigned int>(meshes.size())); - meshes.push_back( newMeshes[b]); - } - - // and destroy the source mesh. 
It should be completely contained inside the new submeshes - delete srcMesh; - } - else - { - // Mesh is kept unchanged - store it's new place in the mesh array - mSubMeshIndices[a].push_back( static_cast<unsigned int>(meshes.size())); - meshes.push_back( srcMesh); - } - } - - // rebuild the scene's mesh array - pScene->mNumMeshes = static_cast<unsigned int>(meshes.size()); - delete [] pScene->mMeshes; - pScene->mMeshes = new aiMesh*[pScene->mNumMeshes]; - std::copy( meshes.begin(), meshes.end(), pScene->mMeshes); - - // recurse through all nodes and translate the node's mesh indices to fit the new mesh array - UpdateNode( pScene->mRootNode); - - ASSIMP_LOG_DEBUG( format() << "SplitByBoneCountProcess end: split " << mSubMeshIndices.size() << " meshes into " << meshes.size() << " submeshes." ); -} - -// ------------------------------------------------------------------------------------------------ -// Splits the given mesh by bone count. -void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const -{ - // skip if not necessary - if( pMesh->mNumBones <= mMaxBoneCount ) - return; - - // necessary optimisation: build a list of all affecting bones for each vertex - // TODO: (thom) maybe add a custom allocator here to avoid allocating tens of thousands of small arrays - typedef std::pair<unsigned int, float> BoneWeight; - std::vector< std::vector<BoneWeight> > vertexBones( pMesh->mNumVertices); - for( unsigned int a = 0; a < pMesh->mNumBones; ++a) - { - const aiBone* bone = pMesh->mBones[a]; - for( unsigned int b = 0; b < bone->mNumWeights; ++b) - vertexBones[ bone->mWeights[b].mVertexId ].push_back( BoneWeight( a, bone->mWeights[b].mWeight)); - } - - unsigned int numFacesHandled = 0; - std::vector<bool> isFaceHandled( pMesh->mNumFaces, false); - while( numFacesHandled < pMesh->mNumFaces ) - { - // which bones are used in the current submesh - unsigned int numBones = 0; - std::vector<bool> isBoneUsed( pMesh->mNumBones, false); - // indices of the faces which are going to go into this submesh - std::vector<unsigned int> subMeshFaces; - subMeshFaces.reserve( pMesh->mNumFaces); - // accumulated vertex count of all the faces in this submesh - unsigned int numSubMeshVertices = 0; - // a small local array of new bones for the current face. State of all used bones for that face - // can only be updated AFTER the face is completely analysed. Thanks to imre for the fix. - std::vector<unsigned int> newBonesAtCurrentFace; - - // add faces to the new submesh as long as all bones affecting the faces' vertices fit in the limit - for( unsigned int a = 0; a < pMesh->mNumFaces; ++a) - { - // skip if the face is already stored in a submesh - if( isFaceHandled[a] ) - continue; - - const aiFace& face = pMesh->mFaces[a]; - // check every vertex if its bones would still fit into the current submesh - for( unsigned int b = 0; b < face.mNumIndices; ++b ) - { - const std::vector<BoneWeight>& vb = vertexBones[face.mIndices[b]]; - for( unsigned int c = 0; c < vb.size(); ++c) - { - unsigned int boneIndex = vb[c].first; - // if the bone is already used in this submesh, it's ok - if( isBoneUsed[boneIndex] ) - continue; - - // if it's not used, yet, we would need to add it. 
Store its bone index - if( std::find( newBonesAtCurrentFace.begin(), newBonesAtCurrentFace.end(), boneIndex) == newBonesAtCurrentFace.end() ) - newBonesAtCurrentFace.push_back( boneIndex); - } - } - - // leave out the face if the new bones required for this face don't fit the bone count limit anymore - if( numBones + newBonesAtCurrentFace.size() > mMaxBoneCount ) - continue; - - // mark all new bones as necessary - while( !newBonesAtCurrentFace.empty() ) - { - unsigned int newIndex = newBonesAtCurrentFace.back(); - newBonesAtCurrentFace.pop_back(); // this also avoids the deallocation which comes with a clear() - if( isBoneUsed[newIndex] ) - continue; - - isBoneUsed[newIndex] = true; - numBones++; - } - - // store the face index and the vertex count - subMeshFaces.push_back( a); - numSubMeshVertices += face.mNumIndices; - - // remember that this face is handled - isFaceHandled[a] = true; - numFacesHandled++; - } - - // create a new mesh to hold this subset of the source mesh - aiMesh* newMesh = new aiMesh; - if( pMesh->mName.length > 0 ) - newMesh->mName.Set( format() << pMesh->mName.data << "_sub" << poNewMeshes.size()); - newMesh->mMaterialIndex = pMesh->mMaterialIndex; - newMesh->mPrimitiveTypes = pMesh->mPrimitiveTypes; - poNewMeshes.push_back( newMesh); - - // create all the arrays for this mesh if the old mesh contained them - newMesh->mNumVertices = numSubMeshVertices; - newMesh->mNumFaces = static_cast<unsigned int>(subMeshFaces.size()); - newMesh->mVertices = new aiVector3D[newMesh->mNumVertices]; - if( pMesh->HasNormals() ) - newMesh->mNormals = new aiVector3D[newMesh->mNumVertices]; - if( pMesh->HasTangentsAndBitangents() ) - { - newMesh->mTangents = new aiVector3D[newMesh->mNumVertices]; - newMesh->mBitangents = new aiVector3D[newMesh->mNumVertices]; - } - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a ) - { - if( pMesh->HasTextureCoords( a) ) - newMesh->mTextureCoords[a] = new aiVector3D[newMesh->mNumVertices]; - newMesh->mNumUVComponents[a] = pMesh->mNumUVComponents[a]; - } - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; ++a ) - { - if( pMesh->HasVertexColors( a) ) - newMesh->mColors[a] = new aiColor4D[newMesh->mNumVertices]; - } - - // and copy over the data, generating faces with linear indices along the way - newMesh->mFaces = new aiFace[subMeshFaces.size()]; - unsigned int nvi = 0; // next vertex index - std::vector<unsigned int> previousVertexIndices( numSubMeshVertices, std::numeric_limits<unsigned int>::max()); // per new vertex: its index in the source mesh - for( unsigned int a = 0; a < subMeshFaces.size(); ++a ) - { - const aiFace& srcFace = pMesh->mFaces[subMeshFaces[a]]; - aiFace& dstFace = newMesh->mFaces[a]; - dstFace.mNumIndices = srcFace.mNumIndices; - dstFace.mIndices = new unsigned int[dstFace.mNumIndices]; - - // accumulate linearly all the vertices of the source face - for( unsigned int b = 0; b < dstFace.mNumIndices; ++b ) - { - unsigned int srcIndex = srcFace.mIndices[b]; - dstFace.mIndices[b] = nvi; - previousVertexIndices[nvi] = srcIndex; - - newMesh->mVertices[nvi] = pMesh->mVertices[srcIndex]; - if( pMesh->HasNormals() ) - newMesh->mNormals[nvi] = pMesh->mNormals[srcIndex]; - if( pMesh->HasTangentsAndBitangents() ) - { - newMesh->mTangents[nvi] = pMesh->mTangents[srcIndex]; - newMesh->mBitangents[nvi] = pMesh->mBitangents[srcIndex]; - } - for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++c ) - { - if( pMesh->HasTextureCoords( c) ) - newMesh->mTextureCoords[c][nvi] = pMesh->mTextureCoords[c][srcIndex]; - } - 
for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS; ++c ) - { - if( pMesh->HasVertexColors( c) ) - newMesh->mColors[c][nvi] = pMesh->mColors[c][srcIndex]; - } - - nvi++; - } - } - - ai_assert( nvi == numSubMeshVertices ); - - // Create the bones for the new submesh: first create the bone array - newMesh->mNumBones = 0; - newMesh->mBones = new aiBone*[numBones]; - - std::vector<unsigned int> mappedBoneIndex( pMesh->mNumBones, std::numeric_limits<unsigned int>::max()); - for( unsigned int a = 0; a < pMesh->mNumBones; ++a ) - { - if( !isBoneUsed[a] ) - continue; - - // create the new bone - const aiBone* srcBone = pMesh->mBones[a]; - aiBone* dstBone = new aiBone; - mappedBoneIndex[a] = newMesh->mNumBones; - newMesh->mBones[newMesh->mNumBones++] = dstBone; - dstBone->mName = srcBone->mName; - dstBone->mOffsetMatrix = srcBone->mOffsetMatrix; - dstBone->mNumWeights = 0; - } - - ai_assert( newMesh->mNumBones == numBones ); - - // iterate over all new vertices and count which bones affected its old vertex in the source mesh - for( unsigned int a = 0; a < numSubMeshVertices; ++a ) - { - unsigned int oldIndex = previousVertexIndices[a]; - const std::vector<BoneWeight>& bonesOnThisVertex = vertexBones[oldIndex]; - - for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b ) - { - unsigned int newBoneIndex = mappedBoneIndex[ bonesOnThisVertex[b].first ]; - if( newBoneIndex != std::numeric_limits<unsigned int>::max() ) - newMesh->mBones[newBoneIndex]->mNumWeights++; - } - } - - // allocate all bone weight arrays accordingly - for( unsigned int a = 0; a < newMesh->mNumBones; ++a ) - { - aiBone* bone = newMesh->mBones[a]; - ai_assert( bone->mNumWeights > 0 ); - bone->mWeights = new aiVertexWeight[bone->mNumWeights]; - bone->mNumWeights = 0; // for counting up in the next step - } - - // now copy all the bone vertex weights for all the vertices which made it into the new submesh - for( unsigned int a = 0; a < numSubMeshVertices; ++a) - { - // find the source vertex for it in the source mesh - unsigned int previousIndex = previousVertexIndices[a]; - // these bones were affecting it - const std::vector<BoneWeight>& bonesOnThisVertex = vertexBones[previousIndex]; - // all of the bones affecting it should be present in the new submesh, or else - // the face it comprises shouldn't be present - for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b) - { - unsigned int newBoneIndex = mappedBoneIndex[ bonesOnThisVertex[b].first ]; - ai_assert( newBoneIndex != std::numeric_limits<unsigned int>::max() ); - aiVertexWeight* dstWeight = newMesh->mBones[newBoneIndex]->mWeights + newMesh->mBones[newBoneIndex]->mNumWeights; - newMesh->mBones[newBoneIndex]->mNumWeights++; - - dstWeight->mVertexId = a; - dstWeight->mWeight = bonesOnThisVertex[b].second; - } - } - - // I have the strange feeling that this will break apart at some point in time... 
- } -} - -// ------------------------------------------------------------------------------------------------ -// Recursively updates the node's mesh list to account for the changed mesh list -void SplitByBoneCountProcess::UpdateNode( aiNode* pNode) const -{ - // rebuild the node's mesh index list - if( pNode->mNumMeshes > 0 ) - { - std::vector<unsigned int> newMeshList; - for( unsigned int a = 0; a < pNode->mNumMeshes; ++a) - { - unsigned int srcIndex = pNode->mMeshes[a]; - const std::vector<unsigned int>& replaceMeshes = mSubMeshIndices[srcIndex]; - newMeshList.insert( newMeshList.end(), replaceMeshes.begin(), replaceMeshes.end()); - } - - delete [] pNode->mMeshes; - pNode->mNumMeshes = static_cast<unsigned int>(newMeshList.size()); - pNode->mMeshes = new unsigned int[pNode->mNumMeshes]; - std::copy( newMeshList.begin(), newMeshList.end(), pNode->mMeshes); - } - - // do that also recursively for all children - for( unsigned int a = 0; a < pNode->mNumChildren; ++a ) - { - UpdateNode( pNode->mChildren[a]); - } -} diff --git a/thirdparty/assimp/code/Common/SplitByBoneCountProcess.h b/thirdparty/assimp/code/Common/SplitByBoneCountProcess.h deleted file mode 100644 index 6c904a9df4..0000000000 --- a/thirdparty/assimp/code/Common/SplitByBoneCountProcess.h +++ /dev/null @@ -1,111 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/// @file SplitByBoneCountProcess.h -/// Defines a post processing step to split meshes with many bones into submeshes -#ifndef AI_SPLITBYBONECOUNTPROCESS_H_INC -#define AI_SPLITBYBONECOUNTPROCESS_H_INC - -#include <vector> -#include "BaseProcess.h" - -#include <assimp/mesh.h> -#include <assimp/scene.h> - -namespace Assimp -{ - - -/** Postprocessing filter to split meshes with many bones into submeshes - * so that each submesh has a certain max bone count. 
- * - * Applied BEFORE the JoinVertices-Step occurs. - * Returns NON-UNIQUE vertices, splits by bone count. -*/ -class SplitByBoneCountProcess : public BaseProcess -{ -public: - - SplitByBoneCountProcess(); - ~SplitByBoneCountProcess(); - -public: - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. A - * bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - bool IsActive( unsigned int pFlags) const; - - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - virtual void SetupProperties(const Importer* pImp); - -protected: - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - /// Splits the given mesh by bone count. - /// @param pMesh the Mesh to split. Is not changed at all, but might be superfluous in case it was split. - /// @param poNewMeshes Array of submeshes created in the process. Empty if splitting was not necessary. - void SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const; - - /// Recursively updates the node's mesh list to account for the changed mesh list - void UpdateNode( aiNode* pNode) const; - -public: - /// Max bone count. Splitting occurs if a mesh has more than that number of bones. - size_t mMaxBoneCount; - - /// Per mesh index: Array of indices of the new submeshes. - std::vector< std::vector<unsigned int> > mSubMeshIndices; -}; - -} // end of namespace Assimp - -#endif // !!AI_SPLITBYBONECOUNTPROCESS_H_INC diff --git a/thirdparty/assimp/code/Common/StandardShapes.cpp b/thirdparty/assimp/code/Common/StandardShapes.cpp deleted file mode 100644 index 2e5100130f..0000000000 --- a/thirdparty/assimp/code/Common/StandardShapes.cpp +++ /dev/null @@ -1,507 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file StandardShapes.cpp - * @brief Implementation of the StandardShapes class - * - * The primitive geometry data comes from - * http://geometrictools.com/Documentation/PlatonicSolids.pdf. - */ - -#include <assimp/StandardShapes.h> -#include <assimp/StringComparison.h> -#include <stddef.h> -#include <assimp/Defines.h> -#include <assimp/mesh.h> - -namespace Assimp { - - -# define ADD_TRIANGLE(n0,n1,n2) \ - positions.push_back(n0); \ - positions.push_back(n1); \ - positions.push_back(n2); - -# define ADD_PENTAGON(n0,n1,n2,n3,n4) \ - if (polygons) \ - { \ - positions.push_back(n0); \ - positions.push_back(n1); \ - positions.push_back(n2); \ - positions.push_back(n3); \ - positions.push_back(n4); \ - } \ - else \ - { \ - ADD_TRIANGLE(n0, n1, n2) \ - ADD_TRIANGLE(n0, n2, n3) \ - ADD_TRIANGLE(n0, n3, n4) \ - } - -# define ADD_QUAD(n0,n1,n2,n3) \ - if (polygons) \ - { \ - positions.push_back(n0); \ - positions.push_back(n1); \ - positions.push_back(n2); \ - positions.push_back(n3); \ - } \ - else \ - { \ - ADD_TRIANGLE(n0, n1, n2) \ - ADD_TRIANGLE(n0, n2, n3) \ - } - - -// ------------------------------------------------------------------------------------------------ -// Fast subdivision for a mesh whose verts have a magnitude of 1 -void Subdivide(std::vector<aiVector3D>& positions) -{ - // assume this to be constant - (fixme: must be 1.0? 
I think so) - const ai_real fl1 = positions[0].Length(); - - unsigned int origSize = (unsigned int)positions.size(); - for (unsigned int i = 0 ; i < origSize ; i+=3) - { - aiVector3D& tv0 = positions[i]; - aiVector3D& tv1 = positions[i+1]; - aiVector3D& tv2 = positions[i+2]; - - aiVector3D a = tv0, b = tv1, c = tv2; - aiVector3D v1 = aiVector3D(a.x+b.x, a.y+b.y, a.z+b.z).Normalize()*fl1; - aiVector3D v2 = aiVector3D(a.x+c.x, a.y+c.y, a.z+c.z).Normalize()*fl1; - aiVector3D v3 = aiVector3D(b.x+c.x, b.y+c.y, b.z+c.z).Normalize()*fl1; - - tv0 = v1; tv1 = v3; tv2 = v2; // overwrite the original - ADD_TRIANGLE(v1, v2, a); - ADD_TRIANGLE(v2, v3, c); - ADD_TRIANGLE(v3, v1, b); - } -} - -// ------------------------------------------------------------------------------------------------ -// Construct a mesh from given vertex positions -aiMesh* StandardShapes::MakeMesh(const std::vector<aiVector3D>& positions, - unsigned int numIndices) -{ - if (positions.empty() || !numIndices) return NULL; - - // Determine which kinds of primitives the mesh consists of - aiMesh* out = new aiMesh(); - switch (numIndices) { - case 1: - out->mPrimitiveTypes = aiPrimitiveType_POINT; - break; - case 2: - out->mPrimitiveTypes = aiPrimitiveType_LINE; - break; - case 3: - out->mPrimitiveTypes = aiPrimitiveType_TRIANGLE; - break; - default: - out->mPrimitiveTypes = aiPrimitiveType_POLYGON; - break; - }; - - out->mNumFaces = (unsigned int)positions.size() / numIndices; - out->mFaces = new aiFace[out->mNumFaces]; - for (unsigned int i = 0, a = 0; i < out->mNumFaces;++i) { - aiFace& f = out->mFaces[i]; - f.mNumIndices = numIndices; - f.mIndices = new unsigned int[numIndices]; - for (unsigned int j = 0; i < numIndices; ++i, ++a) { - f.mIndices[j] = a; - } - } - out->mNumVertices = (unsigned int)positions.size(); - out->mVertices = new aiVector3D[out->mNumVertices]; - ::memcpy(out->mVertices,&positions[0],out->mNumVertices*sizeof(aiVector3D)); - - return out; -} - -// ------------------------------------------------------------------------------------------------ -// Construct a mesh with a specific shape (callback) -aiMesh* StandardShapes::MakeMesh ( unsigned int (*GenerateFunc)( - std::vector<aiVector3D>&)) -{ - std::vector<aiVector3D> temp; - unsigned num = (*GenerateFunc)(temp); - return MakeMesh(temp,num); -} - -// ------------------------------------------------------------------------------------------------ -// Construct a mesh with a specific shape (callback) -aiMesh* StandardShapes::MakeMesh ( unsigned int (*GenerateFunc)( - std::vector<aiVector3D>&, bool)) -{ - std::vector<aiVector3D> temp; - unsigned num = (*GenerateFunc)(temp,true); - return MakeMesh(temp,num); -} - -// ------------------------------------------------------------------------------------------------ -// Construct a mesh with a specific shape (callback) -aiMesh* StandardShapes::MakeMesh (unsigned int num, void (*GenerateFunc)( - unsigned int,std::vector<aiVector3D>&)) -{ - std::vector<aiVector3D> temp; - (*GenerateFunc)(num,temp); - return MakeMesh(temp,3); -} - -// ------------------------------------------------------------------------------------------------ -// Build an incosahedron with points.magnitude == 1 -unsigned int StandardShapes::MakeIcosahedron(std::vector<aiVector3D>& positions) -{ - positions.reserve(positions.size()+60); - - const ai_real t = ( ai_real( 1.0 )+ ai_real( 2.236067977 ) ) / ai_real( 2.0 ); - const ai_real s = std::sqrt(ai_real(1.0) + t*t); - - const aiVector3D v0 = aiVector3D(t,1.0, 0.0)/s; - const aiVector3D v1 = 
aiVector3D(-t,1.0, 0.0)/s; - const aiVector3D v2 = aiVector3D(t,-1.0, 0.0)/s; - const aiVector3D v3 = aiVector3D(-t,-1.0, 0.0)/s; - const aiVector3D v4 = aiVector3D(1.0, 0.0, t)/s; - const aiVector3D v5 = aiVector3D(1.0, 0.0,-t)/s; - const aiVector3D v6 = aiVector3D(-1.0, 0.0,t)/s; - const aiVector3D v7 = aiVector3D(-1.0, 0.0,-t)/s; - const aiVector3D v8 = aiVector3D(0.0, t, 1.0)/s; - const aiVector3D v9 = aiVector3D(0.0,-t, 1.0)/s; - const aiVector3D v10 = aiVector3D(0.0, t,-1.0)/s; - const aiVector3D v11 = aiVector3D(0.0,-t,-1.0)/s; - - ADD_TRIANGLE(v0,v8,v4); - ADD_TRIANGLE(v0,v5,v10); - ADD_TRIANGLE(v2,v4,v9); - ADD_TRIANGLE(v2,v11,v5); - - ADD_TRIANGLE(v1,v6,v8); - ADD_TRIANGLE(v1,v10,v7); - ADD_TRIANGLE(v3,v9,v6); - ADD_TRIANGLE(v3,v7,v11); - - ADD_TRIANGLE(v0,v10,v8); - ADD_TRIANGLE(v1,v8,v10); - ADD_TRIANGLE(v2,v9,v11); - ADD_TRIANGLE(v3,v11,v9); - - ADD_TRIANGLE(v4,v2,v0); - ADD_TRIANGLE(v5,v0,v2); - ADD_TRIANGLE(v6,v1,v3); - ADD_TRIANGLE(v7,v3,v1); - - ADD_TRIANGLE(v8,v6,v4); - ADD_TRIANGLE(v9,v4,v6); - ADD_TRIANGLE(v10,v5,v7); - ADD_TRIANGLE(v11,v7,v5); - return 3; -} - -// ------------------------------------------------------------------------------------------------ -// Build a dodecahedron with points.magnitude == 1 -unsigned int StandardShapes::MakeDodecahedron(std::vector<aiVector3D>& positions, - bool polygons /*= false*/) -{ - positions.reserve(positions.size()+108); - - const ai_real a = ai_real( 1.0 ) / ai_real(1.7320508); - const ai_real b = std::sqrt(( ai_real( 3.0 )- ai_real( 2.23606797))/ ai_real( 6.0) ); - const ai_real c = std::sqrt(( ai_real( 3.0 )+ ai_real( 2.23606797f))/ ai_real( 6.0) ); - - const aiVector3D v0 = aiVector3D(a,a,a); - const aiVector3D v1 = aiVector3D(a,a,-a); - const aiVector3D v2 = aiVector3D(a,-a,a); - const aiVector3D v3 = aiVector3D(a,-a,-a); - const aiVector3D v4 = aiVector3D(-a,a,a); - const aiVector3D v5 = aiVector3D(-a,a,-a); - const aiVector3D v6 = aiVector3D(-a,-a,a); - const aiVector3D v7 = aiVector3D(-a,-a,-a); - const aiVector3D v8 = aiVector3D(b,c,0.0); - const aiVector3D v9 = aiVector3D(-b,c,0.0); - const aiVector3D v10 = aiVector3D(b,-c,0.0); - const aiVector3D v11 = aiVector3D(-b,-c,0.0); - const aiVector3D v12 = aiVector3D(c, 0.0, b); - const aiVector3D v13 = aiVector3D(c, 0.0, -b); - const aiVector3D v14 = aiVector3D(-c, 0.0, b); - const aiVector3D v15 = aiVector3D(-c, 0.0, -b); - const aiVector3D v16 = aiVector3D(0.0, b, c); - const aiVector3D v17 = aiVector3D(0.0, -b, c); - const aiVector3D v18 = aiVector3D(0.0, b, -c); - const aiVector3D v19 = aiVector3D(0.0, -b, -c); - - ADD_PENTAGON(v0, v8, v9, v4, v16); - ADD_PENTAGON(v0, v12, v13, v1, v8); - ADD_PENTAGON(v0, v16, v17, v2, v12); - ADD_PENTAGON(v8, v1, v18, v5, v9); - ADD_PENTAGON(v12, v2, v10, v3, v13); - ADD_PENTAGON(v16, v4, v14, v6, v17); - ADD_PENTAGON(v9, v5, v15, v14, v4); - - ADD_PENTAGON(v6, v11, v10, v2, v17); - ADD_PENTAGON(v3, v19, v18, v1, v13); - ADD_PENTAGON(v7, v15, v5, v18, v19); - ADD_PENTAGON(v7, v11, v6, v14, v15); - ADD_PENTAGON(v7, v19, v3, v10, v11); - return (polygons ? 
5 : 3); -} - -// ------------------------------------------------------------------------------------------------ -// Build an octahedron with points.magnitude == 1 -unsigned int StandardShapes::MakeOctahedron(std::vector<aiVector3D>& positions) -{ - positions.reserve(positions.size()+24); - - const aiVector3D v0 = aiVector3D(1.0, 0.0, 0.0) ; - const aiVector3D v1 = aiVector3D(-1.0, 0.0, 0.0); - const aiVector3D v2 = aiVector3D(0.0, 1.0, 0.0); - const aiVector3D v3 = aiVector3D(0.0, -1.0, 0.0); - const aiVector3D v4 = aiVector3D(0.0, 0.0, 1.0); - const aiVector3D v5 = aiVector3D(0.0, 0.0, -1.0); - - ADD_TRIANGLE(v4,v0,v2); - ADD_TRIANGLE(v4,v2,v1); - ADD_TRIANGLE(v4,v1,v3); - ADD_TRIANGLE(v4,v3,v0); - - ADD_TRIANGLE(v5,v2,v0); - ADD_TRIANGLE(v5,v1,v2); - ADD_TRIANGLE(v5,v3,v1); - ADD_TRIANGLE(v5,v0,v3); - return 3; -} - -// ------------------------------------------------------------------------------------------------ -// Build a tetrahedron with points.magnitude == 1 -unsigned int StandardShapes::MakeTetrahedron(std::vector<aiVector3D>& positions) -{ - positions.reserve(positions.size()+9); - - const ai_real invThree = ai_real( 1.0 ) / ai_real( 3.0 ); - const ai_real a = ai_real( 1.41421 ) * invThree; - const ai_real b = ai_real( 2.4494 ) * invThree; - - const aiVector3D v0 = aiVector3D(0.0,0.0,1.0); - const aiVector3D v1 = aiVector3D(2*a,0,-invThree ); - const aiVector3D v2 = aiVector3D(-a,b,-invThree ); - const aiVector3D v3 = aiVector3D(-a,-b,-invThree ); - - ADD_TRIANGLE(v0,v1,v2); - ADD_TRIANGLE(v0,v2,v3); - ADD_TRIANGLE(v0,v3,v1); - ADD_TRIANGLE(v1,v3,v2); - return 3; -} - -// ------------------------------------------------------------------------------------------------ -// Build a hexahedron with points.magnitude == 1 -unsigned int StandardShapes::MakeHexahedron(std::vector<aiVector3D>& positions, - bool polygons /*= false*/) -{ - positions.reserve(positions.size()+36); - const ai_real length = ai_real(1.0)/ai_real(1.73205080); - - const aiVector3D v0 = aiVector3D(-1.0,-1.0,-1.0)*length; - const aiVector3D v1 = aiVector3D(1.0,-1.0,-1.0)*length; - const aiVector3D v2 = aiVector3D(1.0,1.0,-1.0)*length; - const aiVector3D v3 = aiVector3D(-1.0,1.0,-1.0)*length; - const aiVector3D v4 = aiVector3D(-1.0,-1.0,1.0)*length; - const aiVector3D v5 = aiVector3D(1.0,-1.0,1.0)*length; - const aiVector3D v6 = aiVector3D(1.0,1.0,1.0)*length; - const aiVector3D v7 = aiVector3D(-1.0,1.0,1.0)*length; - - ADD_QUAD(v0,v3,v2,v1); - ADD_QUAD(v0,v1,v5,v4); - ADD_QUAD(v0,v4,v7,v3); - ADD_QUAD(v6,v5,v1,v2); - ADD_QUAD(v6,v2,v3,v7); - ADD_QUAD(v6,v7,v4,v5); - return (polygons ? 4 : 3); -} - -// Cleanup ... -#undef ADD_TRIANGLE -#undef ADD_QUAD -#undef ADD_PENTAGON - -// ------------------------------------------------------------------------------------------------ -// Create a subdivision sphere -void StandardShapes::MakeSphere(unsigned int tess, - std::vector<aiVector3D>& positions) -{ - // Reserve enough storage. Every subdivision - // splits each triangle in 4, the icosahedron consists of 60 verts - positions.reserve(positions.size()+60 * integer_pow(4, tess)); - - // Construct an icosahedron to start with - MakeIcosahedron(positions); - - // ... 
and subdivide it until the requested output - // tessellation is reached - for (unsigned int i = 0; i<tess;++i) - Subdivide(positions); -} - -// ------------------------------------------------------------------------------------------------ -// Build a cone -void StandardShapes::MakeCone(ai_real height,ai_real radius1, - ai_real radius2,unsigned int tess, - std::vector<aiVector3D>& positions,bool bOpen /*= false */) -{ - // Sorry, a cone with less than 3 segments makes ABSOLUTELY NO SENSE - if (tess < 3 || !height) - return; - - size_t old = positions.size(); - - // No negative radii - radius1 = std::fabs(radius1); - radius2 = std::fabs(radius2); - - ai_real halfHeight = height / ai_real(2.0); - - // radius1 is always the smaller one - if (radius2 > radius1) - { - std::swap(radius2,radius1); - halfHeight = -halfHeight; - } - else old = SIZE_MAX; - - // Use a large epsilon to check whether the cone is pointy - if (radius1 < (radius2-radius1)*10e-3)radius1 = 0.0; - - // We will need 3*2 verts per segment + 3*2 verts per segment - // if the cone is closed - const unsigned int mem = tess*6 + (!bOpen ? tess*3 * (radius1 ? 2 : 1) : 0); - positions.reserve(positions.size () + mem); - - // Now construct all segments - const ai_real angle_delta = (ai_real)AI_MATH_TWO_PI / tess; - const ai_real angle_max = (ai_real)AI_MATH_TWO_PI; - - ai_real s = 1.0; // std::cos(angle == 0); - ai_real t = 0.0; // std::sin(angle == 0); - - for (ai_real angle = 0.0; angle < angle_max; ) - { - const aiVector3D v1 = aiVector3D (s * radius1, -halfHeight, t * radius1 ); - const aiVector3D v2 = aiVector3D (s * radius2, halfHeight, t * radius2 ); - - const ai_real next = angle + angle_delta; - ai_real s2 = std::cos(next); - ai_real t2 = std::sin(next); - - const aiVector3D v3 = aiVector3D (s2 * radius2, halfHeight, t2 * radius2 ); - const aiVector3D v4 = aiVector3D (s2 * radius1, -halfHeight, t2 * radius1 ); - - positions.push_back(v1); - positions.push_back(v2); - positions.push_back(v3); - positions.push_back(v4); - positions.push_back(v1); - positions.push_back(v3); - - if (!bOpen) - { - // generate the end 'cap' - positions.push_back(aiVector3D(s * radius2, halfHeight, t * radius2 )); - positions.push_back(aiVector3D(s2 * radius2, halfHeight, t2 * radius2 )); - positions.push_back(aiVector3D(0.0, halfHeight, 0.0)); - - - if (radius1) - { - // generate the other end 'cap' - positions.push_back(aiVector3D(s * radius1, -halfHeight, t * radius1 )); - positions.push_back(aiVector3D(s2 * radius1, -halfHeight, t2 * radius1 )); - positions.push_back(aiVector3D(0.0, -halfHeight, 0.0)); - - } - } - s = s2; - t = t2; - angle = next; - } - - // Need to flip face order? 
- if ( SIZE_MAX != old ) { - for (size_t p = old; p < positions.size();p += 3) { - std::swap(positions[p],positions[p+1]); - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Build a circle -void StandardShapes::MakeCircle(ai_real radius, unsigned int tess, - std::vector<aiVector3D>& positions) -{ - // Sorry, a circle with less than 3 segments makes ABSOLUTELY NO SENSE - if (tess < 3 || !radius) - return; - - radius = std::fabs(radius); - - // We will need 3 vertices per segment - positions.reserve(positions.size()+tess*3); - - const ai_real angle_delta = (ai_real)AI_MATH_TWO_PI / tess; - const ai_real angle_max = (ai_real)AI_MATH_TWO_PI; - - ai_real s = 1.0; // std::cos(angle == 0); - ai_real t = 0.0; // std::sin(angle == 0); - - for (ai_real angle = 0.0; angle < angle_max; ) - { - positions.push_back(aiVector3D(s * radius,0.0,t * radius)); - angle += angle_delta; - s = std::cos(angle); - t = std::sin(angle); - positions.push_back(aiVector3D(s * radius,0.0,t * radius)); - - positions.push_back(aiVector3D(0.0,0.0,0.0)); - } -} - -} // ! Assimp diff --git a/thirdparty/assimp/code/Common/StdOStreamLogStream.h b/thirdparty/assimp/code/Common/StdOStreamLogStream.h deleted file mode 100644 index 893e261a2b..0000000000 --- a/thirdparty/assimp/code/Common/StdOStreamLogStream.h +++ /dev/null @@ -1,101 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file StdOStreamLogStream.h -* @brief Implementation of StdOStreamLogStream -*/ - -#ifndef AI_STROSTREAMLOGSTREAM_H_INC -#define AI_STROSTREAMLOGSTREAM_H_INC - -#include <assimp/LogStream.hpp> -#include <ostream> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** @class StdOStreamLogStream - * @brief Logs into a std::ostream - */ -class StdOStreamLogStream : public LogStream { -public: - /** @brief Construction from an existing std::ostream - * @param _ostream Output stream to be used - */ - explicit StdOStreamLogStream(std::ostream& _ostream); - - /** @brief Destructor */ - ~StdOStreamLogStream(); - - /** @brief Writer */ - void write(const char* message); - -private: - std::ostream& mOstream; -}; - -// --------------------------------------------------------------------------- -// Default constructor -inline StdOStreamLogStream::StdOStreamLogStream(std::ostream& _ostream) -: mOstream (_ostream){ - // empty -} - -// --------------------------------------------------------------------------- -// Default constructor -inline StdOStreamLogStream::~StdOStreamLogStream() { - // empty -} - -// --------------------------------------------------------------------------- -// Write method -inline void StdOStreamLogStream::write(const char* message) { - mOstream << message; - mOstream.flush(); -} - -// --------------------------------------------------------------------------- - -} // Namespace Assimp - -#endif // guard diff --git a/thirdparty/assimp/code/Common/Subdivision.cpp b/thirdparty/assimp/code/Common/Subdivision.cpp deleted file mode 100644 index 60c54939f5..0000000000 --- a/thirdparty/assimp/code/Common/Subdivision.cpp +++ /dev/null @@ -1,589 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -#include <assimp/Subdivision.h> -#include <assimp/SceneCombiner.h> -#include <assimp/SpatialSort.h> -#include <assimp/Vertex.h> -#include <assimp/ai_assert.h> - -#include "PostProcessing/ProcessHelper.h" - -#include <stdio.h> - -using namespace Assimp; -void mydummy() {} - -// ------------------------------------------------------------------------------------------------ -/** Subdivider stub class to implement the Catmull-Clarke subdivision algorithm. The - * implementation is basing on recursive refinement. Directly evaluating the result is also - * possible and much quicker, but it depends on lengthy matrix lookup tables. */ -// ------------------------------------------------------------------------------------------------ -class CatmullClarkSubdivider : public Subdivider { -public: - void Subdivide (aiMesh* mesh, aiMesh*& out, unsigned int num, bool discard_input); - void Subdivide (aiMesh** smesh, size_t nmesh, - aiMesh** out, unsigned int num, bool discard_input); - - // --------------------------------------------------------------------------- - /** Intermediate description of an edge between two corners of a polygon*/ - // --------------------------------------------------------------------------- - struct Edge - { - Edge() - : ref(0) - {} - Vertex edge_point, midpoint; - unsigned int ref; - }; - - typedef std::vector<unsigned int> UIntVector; - typedef std::map<uint64_t,Edge> EdgeMap; - - // --------------------------------------------------------------------------- - // Hashing function to derive an index into an #EdgeMap from two given - // 'unsigned int' vertex coordinates (!!distinct coordinates - same - // vertex position == same index!!). - // NOTE - this leads to rare hash collisions if a) sizeof(unsigned int)>4 - // and (id[0]>2^32-1 or id[0]>2^32-1). - // MAKE_EDGE_HASH() uses temporaries, so INIT_EDGE_HASH() needs to be put - // at the head of every function which is about to use MAKE_EDGE_HASH(). - // Reason is that the hash is that hash construction needs to hold the - // invariant id0<id1 to identify an edge - else two hashes would refer - // to the same edge. 
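// The same idea as the MAKE_EDGE_HASH macro below, written as a plain function for
// clarity (a sketch only, not the macro itself): put the two vertex indices into a
// canonical order so an edge maps to a single key, then pack both 32-bit indices
// into one 64-bit value usable as a map key.
#include <cstdint>
#include <utility>

inline std::uint64_t EdgeKey(std::uint32_t id0, std::uint32_t id1) {
    if (id0 < id1)
        std::swap(id0, id1);   // canonical order: (a,b) and (b,a) yield the same key
    return static_cast<std::uint64_t>(id0) ^ (static_cast<std::uint64_t>(id1) << 32u);
}
// e.g. edges[EdgeKey(mp[0], mp[1])] addresses the same Edge record regardless of the
// winding in which the two endpoints were encountered.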
- // --------------------------------------------------------------------------- -#define MAKE_EDGE_HASH(id0,id1) (eh_tmp0__=id0,eh_tmp1__=id1,\ - (eh_tmp0__<eh_tmp1__?std::swap(eh_tmp0__,eh_tmp1__):mydummy()),(uint64_t)eh_tmp0__^((uint64_t)eh_tmp1__<<32u)) - - -#define INIT_EDGE_HASH_TEMPORARIES()\ - unsigned int eh_tmp0__, eh_tmp1__; - -private: - void InternSubdivide (const aiMesh* const * smesh, - size_t nmesh,aiMesh** out, unsigned int num); -}; - - -// ------------------------------------------------------------------------------------------------ -// Construct a subdivider of a specific type -Subdivider* Subdivider::Create (Algorithm algo) -{ - switch (algo) - { - case CATMULL_CLARKE: - return new CatmullClarkSubdivider(); - }; - - ai_assert(false); - return NULL; // shouldn't happen -} - -// ------------------------------------------------------------------------------------------------ -// Call the Catmull Clark subdivision algorithm for one mesh -void CatmullClarkSubdivider::Subdivide ( - aiMesh* mesh, - aiMesh*& out, - unsigned int num, - bool discard_input - ) -{ - ai_assert(mesh != out); - - Subdivide(&mesh,1,&out,num,discard_input); -} - -// ------------------------------------------------------------------------------------------------ -// Call the Catmull Clark subdivision algorithm for multiple meshes -void CatmullClarkSubdivider::Subdivide ( - aiMesh** smesh, - size_t nmesh, - aiMesh** out, - unsigned int num, - bool discard_input - ) -{ - ai_assert( NULL != smesh ); - ai_assert( NULL != out ); - - // course, both regions may not overlap - ai_assert(smesh<out || smesh+nmesh>out+nmesh); - if (!num) { - // No subdivision at all. Need to copy all the meshes .. argh. - if (discard_input) { - for (size_t s = 0; s < nmesh; ++s) { - out[s] = smesh[s]; - smesh[s] = NULL; - } - } - else { - for (size_t s = 0; s < nmesh; ++s) { - SceneCombiner::Copy(out+s,smesh[s]); - } - } - return; - } - - std::vector<aiMesh*> inmeshes; - std::vector<aiMesh*> outmeshes; - std::vector<unsigned int> maptbl; - - inmeshes.reserve(nmesh); - outmeshes.reserve(nmesh); - maptbl.reserve(nmesh); - - // Remove pure line and point meshes from the working set to reduce the - // number of edge cases the subdivider is forced to deal with. Line and - // point meshes are simply passed through. - for (size_t s = 0; s < nmesh; ++s) { - aiMesh* i = smesh[s]; - // FIX - mPrimitiveTypes might not yet be initialized - if (i->mPrimitiveTypes && (i->mPrimitiveTypes & (aiPrimitiveType_LINE|aiPrimitiveType_POINT))==i->mPrimitiveTypes) { - ASSIMP_LOG_DEBUG("Catmull-Clark Subdivider: Skipping pure line/point mesh"); - - if (discard_input) { - out[s] = i; - smesh[s] = NULL; - } - else { - SceneCombiner::Copy(out+s,i); - } - continue; - } - - outmeshes.push_back(NULL);inmeshes.push_back(i); - maptbl.push_back(static_cast<unsigned int>(s)); - } - - // Do the actual subdivision on the preallocated storage. InternSubdivide - // *always* assumes that enough storage is available, it does not bother - // checking any ranges. 
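// A sketch of the bookkeeping pattern used above, under assumed generic names
// (ProcessSubset, keep, work are not assimp API): items that need work are collected
// into a compact working set, their original positions are remembered in an index
// map, and the results are scattered back to those positions afterwards, which is
// the role 'maptbl' plays here.
#include <cstddef>
#include <vector>

template <class T, class Keep, class Work>
void ProcessSubset(std::vector<T>& items, Keep keep, Work work) {
    std::vector<std::size_t> originalIndex;   // maptbl equivalent
    std::vector<T> in;
    for (std::size_t i = 0; i < items.size(); ++i)
        if (keep(items[i])) {
            in.push_back(items[i]);
            originalIndex.push_back(i);
        }
    if (in.empty())
        return;                               // nothing left to process
    std::vector<T> out(in.size());
    work(in, out);                            // bulk work on the reduced set only
    for (std::size_t i = 0; i < originalIndex.size(); ++i)
        items[originalIndex[i]] = out[i];     // write results back in place
}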
- ai_assert(inmeshes.size()==outmeshes.size()&&inmeshes.size()==maptbl.size()); - if (inmeshes.empty()) { - ASSIMP_LOG_WARN("Catmull-Clark Subdivider: Pure point/line scene, I can't do anything"); - return; - } - InternSubdivide(&inmeshes.front(),inmeshes.size(),&outmeshes.front(),num); - for (unsigned int i = 0; i < maptbl.size(); ++i) { - ai_assert(nullptr != outmeshes[i]); - out[maptbl[i]] = outmeshes[i]; - } - - if (discard_input) { - for (size_t s = 0; s < nmesh; ++s) { - delete smesh[s]; - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Note - this is an implementation of the standard (recursive) Cm-Cl algorithm without further -// optimizations (except we're using some nice LUTs). A description of the algorithm can be found -// here: http://en.wikipedia.org/wiki/Catmull-Clark_subdivision_surface -// -// The code is mostly O(n), however parts are O(nlogn) which is therefore the algorithm's -// expected total runtime complexity. The implementation is able to work in-place on the same -// mesh arrays. Calling #InternSubdivide() directly is not encouraged. The code can operate -// in-place unless 'smesh' and 'out' are equal (no strange overlaps or reorderings). -// Previous data is replaced/deleted then. -// ------------------------------------------------------------------------------------------------ -void CatmullClarkSubdivider::InternSubdivide ( - const aiMesh* const * smesh, - size_t nmesh, - aiMesh** out, - unsigned int num - ) -{ - ai_assert(NULL != smesh && NULL != out); - INIT_EDGE_HASH_TEMPORARIES(); - - // no subdivision requested or end of recursive refinement - if (!num) { - return; - } - - UIntVector maptbl; - SpatialSort spatial; - - // --------------------------------------------------------------------- - // 0. Offset table to index all meshes continuously, generate a spatially - // sorted representation of all vertices in all meshes. - // --------------------------------------------------------------------- - typedef std::pair<unsigned int,unsigned int> IntPair; - std::vector<IntPair> moffsets(nmesh); - unsigned int totfaces = 0, totvert = 0; - for (size_t t = 0; t < nmesh; ++t) { - const aiMesh* mesh = smesh[t]; - - spatial.Append(mesh->mVertices,mesh->mNumVertices,sizeof(aiVector3D),false); - moffsets[t] = IntPair(totfaces,totvert); - - totfaces += mesh->mNumFaces; - totvert += mesh->mNumVertices; - } - - spatial.Finalize(); - const unsigned int num_unique = spatial.GenerateMappingTable(maptbl,ComputePositionEpsilon(smesh,nmesh)); - - -#define FLATTEN_VERTEX_IDX(mesh_idx, vert_idx) (moffsets[mesh_idx].second+vert_idx) -#define FLATTEN_FACE_IDX(mesh_idx, face_idx) (moffsets[mesh_idx].first+face_idx) - - // --------------------------------------------------------------------- - // 1. 
Compute the centroid point for all faces - // --------------------------------------------------------------------- - std::vector<Vertex> centroids(totfaces); - unsigned int nfacesout = 0; - for (size_t t = 0, n = 0; t < nmesh; ++t) { - const aiMesh* mesh = smesh[t]; - for (unsigned int i = 0; i < mesh->mNumFaces;++i,++n) - { - const aiFace& face = mesh->mFaces[i]; - Vertex& c = centroids[n]; - - for (unsigned int a = 0; a < face.mNumIndices;++a) { - c += Vertex(mesh,face.mIndices[a]); - } - - c /= static_cast<float>(face.mNumIndices); - nfacesout += face.mNumIndices; - } - } - - { - // we want edges to go away before the recursive calls so begin a new scope - EdgeMap edges; - - // --------------------------------------------------------------------- - // 2. Set each edge point to be the average of all neighbouring - // face points and original points. Every edge exists twice - // if there is a neighboring face. - // --------------------------------------------------------------------- - for (size_t t = 0; t < nmesh; ++t) { - const aiMesh* mesh = smesh[t]; - - for (unsigned int i = 0; i < mesh->mNumFaces;++i) { - const aiFace& face = mesh->mFaces[i]; - - for (unsigned int p =0; p< face.mNumIndices; ++p) { - const unsigned int id[] = { - face.mIndices[p], - face.mIndices[p==face.mNumIndices-1?0:p+1] - }; - const unsigned int mp[] = { - maptbl[FLATTEN_VERTEX_IDX(t,id[0])], - maptbl[FLATTEN_VERTEX_IDX(t,id[1])] - }; - - Edge& e = edges[MAKE_EDGE_HASH(mp[0],mp[1])]; - e.ref++; - if (e.ref<=2) { - if (e.ref==1) { // original points (end points) - add only once - e.edge_point = e.midpoint = Vertex(mesh,id[0])+Vertex(mesh,id[1]); - e.midpoint *= 0.5f; - } - e.edge_point += centroids[FLATTEN_FACE_IDX(t,i)]; - } - } - } - } - - // --------------------------------------------------------------------- - // 3. Normalize edge points - // --------------------------------------------------------------------- - {unsigned int bad_cnt = 0; - for (EdgeMap::iterator it = edges.begin(); it != edges.end(); ++it) { - if ((*it).second.ref < 2) { - ai_assert((*it).second.ref); - ++bad_cnt; - } - (*it).second.edge_point *= 1.f/((*it).second.ref+2.f); - } - - if (bad_cnt) { - // Report the number of bad edges. bad edges are referenced by less than two - // faces in the mesh. They occur at outer model boundaries in non-closed - // shapes. - ASSIMP_LOG_DEBUG_F("Catmull-Clark Subdivider: got ", bad_cnt, " bad edges touching only one face (totally ", - static_cast<unsigned int>(edges.size()), " edges). "); - }} - - // --------------------------------------------------------------------- - // 4. Compute a vertex-face adjacency table. We can't reuse the code - // from VertexTriangleAdjacency because we need the table for multiple - // meshes and out vertex indices need to be mapped to distinct values - // first. 
- // --------------------------------------------------------------------- - UIntVector faceadjac(nfacesout), cntadjfac(maptbl.size(),0), ofsadjvec(maptbl.size()+1,0); { - for (size_t t = 0; t < nmesh; ++t) { - const aiMesh* const minp = smesh[t]; - for (unsigned int i = 0; i < minp->mNumFaces; ++i) { - - const aiFace& f = minp->mFaces[i]; - for (unsigned int n = 0; n < f.mNumIndices; ++n) { - ++cntadjfac[maptbl[FLATTEN_VERTEX_IDX(t,f.mIndices[n])]]; - } - } - } - unsigned int cur = 0; - for (size_t i = 0; i < cntadjfac.size(); ++i) { - ofsadjvec[i+1] = cur; - cur += cntadjfac[i]; - } - for (size_t t = 0; t < nmesh; ++t) { - const aiMesh* const minp = smesh[t]; - for (unsigned int i = 0; i < minp->mNumFaces; ++i) { - - const aiFace& f = minp->mFaces[i]; - for (unsigned int n = 0; n < f.mNumIndices; ++n) { - faceadjac[ofsadjvec[1+maptbl[FLATTEN_VERTEX_IDX(t,f.mIndices[n])]]++] = FLATTEN_FACE_IDX(t,i); - } - } - } - - // check the other way round for consistency -#ifdef ASSIMP_BUILD_DEBUG - - for (size_t t = 0; t < ofsadjvec.size()-1; ++t) { - for (unsigned int m = 0; m < cntadjfac[t]; ++m) { - const unsigned int fidx = faceadjac[ofsadjvec[t]+m]; - ai_assert(fidx < totfaces); - for (size_t n = 1; n < nmesh; ++n) { - - if (moffsets[n].first > fidx) { - const aiMesh* msh = smesh[--n]; - const aiFace& f = msh->mFaces[fidx-moffsets[n].first]; - - bool haveit = false; - for (unsigned int i = 0; i < f.mNumIndices; ++i) { - if (maptbl[FLATTEN_VERTEX_IDX(n,f.mIndices[i])]==(unsigned int)t) { - haveit = true; - break; - } - } - ai_assert(haveit); - if (!haveit) { - ASSIMP_LOG_DEBUG("Catmull-Clark Subdivider: Index not used"); - } - break; - } - } - } - } - -#endif - } - -#define GET_ADJACENT_FACES_AND_CNT(vidx,fstartout,numout) \ - fstartout = &faceadjac[ofsadjvec[vidx]], numout = cntadjfac[vidx] - - typedef std::pair<bool,Vertex> TouchedOVertex; - std::vector<TouchedOVertex > new_points(num_unique,TouchedOVertex(false,Vertex())); - // --------------------------------------------------------------------- - // 5. Spawn a quad from each face point to the corresponding edge points - // the original points being the fourth quad points. - // --------------------------------------------------------------------- - for (size_t t = 0; t < nmesh; ++t) { - const aiMesh* const minp = smesh[t]; - aiMesh* const mout = out[t] = new aiMesh(); - - for (unsigned int a = 0; a < minp->mNumFaces; ++a) { - mout->mNumFaces += minp->mFaces[a].mNumIndices; - } - - // We need random access to the old face buffer, so reuse is not possible. 
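// A quick arithmetic sketch of the sizing above (an illustrative helper, not assimp
// API): every corner of an original face spawns one output quad, and each quad stores
// its four corners (face centroid, two edge points, moved original point) without
// sharing, so a set of faces with k_i corners needs sum(k_i) quads and 4 * sum(k_i)
// vertices.
#include <vector>

unsigned int CountOutputVertices(const std::vector<unsigned int>& faceCornerCounts) {
    unsigned int quads = 0;
    for (unsigned int k : faceCornerCounts)
        quads += k;          // one quad per original corner
    return quads * 4u;       // four unshared vertices per quad
}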
- mout->mFaces = new aiFace[mout->mNumFaces]; - - mout->mNumVertices = mout->mNumFaces*4; - mout->mVertices = new aiVector3D[mout->mNumVertices]; - - // quads only, keep material index - mout->mPrimitiveTypes = aiPrimitiveType_POLYGON; - mout->mMaterialIndex = minp->mMaterialIndex; - - if (minp->HasNormals()) { - mout->mNormals = new aiVector3D[mout->mNumVertices]; - } - - if (minp->HasTangentsAndBitangents()) { - mout->mTangents = new aiVector3D[mout->mNumVertices]; - mout->mBitangents = new aiVector3D[mout->mNumVertices]; - } - - for(unsigned int i = 0; minp->HasTextureCoords(i); ++i) { - mout->mTextureCoords[i] = new aiVector3D[mout->mNumVertices]; - mout->mNumUVComponents[i] = minp->mNumUVComponents[i]; - } - - for(unsigned int i = 0; minp->HasVertexColors(i); ++i) { - mout->mColors[i] = new aiColor4D[mout->mNumVertices]; - } - - mout->mNumVertices = mout->mNumFaces<<2u; - for (unsigned int i = 0, v = 0, n = 0; i < minp->mNumFaces;++i) { - - const aiFace& face = minp->mFaces[i]; - for (unsigned int a = 0; a < face.mNumIndices;++a) { - - // Get a clean new face. - aiFace& faceOut = mout->mFaces[n++]; - faceOut.mIndices = new unsigned int [faceOut.mNumIndices = 4]; - - // Spawn a new quadrilateral (ccw winding) for this original point between: - // a) face centroid - centroids[FLATTEN_FACE_IDX(t,i)].SortBack(mout,faceOut.mIndices[0]=v++); - - // b) adjacent edge on the left, seen from the centroid - const Edge& e0 = edges[MAKE_EDGE_HASH(maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])], - maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a==face.mNumIndices-1?0:a+1]) - ])]; // fixme: replace with mod face.mNumIndices? - - // c) adjacent edge on the right, seen from the centroid - const Edge& e1 = edges[MAKE_EDGE_HASH(maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])], - maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[!a?face.mNumIndices-1:a-1]) - ])]; // fixme: replace with mod face.mNumIndices? - - e0.edge_point.SortBack(mout,faceOut.mIndices[3]=v++); - e1.edge_point.SortBack(mout,faceOut.mIndices[1]=v++); - - // d= original point P with distinct index i - // F := 0 - // R := 0 - // n := 0 - // for each face f containing i - // F := F+ centroid of f - // R := R+ midpoint of edge of f from i to i+1 - // n := n+1 - // - // (F+2R+(n-3)P)/n - const unsigned int org = maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])]; - TouchedOVertex& ov = new_points[org]; - - if (!ov.first) { - ov.first = true; - - const unsigned int* adj; unsigned int cnt; - GET_ADJACENT_FACES_AND_CNT(org,adj,cnt); - - if (cnt < 3) { - ov.second = Vertex(minp,face.mIndices[a]); - } - else { - - Vertex F,R; - for (unsigned int o = 0; o < cnt; ++o) { - ai_assert(adj[o] < totfaces); - F += centroids[adj[o]]; - - // adj[0] is a global face index - search the face in the mesh list - const aiMesh* mp = NULL; - size_t nidx; - - if (adj[o] < moffsets[0].first) { - mp = smesh[nidx=0]; - } - else { - for (nidx = 1; nidx<= nmesh; ++nidx) { - if (nidx == nmesh ||moffsets[nidx].first > adj[o]) { - mp = smesh[--nidx]; - break; - } - } - } - - ai_assert(adj[o]-moffsets[nidx].first < mp->mNumFaces); - const aiFace& f = mp->mFaces[adj[o]-moffsets[nidx].first]; - bool haveit = false; - - // find our original point in the face - for (unsigned int m = 0; m < f.mNumIndices; ++m) { - if (maptbl[FLATTEN_VERTEX_IDX(nidx,f.mIndices[m])] == org) { - - // add *both* edges. this way, we can be sure that we add - // *all* adjacent edges to R. 
In a closed shape, every - // edge is added twice - so we simply leave out the - // factor 2.f in the amove formula and get the right - // result. - - const Edge& c0 = edges[MAKE_EDGE_HASH(org,maptbl[FLATTEN_VERTEX_IDX( - nidx,f.mIndices[!m?f.mNumIndices-1:m-1])])]; - // fixme: replace with mod face.mNumIndices? - - const Edge& c1 = edges[MAKE_EDGE_HASH(org,maptbl[FLATTEN_VERTEX_IDX( - nidx,f.mIndices[m==f.mNumIndices-1?0:m+1])])]; - // fixme: replace with mod face.mNumIndices? - R += c0.midpoint+c1.midpoint; - - haveit = true; - break; - } - } - - // this invariant *must* hold if the vertex-to-face adjacency table is valid - ai_assert(haveit); - if ( !haveit ) { - ASSIMP_LOG_WARN( "OBJ: no name for material library specified." ); - } - } - - const float div = static_cast<float>(cnt), divsq = 1.f/(div*div); - ov.second = Vertex(minp,face.mIndices[a])*((div-3.f) / div) + R*divsq + F*divsq; - } - } - ov.second.SortBack(mout,faceOut.mIndices[2]=v++); - } - } - } - } // end of scope for edges, freeing its memory - - // --------------------------------------------------------------------- - // 7. Apply the next subdivision step. - // --------------------------------------------------------------------- - if (num != 1) { - std::vector<aiMesh*> tmp(nmesh); - InternSubdivide (out,nmesh,&tmp.front(),num-1); - for (size_t i = 0; i < nmesh; ++i) { - delete out[i]; - out[i] = tmp[i]; - } - } -} diff --git a/thirdparty/assimp/code/Common/TargetAnimation.cpp b/thirdparty/assimp/code/Common/TargetAnimation.cpp deleted file mode 100644 index b8062499ff..0000000000 --- a/thirdparty/assimp/code/Common/TargetAnimation.cpp +++ /dev/null @@ -1,248 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -#include "TargetAnimation.h" -#include <algorithm> -#include <assimp/ai_assert.h> - -using namespace Assimp; - - -// ------------------------------------------------------------------------------------------------ -KeyIterator::KeyIterator(const std::vector<aiVectorKey>* _objPos, - const std::vector<aiVectorKey>* _targetObjPos, - const aiVector3D* defaultObjectPos /*= NULL*/, - const aiVector3D* defaultTargetPos /*= NULL*/) - - : reachedEnd (false) - , curTime (-1.) - , objPos (_objPos) - , targetObjPos (_targetObjPos) - , nextObjPos (0) - , nextTargetObjPos(0) -{ - // Generate default transformation tracks if necessary - if (!objPos || objPos->empty()) - { - defaultObjPos.resize(1); - defaultObjPos.front().mTime = 10e10; - - if (defaultObjectPos) - defaultObjPos.front().mValue = *defaultObjectPos; - - objPos = & defaultObjPos; - } - if (!targetObjPos || targetObjPos->empty()) - { - defaultTargetObjPos.resize(1); - defaultTargetObjPos.front().mTime = 10e10; - - if (defaultTargetPos) - defaultTargetObjPos.front().mValue = *defaultTargetPos; - - targetObjPos = & defaultTargetObjPos; - } -} - -// ------------------------------------------------------------------------------------------------ -template <class T> -inline T Interpolate(const T& one, const T& two, ai_real val) -{ - return one + (two-one)*val; -} - -// ------------------------------------------------------------------------------------------------ -void KeyIterator::operator ++() -{ - // If we are already at the end of all keyframes, return - if (reachedEnd) { - return; - } - - // Now search in all arrays for the time value closest - // to our current position on the time line - double d0,d1; - - d0 = objPos->at ( std::min ( nextObjPos, static_cast<unsigned int>(objPos->size()-1)) ).mTime; - d1 = targetObjPos->at( std::min ( nextTargetObjPos, static_cast<unsigned int>(targetObjPos->size()-1)) ).mTime; - - // Easiest case - all are identical. 
In this - // case we don't need to interpolate so we can - // return earlier - if ( d0 == d1 ) - { - curTime = d0; - curPosition = objPos->at(nextObjPos).mValue; - curTargetPosition = targetObjPos->at(nextTargetObjPos).mValue; - - // increment counters - if (objPos->size() != nextObjPos-1) - ++nextObjPos; - - if (targetObjPos->size() != nextTargetObjPos-1) - ++nextTargetObjPos; - } - - // An object position key is closest to us - else if (d0 < d1) - { - curTime = d0; - - // interpolate the other - if (1 == targetObjPos->size() || !nextTargetObjPos) { - curTargetPosition = targetObjPos->at(0).mValue; - } - else - { - const aiVectorKey& last = targetObjPos->at(nextTargetObjPos); - const aiVectorKey& first = targetObjPos->at(nextTargetObjPos-1); - - curTargetPosition = Interpolate(first.mValue, last.mValue, (ai_real) ( - (curTime-first.mTime) / (last.mTime-first.mTime) )); - } - - if (objPos->size() != nextObjPos-1) - ++nextObjPos; - } - // A target position key is closest to us - else - { - curTime = d1; - - // interpolate the other - if (1 == objPos->size() || !nextObjPos) { - curPosition = objPos->at(0).mValue; - } - else - { - const aiVectorKey& last = objPos->at(nextObjPos); - const aiVectorKey& first = objPos->at(nextObjPos-1); - - curPosition = Interpolate(first.mValue, last.mValue, (ai_real) ( - (curTime-first.mTime) / (last.mTime-first.mTime))); - } - - if (targetObjPos->size() != nextTargetObjPos-1) - ++nextTargetObjPos; - } - - if (nextObjPos >= objPos->size()-1 && - nextTargetObjPos >= targetObjPos->size()-1) - { - // We reached the very last keyframe - reachedEnd = true; - } -} - -// ------------------------------------------------------------------------------------------------ -void TargetAnimationHelper::SetTargetAnimationChannel ( - const std::vector<aiVectorKey>* _targetPositions) -{ - ai_assert(NULL != _targetPositions); - targetPositions = _targetPositions; -} - -// ------------------------------------------------------------------------------------------------ -void TargetAnimationHelper::SetMainAnimationChannel ( - const std::vector<aiVectorKey>* _objectPositions) -{ - ai_assert(NULL != _objectPositions); - objectPositions = _objectPositions; -} - -// ------------------------------------------------------------------------------------------------ -void TargetAnimationHelper::SetFixedMainAnimationChannel( - const aiVector3D& fixed) -{ - objectPositions = NULL; // just to avoid confusion - fixedMain = fixed; -} - -// ------------------------------------------------------------------------------------------------ -void TargetAnimationHelper::Process(std::vector<aiVectorKey>* distanceTrack) -{ - ai_assert(NULL != targetPositions && NULL != distanceTrack); - - // TODO: in most cases we won't need the extra array - std::vector<aiVectorKey> real; - - std::vector<aiVectorKey>* fill = (distanceTrack == objectPositions ? &real : distanceTrack); - fill->reserve(std::max( objectPositions->size(), targetPositions->size() )); - - // Iterate through all object keys and interpolate their values if necessary. - // Then get the corresponding target position, compute the difference - // vector between object and target position. Then compute a rotation matrix - // that rotates the base vector of the object coordinate system at that time - // to match the diff vector. 
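// A condensed sketch of the idea behind the loop below, using plain structs (Vec3,
// Key, DifferenceTrack are assumed names, not assimp types): for each object-position
// key, linearly interpolate the target track at that time and record the
// target-minus-object vector. The real KeyIterator also steps through the target
// track's own key times and supports a fixed main channel; only the core computation
// is kept here.
#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };
struct Key  { double time; Vec3 value; };

static Vec3 Lerp(const Vec3& a, const Vec3& b, float t) {
    return { a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t, a.z + (b.z - a.z) * t };
}

// Sample a non-empty track whose keys are sorted by ascending time; clamps at both ends.
static Vec3 Sample(const std::vector<Key>& track, double time) {
    if (time <= track.front().time)
        return track.front().value;
    for (std::size_t i = 1; i < track.size(); ++i) {
        if (time <= track[i].time) {
            const double span = track[i].time - track[i - 1].time;
            const float t = span > 0.0 ? static_cast<float>((time - track[i - 1].time) / span) : 0.0f;
            return Lerp(track[i - 1].value, track[i].value, t);
        }
    }
    return track.back().value;
}

std::vector<Key> DifferenceTrack(const std::vector<Key>& objectTrack,
                                 const std::vector<Key>& targetTrack) {
    std::vector<Key> out;
    out.reserve(objectTrack.size());
    for (const Key& k : objectTrack) {
        const Vec3 t = Sample(targetTrack, k.time);
        out.push_back({ k.time, { t.x - k.value.x, t.y - k.value.y, t.z - k.value.z } });
    }
    return out;
}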
- - KeyIterator iter(objectPositions,targetPositions,&fixedMain); - for (;!iter.Finished();++iter) - { - const aiVector3D& position = iter.GetCurPosition(); - const aiVector3D& tposition = iter.GetCurTargetPosition(); - - // diff vector - aiVector3D diff = tposition - position; - ai_real f = diff.Length(); - - // output distance vector - if (f) - { - fill->push_back(aiVectorKey()); - aiVectorKey& v = fill->back(); - v.mTime = iter.GetCurTime(); - v.mValue = diff; - - diff /= f; - } - else - { - // FIXME: handle this - } - - // diff is now the vector in which our camera is pointing - } - - if (real.size()) { - *distanceTrack = real; - } -} diff --git a/thirdparty/assimp/code/Common/TargetAnimation.h b/thirdparty/assimp/code/Common/TargetAnimation.h deleted file mode 100644 index 91634ab5aa..0000000000 --- a/thirdparty/assimp/code/Common/TargetAnimation.h +++ /dev/null @@ -1,183 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a helper class for the ASE and 3DS loaders to - help them compute camera and spot light animation channels */ -#ifndef AI_TARGET_ANIMATION_H_INC -#define AI_TARGET_ANIMATION_H_INC - -#include <assimp/anim.h> -#include <vector> - -namespace Assimp { - - - -// --------------------------------------------------------------------------- -/** Helper class to iterate through all keys in an animation channel. - * - * Missing tracks are interpolated. This is a helper class for - * TargetAnimationHelper, but it can be freely used for other purposes. -*/ -class KeyIterator -{ -public: - - - // ------------------------------------------------------------------ - /** Constructs a new key iterator - * - * @param _objPos Object position track. May be NULL. - * @param _targetObjPos Target object position track. May be NULL. 
- * @param defaultObjectPos Default object position to be used if - * no animated track is available. May be NULL. - * @param defaultTargetPos Default target position to be used if - * no animated track is available. May be NULL. - */ - KeyIterator(const std::vector<aiVectorKey>* _objPos, - const std::vector<aiVectorKey>* _targetObjPos, - const aiVector3D* defaultObjectPos = NULL, - const aiVector3D* defaultTargetPos = NULL); - - // ------------------------------------------------------------------ - /** Returns true if all keys have been processed - */ - bool Finished() const - {return reachedEnd;} - - // ------------------------------------------------------------------ - /** Increment the iterator - */ - void operator++(); - inline void operator++(int) - {return ++(*this);} - - - - // ------------------------------------------------------------------ - /** Getters to retrieve the current state of the iterator - */ - inline const aiVector3D& GetCurPosition() const - {return curPosition;} - - inline const aiVector3D& GetCurTargetPosition() const - {return curTargetPosition;} - - inline double GetCurTime() const - {return curTime;} - -private: - - //! Did we reach the end? - bool reachedEnd; - - //! Represents the current position of the iterator - aiVector3D curPosition, curTargetPosition; - - double curTime; - - //! Input tracks and the next key to process - const std::vector<aiVectorKey>* objPos,*targetObjPos; - - unsigned int nextObjPos, nextTargetObjPos; - std::vector<aiVectorKey> defaultObjPos,defaultTargetObjPos; -}; - -// --------------------------------------------------------------------------- -/** Helper class for the 3DS and ASE loaders to compute camera and spot light - * animations. - * - * 3DS and ASE store the differently to Assimp - there is an animation - * channel for the camera/spot light itself and a separate position - * animation channels specifying the position of the camera/spot light - * look-at target */ -class TargetAnimationHelper -{ -public: - - TargetAnimationHelper() - : targetPositions (NULL) - , objectPositions (NULL) - {} - - - // ------------------------------------------------------------------ - /** Sets the target animation channel - * - * This channel specifies the position of the camera/spot light - * target at a specific position. - * - * @param targetPositions Translation channel*/ - void SetTargetAnimationChannel (const - std::vector<aiVectorKey>* targetPositions); - - - // ------------------------------------------------------------------ - /** Sets the main animation channel - * - * @param objectPositions Translation channel */ - void SetMainAnimationChannel ( const - std::vector<aiVectorKey>* objectPositions); - - // ------------------------------------------------------------------ - /** Sets the main animation channel to a fixed value - * - * @param fixed Fixed value for the main animation channel*/ - void SetFixedMainAnimationChannel(const aiVector3D& fixed); - - - // ------------------------------------------------------------------ - /** Computes final animation channels - * @param distanceTrack Receive camera translation keys ... != NULL. */ - void Process( std::vector<aiVectorKey>* distanceTrack ); - - -private: - - const std::vector<aiVectorKey>* targetPositions,*objectPositions; - aiVector3D fixedMain; -}; - - -} // ! 
end namespace Assimp - -#endif // include guard diff --git a/thirdparty/assimp/code/Common/Version.cpp b/thirdparty/assimp/code/Common/Version.cpp deleted file mode 100644 index cf1da7d5ba..0000000000 --- a/thirdparty/assimp/code/Common/Version.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -// Actually just a dummy, used by the compiler to build the precompiled header. - -#include <assimp/version.h> -#include <assimp/scene.h> -#include "ScenePrivate.h" - -#include "revision.h" - -// -------------------------------------------------------------------------------- -// Legal information string - don't remove this. 
-static const char* LEGAL_INFORMATION = - -"Open Asset Import Library (Assimp).\n" -"A free C/C++ library to import various 3D file formats into applications\n\n" - -"(c) 2006-2019, assimp team\n" -"License under the terms and conditions of the 3-clause BSD license\n" -"http://assimp.org\n" -; - -// ------------------------------------------------------------------------------------------------ -// Get legal string -ASSIMP_API const char* aiGetLegalString () { - return LEGAL_INFORMATION; -} - -// ------------------------------------------------------------------------------------------------ -// Get Assimp minor version -ASSIMP_API unsigned int aiGetVersionMinor () { - return VER_MINOR; -} - -// ------------------------------------------------------------------------------------------------ -// Get Assimp major version -ASSIMP_API unsigned int aiGetVersionMajor () { - return VER_MAJOR; -} - -// ------------------------------------------------------------------------------------------------ -// Get flags used for compilation -ASSIMP_API unsigned int aiGetCompileFlags () { - - unsigned int flags = 0; - -#ifdef ASSIMP_BUILD_BOOST_WORKAROUND - flags |= ASSIMP_CFLAGS_NOBOOST; -#endif -#ifdef ASSIMP_BUILD_SINGLETHREADED - flags |= ASSIMP_CFLAGS_SINGLETHREADED; -#endif -#ifdef ASSIMP_BUILD_DEBUG - flags |= ASSIMP_CFLAGS_DEBUG; -#endif -#ifdef ASSIMP_BUILD_DLL_EXPORT - flags |= ASSIMP_CFLAGS_SHARED; -#endif -#ifdef _STLPORT_VERSION - flags |= ASSIMP_CFLAGS_STLPORT; -#endif - - return flags; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API unsigned int aiGetVersionRevision() { - return GitVersion; -} - -ASSIMP_API const char *aiGetBranchName() { - return GitBranch; -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiScene::aiScene() -: mFlags(0) -, mRootNode(nullptr) -, mNumMeshes(0) -, mMeshes(nullptr) -, mNumMaterials(0) -, mMaterials(nullptr) -, mNumAnimations(0) -, mAnimations(nullptr) -, mNumTextures(0) -, mTextures(nullptr) -, mNumLights(0) -, mLights(nullptr) -, mNumCameras(0) -, mCameras(nullptr) -, mMetaData(nullptr) -, mPrivate(new Assimp::ScenePrivateData()) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -ASSIMP_API aiScene::~aiScene() { - // delete all sub-objects recursively - delete mRootNode; - - // To make sure we won't crash if the data is invalid it's - // much better to check whether both mNumXXX and mXXX are - // valid instead of relying on just one of them. 
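
[Editorial aside] The comment above explains the defensive pattern the destructor follows: treat the object as potentially half-initialized and only walk an owned pointer array when both the element count and the array pointer are valid. A minimal stand-alone illustration of that pattern (the Owner/Blob names are invented for the example, not taken from Assimp):

    struct Blob { int payload; };

    struct Owner {
        unsigned int numBlobs = 0;
        Blob**       blobs   = nullptr;

        ~Owner() {
            if (numBlobs && blobs) {        // guard against half-initialized state
                for (unsigned int i = 0; i < numBlobs; ++i)
                    delete blobs[i];
            }
            delete[] blobs;                 // deleting a null array is a no-op
        }
    };

    int main() {
        Owner partiallyBuilt;               // count == 0, pointer == nullptr: still safe
        Owner filled;
        filled.numBlobs = 2;
        filled.blobs = new Blob*[2]{ new Blob{1}, new Blob{2} };
        return 0;                           // both destructors run without crashing
    }
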
- if (mNumMeshes && mMeshes) - for( unsigned int a = 0; a < mNumMeshes; a++) - delete mMeshes[a]; - delete [] mMeshes; - - if (mNumMaterials && mMaterials) { - for (unsigned int a = 0; a < mNumMaterials; ++a ) { - delete mMaterials[ a ]; - } - } - delete [] mMaterials; - - if (mNumAnimations && mAnimations) - for( unsigned int a = 0; a < mNumAnimations; a++) - delete mAnimations[a]; - delete [] mAnimations; - - if (mNumTextures && mTextures) - for( unsigned int a = 0; a < mNumTextures; a++) - delete mTextures[a]; - delete [] mTextures; - - if (mNumLights && mLights) - for( unsigned int a = 0; a < mNumLights; a++) - delete mLights[a]; - delete [] mLights; - - if (mNumCameras && mCameras) - for( unsigned int a = 0; a < mNumCameras; a++) - delete mCameras[a]; - delete [] mCameras; - - aiMetadata::Dealloc(mMetaData); - mMetaData = nullptr; - - delete static_cast<Assimp::ScenePrivateData*>( mPrivate ); -} - diff --git a/thirdparty/assimp/code/Common/VertexTriangleAdjacency.cpp b/thirdparty/assimp/code/Common/VertexTriangleAdjacency.cpp deleted file mode 100644 index 7cfd1a3505..0000000000 --- a/thirdparty/assimp/code/Common/VertexTriangleAdjacency.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the VertexTriangleAdjacency helper class - */ - -// internal headers -#include "VertexTriangleAdjacency.h" -#include <assimp/mesh.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -VertexTriangleAdjacency::VertexTriangleAdjacency(aiFace *pcFaces, - unsigned int iNumFaces, - unsigned int iNumVertices /*= 0*/, - bool bComputeNumTriangles /*= false*/) -{ - // compute the number of referenced vertices if it wasn't specified by the caller - const aiFace* const pcFaceEnd = pcFaces + iNumFaces; - if (!iNumVertices) { - for (aiFace* pcFace = pcFaces; pcFace != pcFaceEnd; ++pcFace) { - ai_assert( nullptr != pcFace ); - ai_assert(3 == pcFace->mNumIndices); - iNumVertices = std::max(iNumVertices,pcFace->mIndices[0]); - iNumVertices = std::max(iNumVertices,pcFace->mIndices[1]); - iNumVertices = std::max(iNumVertices,pcFace->mIndices[2]); - } - } - - mNumVertices = iNumVertices; - - unsigned int* pi; - - // allocate storage - if (bComputeNumTriangles) { - pi = mLiveTriangles = new unsigned int[iNumVertices+1]; - ::memset(mLiveTriangles,0,sizeof(unsigned int)*(iNumVertices+1)); - mOffsetTable = new unsigned int[iNumVertices+2]+1; - } else { - pi = mOffsetTable = new unsigned int[iNumVertices+2]+1; - ::memset(mOffsetTable,0,sizeof(unsigned int)*(iNumVertices+1)); - mLiveTriangles = NULL; // important, otherwise the d'tor would crash - } - - // get a pointer to the end of the buffer - unsigned int* piEnd = pi+iNumVertices; - *piEnd++ = 0u; - - // first pass: compute the number of faces referencing each vertex - for (aiFace* pcFace = pcFaces; pcFace != pcFaceEnd; ++pcFace) - { - unsigned nind = pcFace->mNumIndices; - unsigned * ind = pcFace->mIndices; - if (nind > 0) pi[ind[0]]++; - if (nind > 1) pi[ind[1]]++; - if (nind > 2) pi[ind[2]]++; - } - - // second pass: compute the final offset table - unsigned int iSum = 0; - unsigned int* piCurOut = this->mOffsetTable; - for (unsigned int* piCur = pi; piCur != piEnd;++piCur,++piCurOut) { - - unsigned int iLastSum = iSum; - iSum += *piCur; - *piCurOut = iLastSum; - } - pi = this->mOffsetTable; - - // third pass: compute the final table - this->mAdjacencyTable = new unsigned int[iSum]; - iSum = 0; - for (aiFace* pcFace = pcFaces; pcFace != pcFaceEnd; ++pcFace,++iSum) { - unsigned nind = pcFace->mNumIndices; - unsigned * ind = pcFace->mIndices; - - if (nind > 0) mAdjacencyTable[pi[ind[0]]++] = iSum; - if (nind > 1) mAdjacencyTable[pi[ind[1]]++] = iSum; - if (nind > 2) mAdjacencyTable[pi[ind[2]]++] = iSum; - } - // fourth pass: undo the offset computations made during the third pass - // We could do this in a separate buffer, but this would be TIMES slower. 
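
[Editorial aside] The passes above are a counting-sort / CSR construction: count how many faces reference each vertex, prefix-sum the counts into slice offsets, then scatter face indices while advancing a per-vertex write cursor. The n+2 allocation and the +1 pointer shift on mOffsetTable let the third pass reuse the offset table itself as that cursor, and the fourth pass below shifts the pointer back to recover the start offsets. A minimal stand-alone restatement with an explicit cursor array instead of the pointer trick; the two-triangle mesh and all names here are illustrative only:

    #include <cstdio>
    #include <vector>

    struct Tri { unsigned v[3]; };

    int main() {
        const std::vector<Tri> tris = { {{0, 1, 2}}, {{0, 2, 3}} };
        const unsigned numVerts = 4;

        // pass 1: count how many triangles reference each vertex
        std::vector<unsigned> offsets(numVerts + 1, 0);
        for (const Tri& t : tris)
            for (unsigned k = 0; k < 3; ++k)
                ++offsets[t.v[k] + 1];

        // pass 2: exclusive prefix sum -> offsets[v] is the start of vertex v's slice
        for (unsigned v = 1; v <= numVerts; ++v)
            offsets[v] += offsets[v - 1];

        // pass 3: scatter triangle indices; cursor[] tracks the next free slot per vertex
        std::vector<unsigned> adjacency(offsets[numVerts]);
        std::vector<unsigned> cursor(offsets.begin(), offsets.end() - 1);
        for (unsigned t = 0; t < tris.size(); ++t)
            for (unsigned k = 0; k < 3; ++k)
                adjacency[cursor[tris[t].v[k]]++] = t;

        // query: triangles adjacent to vertex 2 -> prints triangles 0 and 1
        for (unsigned i = offsets[2]; i < offsets[3]; ++i)
            std::printf("vertex 2 is used by triangle %u\n", adjacency[i]);
        return 0;
    }
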
- --mOffsetTable; - *mOffsetTable = 0u; -} -// ------------------------------------------------------------------------------------------------ -VertexTriangleAdjacency::~VertexTriangleAdjacency() -{ - // delete allocated storage - delete[] mOffsetTable; - delete[] mAdjacencyTable; - delete[] mLiveTriangles; -} diff --git a/thirdparty/assimp/code/Common/VertexTriangleAdjacency.h b/thirdparty/assimp/code/Common/VertexTriangleAdjacency.h deleted file mode 100644 index f3be47612d..0000000000 --- a/thirdparty/assimp/code/Common/VertexTriangleAdjacency.h +++ /dev/null @@ -1,117 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a helper class to compute a vertex-triangle adjacency map */ -#ifndef AI_VTADJACENCY_H_INC -#define AI_VTADJACENCY_H_INC - -#include "BaseProcess.h" -#include <assimp/types.h> -#include <assimp/ai_assert.h> - -struct aiMesh; -struct aiFace; - -namespace Assimp { - -// -------------------------------------------------------------------------------------------- -/** @brief The VertexTriangleAdjacency class computes a vertex-triangle - * adjacency map from a given index buffer. - * - * @note Although it is called #VertexTriangleAdjacency, the current version does also - * support arbitrary polygons. */ -// -------------------------------------------------------------------------------------------- -class ASSIMP_API VertexTriangleAdjacency { -public: - // ---------------------------------------------------------------------------- - /** @brief Construction from an existing index buffer - * @param pcFaces Index buffer - * @param iNumFaces Number of faces in the buffer - * @param iNumVertices Number of referenced vertices. This value - * is computed automatically if 0 is specified. 
- * @param bComputeNumTriangles If you want the class to compute - * a list containing the number of referenced triangles per vertex - * per vertex - pass true. */ - VertexTriangleAdjacency(aiFace* pcFaces,unsigned int iNumFaces, - unsigned int iNumVertices = 0, - bool bComputeNumTriangles = true); - - // ---------------------------------------------------------------------------- - /** @brief Destructor */ - ~VertexTriangleAdjacency(); - - // ---------------------------------------------------------------------------- - /** @brief Get all triangles adjacent to a vertex - * @param iVertIndex Index of the vertex - * @return A pointer to the adjacency list. */ - unsigned int* GetAdjacentTriangles(unsigned int iVertIndex) const { - ai_assert(iVertIndex < mNumVertices); - return &mAdjacencyTable[ mOffsetTable[iVertIndex]]; - } - - // ---------------------------------------------------------------------------- - /** @brief Get the number of triangles that are referenced by - * a vertex. This function returns a reference that can be modified - * @param iVertIndex Index of the vertex - * @return Number of referenced triangles */ - unsigned int& GetNumTrianglesPtr(unsigned int iVertIndex) { - ai_assert( iVertIndex < mNumVertices ); - ai_assert( nullptr != mLiveTriangles ); - return mLiveTriangles[iVertIndex]; - } - - //! Offset table - unsigned int* mOffsetTable; - - //! Adjacency table - unsigned int* mAdjacencyTable; - - //! Table containing the number of referenced triangles per vertex - unsigned int* mLiveTriangles; - - //! Debug: Number of referenced vertices - unsigned int mNumVertices; -}; - -} //! ns Assimp - -#endif // !! AI_VTADJACENCY_H_INC diff --git a/thirdparty/assimp/code/Common/Win32DebugLogStream.h b/thirdparty/assimp/code/Common/Win32DebugLogStream.h deleted file mode 100644 index a6063a261e..0000000000 --- a/thirdparty/assimp/code/Common/Win32DebugLogStream.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Win32DebugLogStream.h -* @brief Implementation of Win32DebugLogStream -*/ -#ifndef AI_WIN32DEBUGLOGSTREAM_H_INC -#define AI_WIN32DEBUGLOGSTREAM_H_INC - -#ifdef _WIN32 - -#include <assimp/LogStream.hpp> -#include "windows.h" - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** @class Win32DebugLogStream - * @brief Logs into the debug stream from win32. - */ -class Win32DebugLogStream : public LogStream { -public: - /** @brief Default constructor */ - Win32DebugLogStream(); - - /** @brief Destructor */ - ~Win32DebugLogStream(); - - /** @brief Writer */ - void write(const char* messgae); -}; - -// --------------------------------------------------------------------------- -inline -Win32DebugLogStream::Win32DebugLogStream(){ - // empty -} - -// --------------------------------------------------------------------------- -inline -Win32DebugLogStream::~Win32DebugLogStream(){ - // empty -} - -// --------------------------------------------------------------------------- -inline -void Win32DebugLogStream::write(const char* message) { - ::OutputDebugStringA( message); -} - -// --------------------------------------------------------------------------- -} // Namespace Assimp - -#endif // ! _WIN32 -#endif // guard diff --git a/thirdparty/assimp/code/Common/assbin_chunks.h b/thirdparty/assimp/code/Common/assbin_chunks.h deleted file mode 100644 index 15e4af5e7d..0000000000 --- a/thirdparty/assimp/code/Common/assbin_chunks.h +++ /dev/null @@ -1,196 +0,0 @@ -#ifndef INCLUDED_ASSBIN_CHUNKS_H -#define INCLUDED_ASSBIN_CHUNKS_H - -#define ASSBIN_VERSION_MAJOR 1 -#define ASSBIN_VERSION_MINOR 0 - -/** -@page assfile .ASS File formats - -@section over Overview -Assimp provides its own interchange format, which is intended to applications which need -to serialize 3D-models and to reload them quickly. Assimp's file formats are designed to -be read by Assimp itself. They encode additional information needed by Assimp to optimize -its postprocessing pipeline. If you once apply specific steps to a scene, then save it -and reread it from an ASS format using the same post processing settings, they won't -be executed again. - -The format comes in two flavours: XML and binary - both of them hold a complete dump of -the 'aiScene' data structure returned by the APIs. The focus for the binary format -(<tt>.assbin</tt>) is fast loading. Optional deflate compression helps reduce file size. The XML -flavour, <tt>.assxml</tt> or simply .xml, is just a plain-to-xml conversion of aiScene. - -ASSBIN is Assimp's binary interchange format. assimp_cmd (<tt><root>/tools/assimp_cmd</tt>) is able to -write it and the core library provides a loader for it. - -@section assxml XML File format - -The format is pretty much self-explanatory due to its similarity to the in-memory aiScene structure. -With few exceptions, C structures are wrapped in XML elements. 
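
[Editorial aside] Before the binary layout is spelled out below, here is a minimal stand-alone sketch of how an external tool might sanity-check an .assbin file against the version constants defined at the top of this header. It assumes only the 512-byte header layout documented in the binary-format section that follows; the file name and I/O handling are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <fstream>

    int main() {
        std::ifstream in("scene.assbin", std::ios::binary);
        unsigned char header[512];
        if (!in.read(reinterpret_cast<char*>(header), sizeof(header))) {
            std::fprintf(stderr, "file too short for an ASSBIN header\n");
            return 1;
        }

        // byte[44]: magic identification string
        if (std::memcmp(header, "ASSIMP.binary", 13) != 0) {
            std::fprintf(stderr, "not an ASSBIN file\n");
            return 1;
        }

        // two little-endian 32-bit integers follow the magic: major, minor
        auto read_u32 = [&](unsigned off) {
            return uint32_t(header[off]) | uint32_t(header[off + 1]) << 8 |
                   uint32_t(header[off + 2]) << 16 | uint32_t(header[off + 3]) << 24;
        };
        const uint32_t major = read_u32(44);
        const uint32_t minor = read_u32(48);
        std::printf("ASSBIN version %u.%u (reader expects %u.%u)\n",
                    major, minor, 1u, 0u);  // ASSBIN_VERSION_MAJOR / _MINOR above
        return 0;
    }
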
- -The DTD for ASSXML can be found in <tt><root>/doc/AssXML_Scheme.xml</tt>. Or have look -at the output files generated by assimp_cmd. - -@section assbin Binary file format - -The ASSBIN file format is composed of chunks to represent the hierarchical aiScene data structure. -This makes the format extensible and allows backward-compatibility with future data structure -versions. The <tt><root>/code/assbin_chunks.h</tt> header contains some magic constants -for use by stand-alone ASSBIN loaders. Also, Assimp's own file writer can be found -in <tt><root>/tools/assimp_cmd/WriteDumb.cpp</tt> (yes, the 'b' is no typo ...). - -@verbatim - -------------------------------------------------------------------------------- -1. File structure: -------------------------------------------------------------------------------- - ----------------------- -| Header (512 bytes) | ----------------------- -| Variable chunks | ----------------------- - -------------------------------------------------------------------------------- -2. Definitions: -------------------------------------------------------------------------------- - -integer is four bytes wide, stored in little-endian byte order. -short is two bytes wide, stored in little-endian byte order. -byte is a single byte. -string is an integer n followed by n UTF-8 characters, not terminated by zero -float is an IEEE 754 single-precision floating-point value -double is an IEEE 754 double-precision floating-point value -t[n] is an array of n elements of type t - -------------------------------------------------------------------------------- -2. Header: -------------------------------------------------------------------------------- - -byte[44] Magic identification string for ASSBIN files. - 'ASSIMP.binary' - -integer Major version of the Assimp library which wrote the file -integer Minor version of the Assimp library which wrote the file - match these against ASSBIN_VERSION_MAJOR and ASSBIN_VERSION_MINOR - -integer SVN revision of the Assimp library (intended for our internal - debugging - if you write Ass files from your own APPs, set this value to 0. -integer Assimp compile flags - -short 0 for normal files, 1 for shortened dumps for regression tests - these should have the file extension assbin.regress - -short 1 if the data after the header is compressed with the DEFLATE algorithm, - 0 for uncompressed files. - For compressed files, the first integer after the header is - always the uncompressed data size - -byte[256] Zero-terminated source file name, UTF-8 -byte[128] Zero-terminated command line parameters passed to assimp_cmd, UTF-8 - -byte[64] Reserved for future use ----> Total length: 512 bytes - -------------------------------------------------------------------------------- -3. Chunks: -------------------------------------------------------------------------------- - -integer Magic chunk ID (ASSBIN_CHUNK_XXX) -integer Chunk data length, in bytes - (unknown chunks are possible, a good reader skips over them) - (chunk-data-length does not include the first two integers) - -byte[n] chunk-data-length bytes of data, depending on the chunk type - -Chunks can contain nested chunks. Nested chunks are ALWAYS at the end of the chunk, -their size is included in chunk-data-length. - -The chunk layout for all ASSIMP data structures is derived from their C declarations. -The general 'rule' to get from Assimp headers to the serialized layout is: - - 1. POD members (i.e. aiMesh::mPrimitiveTypes, aiMesh::mNumVertices), - in order of declaration. - - 2. 
Array-members (aiMesh::mFaces, aiMesh::mVertices, aiBone::mWeights), - in order of declaration. - - 2. Object array members (i.e aiMesh::mBones, aiScene::mMeshes) are stored in - subchunks directly following the data written in 1.) and 2.) - - - Of course, there are some exceptions to this general order: - -[[aiScene]] - - - The root node holding the scene structure is naturally stored in - a ASSBIN_CHUNK_AINODE subchunk following 1.) and 2.) (which is - empty for aiScene). - -[[aiMesh]] - - - mTextureCoords and mNumUVComponents are serialized as follows: - - [number of used uv channels times] - integer mNumUVComponents[n] - float mTextureCoords[n][3] - - -> more than AI_MAX_TEXCOORD_CHANNELS can be stored. This allows Assimp - builds with different settings for AI_MAX_TEXCOORD_CHANNELS to exchange - data. - -> the on-disk format always uses 3 floats to write UV coordinates. - If mNumUVComponents[0] is 1, the corresponding mTextureCoords array - consists of 3 floats. - - - The array member block of aiMesh is prefixed with an integer that specifies - the kinds of vertex components actually present in the mesh. This is a - bitwise combination of the ASSBIN_MESH_HAS_xxx constants. - -[[aiFace]] - - - mNumIndices is stored as short - - mIndices are written as short, if aiMesh::mNumVertices<65536 - -[[aiNode]] - - - mParent is omitted - -[[aiLight]] - - - mAttenuationXXX not written if aiLight::mType == aiLightSource_DIRECTIONAL - - mAngleXXX not written if aiLight::mType != aiLightSource_SPOT - -[[aiMaterial]] - - - mNumAllocated is omitted, for obvious reasons :-) - - - @endverbatim*/ - - -#define ASSBIN_HEADER_LENGTH 512 - -// these are the magic chunk identifiers for the binary ASS file format -#define ASSBIN_CHUNK_AICAMERA 0x1234 -#define ASSBIN_CHUNK_AILIGHT 0x1235 -#define ASSBIN_CHUNK_AITEXTURE 0x1236 -#define ASSBIN_CHUNK_AIMESH 0x1237 -#define ASSBIN_CHUNK_AINODEANIM 0x1238 -#define ASSBIN_CHUNK_AISCENE 0x1239 -#define ASSBIN_CHUNK_AIBONE 0x123a -#define ASSBIN_CHUNK_AIANIMATION 0x123b -#define ASSBIN_CHUNK_AINODE 0x123c -#define ASSBIN_CHUNK_AIMATERIAL 0x123d -#define ASSBIN_CHUNK_AIMATERIALPROPERTY 0x123e - -#define ASSBIN_MESH_HAS_POSITIONS 0x1 -#define ASSBIN_MESH_HAS_NORMALS 0x2 -#define ASSBIN_MESH_HAS_TANGENTS_AND_BITANGENTS 0x4 -#define ASSBIN_MESH_HAS_TEXCOORD_BASE 0x100 -#define ASSBIN_MESH_HAS_COLOR_BASE 0x10000 - -#define ASSBIN_MESH_HAS_TEXCOORD(n) (ASSBIN_MESH_HAS_TEXCOORD_BASE << n) -#define ASSBIN_MESH_HAS_COLOR(n) (ASSBIN_MESH_HAS_COLOR_BASE << n) - - -#endif // INCLUDED_ASSBIN_CHUNKS_H diff --git a/thirdparty/assimp/code/Common/scene.cpp b/thirdparty/assimp/code/Common/scene.cpp deleted file mode 100644 index d15619acff..0000000000 --- a/thirdparty/assimp/code/Common/scene.cpp +++ /dev/null @@ -1,140 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -#include <assimp/scene.h> - -aiNode::aiNode() -: mName("") -, mParent(nullptr) -, mNumChildren(0) -, mChildren(nullptr) -, mNumMeshes(0) -, mMeshes(nullptr) -, mMetaData(nullptr) { - // empty -} - -aiNode::aiNode(const std::string& name) -: mName(name) -, mParent(nullptr) -, mNumChildren(0) -, mChildren(nullptr) -, mNumMeshes(0) -, mMeshes(nullptr) -, mMetaData(nullptr) { - // empty -} - -/** Destructor */ -aiNode::~aiNode() { - // delete all children recursively - // to make sure we won't crash if the data is invalid ... - if (mNumChildren && mChildren) - { - for (unsigned int a = 0; a < mNumChildren; a++) - delete mChildren[a]; - } - delete[] mChildren; - delete[] mMeshes; - delete mMetaData; -} - -const aiNode *aiNode::FindNode(const char* name) const { - if (nullptr == name) { - return nullptr; - } - if (!::strcmp(mName.data, name)) { - return this; - } - for (unsigned int i = 0; i < mNumChildren; ++i) { - const aiNode* const p = mChildren[i]->FindNode(name); - if (p) { - return p; - } - } - // there is definitely no sub-node with this name - return nullptr; -} - -aiNode *aiNode::FindNode(const char* name) { - if (!::strcmp(mName.data, name))return this; - for (unsigned int i = 0; i < mNumChildren; ++i) - { - aiNode* const p = mChildren[i]->FindNode(name); - if (p) { - return p; - } - } - // there is definitely no sub-node with this name - return nullptr; -} - -void aiNode::addChildren(unsigned int numChildren, aiNode **children) { - if (nullptr == children || 0 == numChildren) { - return; - } - - for (unsigned int i = 0; i < numChildren; i++) { - aiNode *child = children[i]; - if (nullptr != child) { - child->mParent = this; - } - } - - if (mNumChildren > 0) { - aiNode **tmp = new aiNode*[mNumChildren]; - ::memcpy(tmp, mChildren, sizeof(aiNode*) * mNumChildren); - delete[] mChildren; - mChildren = new aiNode*[mNumChildren + numChildren]; - ::memcpy(mChildren, tmp, sizeof(aiNode*) * mNumChildren); - ::memcpy(&mChildren[mNumChildren], children, sizeof(aiNode*)* numChildren); - mNumChildren += numChildren; - delete[] tmp; - } - else { - mChildren = new aiNode*[numChildren]; - for (unsigned int i = 0; i < numChildren; i++) { - mChildren[i] = children[i]; - } - mNumChildren = numChildren; - } -} diff --git a/thirdparty/assimp/code/Common/simd.cpp b/thirdparty/assimp/code/Common/simd.cpp deleted file mode 100644 index 04615f408e..0000000000 --- a/thirdparty/assimp/code/Common/simd.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* 
---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -#include "simd.h" - -namespace Assimp { - -bool CPUSupportsSSE2() { -#if defined(__x86_64__) || defined(_M_X64) - //* x86_64 always has SSE2 instructions */ - return true; -#elif defined(__GNUC__) && defined(i386) - // for GCC x86 we check cpuid - unsigned int d; - __asm__( - "pushl %%ebx\n\t" - "cpuid\n\t" - "popl %%ebx\n\t" - : "=d" ( d ) - :"a" ( 1 ) ); - return ( d & 0x04000000 ) != 0; -#elif (defined(_MSC_VER) && defined(_M_IX86)) - // also check cpuid for MSVC x86 - unsigned int d; - __asm { - xor eax, eax - inc eax - push ebx - cpuid - pop ebx - mov d, edx - } - return ( d & 0x04000000 ) != 0; -#else - return false; -#endif -} - - -} // Namespace Assimp diff --git a/thirdparty/assimp/code/Common/simd.h b/thirdparty/assimp/code/Common/simd.h deleted file mode 100644 index 3eecdd4581..0000000000 --- a/thirdparty/assimp/code/Common/simd.h +++ /dev/null @@ -1,53 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -#pragma once - -#include <assimp/defs.h> - -namespace Assimp { - -/// @brief Checks if the platform supports SSE2 optimization -/// @return true, if SSE2 is supported. false if SSE2 is not supported. -bool ASSIMP_API CPUSupportsSSE2(); - -} // Namespace Assimp diff --git a/thirdparty/assimp/code/FBX/FBXAnimation.cpp b/thirdparty/assimp/code/FBX/FBXAnimation.cpp deleted file mode 100644 index 874914431b..0000000000 --- a/thirdparty/assimp/code/FBX/FBXAnimation.cpp +++ /dev/null @@ -1,305 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXAnimation.cpp - * @brief Assimp::FBX::AnimationCurve, Assimp::FBX::AnimationCurveNode, - * Assimp::FBX::AnimationLayer, Assimp::FBX::AnimationStack - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXImporter.h" -#include "FBXDocumentUtil.h" - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -AnimationCurve::AnimationCurve(uint64_t id, const Element& element, const std::string& name, const Document& /*doc*/) -: Object(id, element, name) -{ - const Scope& sc = GetRequiredScope(element); - const Element& KeyTime = GetRequiredElement(sc,"KeyTime"); - const Element& KeyValueFloat = GetRequiredElement(sc,"KeyValueFloat"); - - ParseVectorDataArray(keys, KeyTime); - ParseVectorDataArray(values, KeyValueFloat); - - if(keys.size() != values.size()) { - DOMError("the number of key times does not match the number of keyframe values",&KeyTime); - } - - // check if the key times are well-ordered - if(!std::equal(keys.begin(), keys.end() - 1, keys.begin() + 1, std::less<KeyTimeList::value_type>())) { - DOMError("the keyframes are not in ascending order",&KeyTime); - } - - const Element* KeyAttrDataFloat = sc["KeyAttrDataFloat"]; - if(KeyAttrDataFloat) { - ParseVectorDataArray(attributes, *KeyAttrDataFloat); - } - - const Element* KeyAttrFlags = sc["KeyAttrFlags"]; - if(KeyAttrFlags) { - ParseVectorDataArray(flags, *KeyAttrFlags); - } -} - -// ------------------------------------------------------------------------------------------------ -AnimationCurve::~AnimationCurve() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -AnimationCurveNode::AnimationCurveNode(uint64_t id, const Element& element, const std::string& name, - const Document& doc, const char* const * target_prop_whitelist /*= NULL*/, - size_t whitelist_size /*= 0*/) -: Object(id, element, name) -, target() -, doc(doc) -{ - const Scope& sc = GetRequiredScope(element); - - // find target node - const char* whitelist[] = {"Model","NodeAttribute","Deformer"}; - const std::vector<const Connection*>& conns = doc.GetConnectionsBySourceSequenced(ID(),whitelist,3); - - for(const Connection* con : conns) { - - // link should go for a property - if (!con->PropertyName().length()) { - continue; - } - - if(target_prop_whitelist) { - const char* const s = con->PropertyName().c_str(); - bool ok = false; - for (size_t i = 0; i < whitelist_size; ++i) { - if (!strcmp(s, target_prop_whitelist[i])) { - ok = true; - break; - } - } - - if (!ok) { - throw std::range_error("AnimationCurveNode target property is not in whitelist"); - } - } - - const Object* const ob = con->DestinationObject(); - if(!ob) { - DOMWarning("failed to read destination object for AnimationCurveNode->Model link, ignoring",&element); - continue; - } - - // XXX support constraints as DOM class - //ai_assert(dynamic_cast<const Model*>(ob) || dynamic_cast<const NodeAttribute*>(ob)); - target = ob; - if(!target) { - continue; - } - - prop = con->PropertyName(); - break; - } - - if(!target) { - DOMWarning("failed to resolve target Model/NodeAttribute/Constraint for AnimationCurveNode",&element); - } - - props = GetPropertyTable(doc,"AnimationCurveNode.FbxAnimCurveNode",element,sc,false); -} - -// 
------------------------------------------------------------------------------------------------ -AnimationCurveNode::~AnimationCurveNode() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -const AnimationCurveMap& AnimationCurveNode::Curves() const -{ - if ( curves.empty() ) { - // resolve attached animation curves - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"AnimationCurve"); - - for(const Connection* con : conns) { - - // link should go for a property - if (!con->PropertyName().length()) { - continue; - } - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for AnimationCurve->AnimationCurveNode link, ignoring",&element); - continue; - } - - const AnimationCurve* const anim = dynamic_cast<const AnimationCurve*>(ob); - if(!anim) { - DOMWarning("source object for ->AnimationCurveNode link is not an AnimationCurve",&element); - continue; - } - - curves[con->PropertyName()] = anim; - } - } - - return curves; -} - -// ------------------------------------------------------------------------------------------------ -AnimationLayer::AnimationLayer(uint64_t id, const Element& element, const std::string& name, const Document& doc) -: Object(id, element, name) -, doc(doc) -{ - const Scope& sc = GetRequiredScope(element); - - // note: the props table here bears little importance and is usually absent - props = GetPropertyTable(doc,"AnimationLayer.FbxAnimLayer",element,sc, true); -} - -// ------------------------------------------------------------------------------------------------ -AnimationLayer::~AnimationLayer() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -AnimationCurveNodeList AnimationLayer::Nodes(const char* const * target_prop_whitelist /*= NULL*/, - size_t whitelist_size /*= 0*/) const -{ - AnimationCurveNodeList nodes; - - // resolve attached animation nodes - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"AnimationCurveNode"); - nodes.reserve(conns.size()); - - for(const Connection* con : conns) { - - // link should not go to a property - if (con->PropertyName().length()) { - continue; - } - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for AnimationCurveNode->AnimationLayer link, ignoring",&element); - continue; - } - - const AnimationCurveNode* const anim = dynamic_cast<const AnimationCurveNode*>(ob); - if(!anim) { - DOMWarning("source object for ->AnimationLayer link is not an AnimationCurveNode",&element); - continue; - } - - if(target_prop_whitelist) { - const char* s = anim->TargetProperty().c_str(); - bool ok = false; - for (size_t i = 0; i < whitelist_size; ++i) { - if (!strcmp(s, target_prop_whitelist[i])) { - ok = true; - break; - } - } - if(!ok) { - continue; - } - } - nodes.push_back(anim); - } - - return nodes; // pray for NRVO -} - -// ------------------------------------------------------------------------------------------------ -AnimationStack::AnimationStack(uint64_t id, const Element& element, const std::string& name, const Document& doc) -: Object(id, element, name) -{ - const Scope& sc = GetRequiredScope(element); - - // note: we don't currently use any of these properties so we shouldn't bother if it is missing - props = GetPropertyTable(doc,"AnimationStack.FbxAnimStack",element,sc, true); - - // resolve attached 
animation layers - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"AnimationLayer"); - layers.reserve(conns.size()); - - for(const Connection* con : conns) { - - // link should not go to a property - if (con->PropertyName().length()) { - continue; - } - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for AnimationLayer->AnimationStack link, ignoring",&element); - continue; - } - - const AnimationLayer* const anim = dynamic_cast<const AnimationLayer*>(ob); - if(!anim) { - DOMWarning("source object for ->AnimationStack link is not an AnimationLayer",&element); - continue; - } - layers.push_back(anim); - } -} - -// ------------------------------------------------------------------------------------------------ -AnimationStack::~AnimationStack() -{ - // empty -} - -} //!FBX -} //!Assimp - -#endif // ASSIMP_BUILD_NO_FBX_IMPORTER diff --git a/thirdparty/assimp/code/FBX/FBXBinaryTokenizer.cpp b/thirdparty/assimp/code/FBX/FBXBinaryTokenizer.cpp deleted file mode 100644 index a4a2bc8e79..0000000000 --- a/thirdparty/assimp/code/FBX/FBXBinaryTokenizer.cpp +++ /dev/null @@ -1,466 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -/** @file FBXBinaryTokenizer.cpp - * @brief Implementation of a fake lexer for binary fbx files - - * we emit tokens so the parser needs almost no special handling - * for binary files. 
- */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXTokenizer.h" -#include "FBXUtil.h" -#include <assimp/defs.h> -#include <stdint.h> -#include <assimp/Exceptional.h> -#include <assimp/ByteSwapper.h> - -namespace Assimp { -namespace FBX { - -//enum Flag -//{ -// e_unknown_0 = 1 << 0, -// e_unknown_1 = 1 << 1, -// e_unknown_2 = 1 << 2, -// e_unknown_3 = 1 << 3, -// e_unknown_4 = 1 << 4, -// e_unknown_5 = 1 << 5, -// e_unknown_6 = 1 << 6, -// e_unknown_7 = 1 << 7, -// e_unknown_8 = 1 << 8, -// e_unknown_9 = 1 << 9, -// e_unknown_10 = 1 << 10, -// e_unknown_11 = 1 << 11, -// e_unknown_12 = 1 << 12, -// e_unknown_13 = 1 << 13, -// e_unknown_14 = 1 << 14, -// e_unknown_15 = 1 << 15, -// e_unknown_16 = 1 << 16, -// e_unknown_17 = 1 << 17, -// e_unknown_18 = 1 << 18, -// e_unknown_19 = 1 << 19, -// e_unknown_20 = 1 << 20, -// e_unknown_21 = 1 << 21, -// e_unknown_22 = 1 << 22, -// e_unknown_23 = 1 << 23, -// e_flag_field_size_64_bit = 1 << 24, // Not sure what is -// e_unknown_25 = 1 << 25, -// e_unknown_26 = 1 << 26, -// e_unknown_27 = 1 << 27, -// e_unknown_28 = 1 << 28, -// e_unknown_29 = 1 << 29, -// e_unknown_30 = 1 << 30, -// e_unknown_31 = 1 << 31 -//}; -// -//bool check_flag(uint32_t flags, Flag to_check) -//{ -// return (flags & to_check) != 0; -//} -// ------------------------------------------------------------------------------------------------ -Token::Token(const char* sbegin, const char* send, TokenType type, size_t offset) - : - #ifdef DEBUG - contents(sbegin, static_cast<size_t>(send-sbegin)), - #endif - sbegin(sbegin) - , send(send) - , type(type) - , line(offset) - , column(BINARY_MARKER) -{ - ai_assert(sbegin); - ai_assert(send); - - // binary tokens may have zero length because they are sometimes dummies - // inserted by TokenizeBinary() - ai_assert(send >= sbegin); -} - - -namespace { - -// ------------------------------------------------------------------------------------------------ -// signal tokenization error, this is always unrecoverable. Throws DeadlyImportError. 
-AI_WONT_RETURN void TokenizeError(const std::string& message, size_t offset) AI_WONT_RETURN_SUFFIX; -AI_WONT_RETURN void TokenizeError(const std::string& message, size_t offset) -{ - throw DeadlyImportError(Util::AddOffset("FBX-Tokenize",message,offset)); -} - - -// ------------------------------------------------------------------------------------------------ -size_t Offset(const char* begin, const char* cursor) { - ai_assert(begin <= cursor); - - return cursor - begin; -} - -// ------------------------------------------------------------------------------------------------ -void TokenizeError(const std::string& message, const char* begin, const char* cursor) { - TokenizeError(message, Offset(begin, cursor)); -} - -// ------------------------------------------------------------------------------------------------ -uint32_t ReadWord(const char* input, const char*& cursor, const char* end) { - const size_t k_to_read = sizeof( uint32_t ); - if(Offset(cursor, end) < k_to_read ) { - TokenizeError("cannot ReadWord, out of bounds",input, cursor); - } - - uint32_t word; - ::memcpy(&word, cursor, 4); - AI_SWAP4(word); - - cursor += k_to_read; - - return word; -} - -// ------------------------------------------------------------------------------------------------ -uint64_t ReadDoubleWord(const char* input, const char*& cursor, const char* end) { - const size_t k_to_read = sizeof(uint64_t); - if(Offset(cursor, end) < k_to_read) { - TokenizeError("cannot ReadDoubleWord, out of bounds",input, cursor); - } - - uint64_t dword /*= *reinterpret_cast<const uint64_t*>(cursor)*/; - ::memcpy( &dword, cursor, sizeof( uint64_t ) ); - AI_SWAP8(dword); - - cursor += k_to_read; - - return dword; -} - -// ------------------------------------------------------------------------------------------------ -uint8_t ReadByte(const char* input, const char*& cursor, const char* end) { - if(Offset(cursor, end) < sizeof( uint8_t ) ) { - TokenizeError("cannot ReadByte, out of bounds",input, cursor); - } - - uint8_t word;/* = *reinterpret_cast< const uint8_t* >( cursor )*/ - ::memcpy( &word, cursor, sizeof( uint8_t ) ); - ++cursor; - - return word; -} - -// ------------------------------------------------------------------------------------------------ -unsigned int ReadString(const char*& sbegin_out, const char*& send_out, const char* input, - const char*& cursor, const char* end, bool long_length = false, bool allow_null = false) { - const uint32_t len_len = long_length ? 4 : 1; - if(Offset(cursor, end) < len_len) { - TokenizeError("cannot ReadString, out of bounds reading length",input, cursor); - } - - const uint32_t length = long_length ? 
ReadWord(input, cursor, end) : ReadByte(input, cursor, end); - - if (Offset(cursor, end) < length) { - TokenizeError("cannot ReadString, length is out of bounds",input, cursor); - } - - sbegin_out = cursor; - cursor += length; - - send_out = cursor; - - if(!allow_null) { - for (unsigned int i = 0; i < length; ++i) { - if(sbegin_out[i] == '\0') { - TokenizeError("failed ReadString, unexpected NUL character in string",input, cursor); - } - } - } - - return length; -} - -// ------------------------------------------------------------------------------------------------ -void ReadData(const char*& sbegin_out, const char*& send_out, const char* input, const char*& cursor, const char* end) { - if(Offset(cursor, end) < 1) { - TokenizeError("cannot ReadData, out of bounds reading length",input, cursor); - } - - const char type = *cursor; - sbegin_out = cursor++; - - switch(type) - { - // 16 bit int - case 'Y': - cursor += 2; - break; - - // 1 bit bool flag (yes/no) - case 'C': - cursor += 1; - break; - - // 32 bit int - case 'I': - // <- fall through - - // float - case 'F': - cursor += 4; - break; - - // double - case 'D': - cursor += 8; - break; - - // 64 bit int - case 'L': - cursor += 8; - break; - - // note: do not write cursor += ReadWord(...cursor) as this would be UB - - // raw binary data - case 'R': - { - const uint32_t length = ReadWord(input, cursor, end); - cursor += length; - break; - } - - case 'b': - // TODO: what is the 'b' type code? Right now we just skip over it / - // take the full range we could get - cursor = end; - break; - - // array of * - case 'f': - case 'd': - case 'l': - case 'i': - case 'c': { - const uint32_t length = ReadWord(input, cursor, end); - const uint32_t encoding = ReadWord(input, cursor, end); - - const uint32_t comp_len = ReadWord(input, cursor, end); - - // compute length based on type and check against the stored value - if(encoding == 0) { - uint32_t stride = 0; - switch(type) - { - case 'f': - case 'i': - stride = 4; - break; - - case 'd': - case 'l': - stride = 8; - break; - - case 'c': - stride = 1; - break; - - default: - ai_assert(false); - }; - ai_assert(stride > 0); - if(length * stride != comp_len) { - TokenizeError("cannot ReadData, calculated data stride differs from what the file claims",input, cursor); - } - } - // zip/deflate algorithm (encoding==1)? take given length. anything else? die - else if (encoding != 1) { - TokenizeError("cannot ReadData, unknown encoding",input, cursor); - } - cursor += comp_len; - break; - } - - // string - case 'S': { - const char* sb, *se; - // 0 characters can legally happen in such strings - ReadString(sb, se, input, cursor, end, true, true); - break; - } - default: - TokenizeError("cannot ReadData, unexpected type code: " + std::string(&type, 1),input, cursor); - } - - if(cursor > end) { - TokenizeError("cannot ReadData, the remaining size is too small for the data type: " + std::string(&type, 1),input, cursor); - } - - // the type code is contained in the returned range - send_out = cursor; -} - - -// ------------------------------------------------------------------------------------------------ -bool ReadScope(TokenList& output_tokens, const char* input, const char*& cursor, const char* end, bool const is64bits) -{ - // the first word contains the offset at which this block ends - const uint64_t end_offset = is64bits ? 
ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); - - // we may get 0 if reading reached the end of the file - - // fbx files have a mysterious extra footer which I don't know - // how to extract any information from, but at least it always - // starts with a 0. - if(!end_offset) { - return false; - } - - if(end_offset > Offset(input, end)) { - TokenizeError("block offset is out of range",input, cursor); - } - else if(end_offset < Offset(input, cursor)) { - TokenizeError("block offset is negative out of range",input, cursor); - } - - // the second data word contains the number of properties in the scope - const uint64_t prop_count = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); - - // the third data word contains the length of the property list - const uint64_t prop_length = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end); - - // now comes the name of the scope/key - const char* sbeg, *send; - ReadString(sbeg, send, input, cursor, end); - - output_tokens.push_back(new_Token(sbeg, send, TokenType_KEY, Offset(input, cursor) )); - - // now come the individual properties - const char* begin_cursor = cursor; - for (unsigned int i = 0; i < prop_count; ++i) { - ReadData(sbeg, send, input, cursor, begin_cursor + prop_length); - - output_tokens.push_back(new_Token(sbeg, send, TokenType_DATA, Offset(input, cursor) )); - - if(i != prop_count-1) { - output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_COMMA, Offset(input, cursor) )); - } - } - - if (Offset(begin_cursor, cursor) != prop_length) { - TokenizeError("property length not reached, something is wrong",input, cursor); - } - - // at the end of each nested block, there is a NUL record to indicate - // that the sub-scope exists (i.e. to distinguish between P: and P : {}) - // this NUL record is 13 bytes long on 32 bit version and 25 bytes long on 64 bit. - const size_t sentinel_block_length = is64bits ? (sizeof(uint64_t)* 3 + 1) : (sizeof(uint32_t)* 3 + 1); - - if (Offset(input, cursor) < end_offset) { - if (end_offset - Offset(input, cursor) < sentinel_block_length) { - TokenizeError("insufficient padding bytes at block end",input, cursor); - } - - output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_OPEN_BRACKET, Offset(input, cursor) )); - - // XXX this is vulnerable to stack overflowing .. 
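
[Editorial aside] The XXX note above flags that ReadScope recurses once per nested block, so a crafted file with very deep nesting could exhaust the stack. Purely as an illustration (this guard is not part of the deleted code), one common mitigation is to thread an explicit depth counter through the recursion and reject pathological input deterministically. A toy, self-contained version of that pattern over nested braces:

    #include <cstddef>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    constexpr int kMaxDepth = 256;  // illustrative limit

    // Parses nested "{...}" blocks, mirroring the shape of a recursive scope reader.
    size_t ParseBlock(const std::string& s, size_t pos, int depth) {
        if (depth > kMaxDepth)
            throw std::runtime_error("nesting exceeds maximum depth");
        while (pos < s.size() && s[pos] != '}') {
            if (s[pos] == '{')
                pos = ParseBlock(s, pos + 1, depth + 1);  // recurse into the child scope
            else
                ++pos;
        }
        return pos + 1;  // position just past the closing brace
    }

    int main() {
        try {
            ParseBlock(std::string(100000, '{'), 0, 0);   // pathological input
        } catch (const std::exception& e) {
            std::printf("rejected: %s\n", e.what());      // fails long before the stack does
        }
        return 0;
    }
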
- while(Offset(input, cursor) < end_offset - sentinel_block_length) { - ReadScope(output_tokens, input, cursor, input + end_offset - sentinel_block_length, is64bits); - } - output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_CLOSE_BRACKET, Offset(input, cursor) )); - - for (unsigned int i = 0; i < sentinel_block_length; ++i) { - if(cursor[i] != '\0') { - TokenizeError("failed to read nested block sentinel, expected all bytes to be 0",input, cursor); - } - } - cursor += sentinel_block_length; - } - - if (Offset(input, cursor) != end_offset) { - TokenizeError("scope length not reached, something is wrong",input, cursor); - } - - return true; -} - -} // anonymous namespace - -// ------------------------------------------------------------------------------------------------ -// TODO: Test FBX Binary files newer than the 7500 version to check if the 64 bits address behaviour is consistent -void TokenizeBinary(TokenList& output_tokens, const char* input, size_t length) -{ - ai_assert(input); - - if(length < 0x1b) { - TokenizeError("file is too short",0); - } - - //uint32_t offset = 0x15; -/* const char* cursor = input + 0x15; - - const uint32_t flags = ReadWord(input, cursor, input + length); - - const uint8_t padding_0 = ReadByte(input, cursor, input + length); // unused - const uint8_t padding_1 = ReadByte(input, cursor, input + length); // unused*/ - - if (strncmp(input,"Kaydara FBX Binary",18)) { - TokenizeError("magic bytes not found",0); - } - - const char* cursor = input + 18; - /*Result ignored*/ ReadByte(input, cursor, input + length); - /*Result ignored*/ ReadByte(input, cursor, input + length); - /*Result ignored*/ ReadByte(input, cursor, input + length); - /*Result ignored*/ ReadByte(input, cursor, input + length); - /*Result ignored*/ ReadByte(input, cursor, input + length); - const uint32_t version = ReadWord(input, cursor, input + length); - const bool is64bits = version >= 7500; - const char *end = input + length; - while (cursor < end ) { - if (!ReadScope(output_tokens, input, cursor, input + length, is64bits)) { - break; - } - } -} - -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXCommon.h b/thirdparty/assimp/code/FBX/FBXCommon.h deleted file mode 100644 index e516449130..0000000000 --- a/thirdparty/assimp/code/FBX/FBXCommon.h +++ /dev/null @@ -1,86 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXCommon.h -* Some useful constants and enums for dealing with FBX files. -*/ -#ifndef AI_FBXCOMMON_H_INC -#define AI_FBXCOMMON_H_INC - -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -namespace Assimp { -namespace FBX -{ - const std::string NULL_RECORD = { // 13 null bytes - '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' - }; // who knows why - const std::string SEPARATOR = {'\x00', '\x01'}; // for use inside strings - const std::string MAGIC_NODE_TAG = "_$AssimpFbx$"; // from import - const int64_t SECOND = 46186158000; // FBX's kTime unit - - // rotation order. We'll probably use EulerXYZ for everything - enum RotOrder { - RotOrder_EulerXYZ = 0, - RotOrder_EulerXZY, - RotOrder_EulerYZX, - RotOrder_EulerYXZ, - RotOrder_EulerZXY, - RotOrder_EulerZYX, - - RotOrder_SphericXYZ, - - RotOrder_MAX // end-of-enum sentinel - }; - - // transformation inheritance method. Most of the time RSrs - enum TransformInheritance { - TransformInheritance_RrSs = 0, - TransformInheritance_RSrs, - TransformInheritance_Rrs, - - TransformInheritance_MAX // end-of-enum sentinel - }; -} -} -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER - -#endif // AI_FBXCOMMON_H_INC diff --git a/thirdparty/assimp/code/FBX/FBXCompileConfig.h b/thirdparty/assimp/code/FBX/FBXCompileConfig.h deleted file mode 100644 index 03536a1823..0000000000 --- a/thirdparty/assimp/code/FBX/FBXCompileConfig.h +++ /dev/null @@ -1,78 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXCompileConfig.h - * @brief FBX importer compile-time switches - */ -#ifndef INCLUDED_AI_FBX_COMPILECONFIG_H -#define INCLUDED_AI_FBX_COMPILECONFIG_H - -#include <map> -#include <set> - -// -#if _MSC_VER > 1500 || (defined __GNUC___) -# define ASSIMP_FBX_USE_UNORDERED_MULTIMAP -# else -# define fbx_unordered_map map -# define fbx_unordered_multimap multimap -# define fbx_unordered_set set -# define fbx_unordered_multiset multiset -#endif - -#ifdef ASSIMP_FBX_USE_UNORDERED_MULTIMAP -# include <unordered_map> -# include <unordered_set> -# if _MSC_VER > 1600 -# define fbx_unordered_map unordered_map -# define fbx_unordered_multimap unordered_multimap -# define fbx_unordered_set unordered_set -# define fbx_unordered_multiset unordered_multiset -# else -# define fbx_unordered_map tr1::unordered_map -# define fbx_unordered_multimap tr1::unordered_multimap -# define fbx_unordered_set tr1::unordered_set -# define fbx_unordered_multiset tr1::unordered_multiset -# endif -#endif - -#endif // INCLUDED_AI_FBX_COMPILECONFIG_H diff --git a/thirdparty/assimp/code/FBX/FBXConverter.cpp b/thirdparty/assimp/code/FBX/FBXConverter.cpp deleted file mode 100644 index d8a22d9f74..0000000000 --- a/thirdparty/assimp/code/FBX/FBXConverter.cpp +++ /dev/null @@ -1,3727 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXConverter.cpp - * @brief Implementation of the FBX DOM -> aiScene converter - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXConverter.h" -#include "FBXParser.h" -#include "FBXMeshGeometry.h" -#include "FBXDocument.h" -#include "FBXUtil.h" -#include "FBXProperties.h" -#include "FBXImporter.h" - -#include <assimp/StringComparison.h> -#include <assimp/MathFunctions.h> - -#include <assimp/scene.h> - -#include <assimp/CreateAnimMesh.h> - -#include <tuple> -#include <memory> -#include <iterator> -#include <vector> -#include <sstream> -#include <iomanip> -#include <cstdint> -#include <iostream> -#include <stdlib.h> - -namespace Assimp { - namespace FBX { - - using namespace Util; - -#define MAGIC_NODE_TAG "_$AssimpFbx$" - -#define CONVERT_FBX_TIME(time) static_cast<double>(time) / 46186158000LL - - FBXConverter::FBXConverter(aiScene* out, const Document& doc, bool removeEmptyBones ) - : defaultMaterialIndex() - , lights() - , cameras() - , textures() - , materials_converted() - , textures_converted() - , meshes_converted() - , node_anim_chain_bits() - , mNodeNames() - , anim_fps() - , out(out) - , doc(doc) { - // animations need to be converted first since this will - // populate the node_anim_chain_bits map, which is needed - // to determine which nodes need to be generated. - ConvertAnimations(); - // Embedded textures in FBX could be connected to nothing but to itself, - // for instance Texture -> Video connection only but not to the main graph, - // The idea here is to traverse all objects to find these Textures and convert them, - // so later during material conversion it will find converted texture in the textures_converted array. - if (doc.Settings().readTextures) - { - ConvertOrphantEmbeddedTextures(); - } - ConvertRootNode(); - - if (doc.Settings().readAllMaterials) { - // unfortunately this means we have to evaluate all objects - for (const ObjectMap::value_type& v : doc.Objects()) { - - const Object* ob = v.second->Get(); - if (!ob) { - continue; - } - - const Material* mat = dynamic_cast<const Material*>(ob); - if (mat) { - - if (materials_converted.find(mat) == materials_converted.end()) { - ConvertMaterial(*mat, 0); - } - } - } - } - - ConvertGlobalSettings(); - TransferDataToScene(); - - // if we didn't read any meshes set the AI_SCENE_FLAGS_INCOMPLETE - // to make sure the scene passes assimp's validation. FBX files - // need not contain geometry (i.e. camera animations, raw armatures). 
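The AI_SCENE_FLAGS_INCOMPLETE handling described in the constructor comment below matters mostly on the consumer side: a geometry-less FBX (camera rigs, bare armatures) still yields a valid aiScene, it is merely flagged. A hedged usage sketch with an illustrative helper name:

    #include <assimp/scene.h>

    // Illustrative only: an incomplete scene (no meshes) is still usable for
    // cameras, lights and animations; the flag just keeps validation from failing.
    static bool scene_has_geometry(const aiScene *scene) {
        if (scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE) {
            return false;
        }
        return scene->mNumMeshes > 0;
    }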
- if (out->mNumMeshes == 0) { - out->mFlags |= AI_SCENE_FLAGS_INCOMPLETE; - } - } - - - FBXConverter::~FBXConverter() { - std::for_each(meshes.begin(), meshes.end(), Util::delete_fun<aiMesh>()); - std::for_each(materials.begin(), materials.end(), Util::delete_fun<aiMaterial>()); - std::for_each(animations.begin(), animations.end(), Util::delete_fun<aiAnimation>()); - std::for_each(lights.begin(), lights.end(), Util::delete_fun<aiLight>()); - std::for_each(cameras.begin(), cameras.end(), Util::delete_fun<aiCamera>()); - std::for_each(textures.begin(), textures.end(), Util::delete_fun<aiTexture>()); - } - - void FBXConverter::ConvertRootNode() { - out->mRootNode = new aiNode(); - std::string unique_name; - GetUniqueName("RootNode", unique_name); - out->mRootNode->mName.Set(unique_name); - - // root has ID 0 - ConvertNodes(0L, out->mRootNode, out->mRootNode); - } - - static std::string getAncestorBaseName(const aiNode* node) - { - const char* nodeName = nullptr; - size_t length = 0; - while (node && (!nodeName || length == 0)) - { - nodeName = node->mName.C_Str(); - length = node->mName.length; - node = node->mParent; - } - - if (!nodeName || length == 0) - { - return {}; - } - // could be std::string_view if c++17 available - return std::string(nodeName, length); - } - - // Make unique name - std::string FBXConverter::MakeUniqueNodeName(const Model* const model, const aiNode& parent) - { - std::string original_name = FixNodeName(model->Name()); - if (original_name.empty()) - { - original_name = getAncestorBaseName(&parent); - } - std::string unique_name; - GetUniqueName(original_name, unique_name); - return unique_name; - } - /// todo: pre-build node hierarchy - /// todo: get bone from stack - /// todo: make map of aiBone* to aiNode* - /// then update convert clusters to the new format - void FBXConverter::ConvertNodes(uint64_t id, aiNode *parent, aiNode *root_node) { - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(id, "Model"); - - std::vector<aiNode*> nodes; - nodes.reserve(conns.size()); - - std::vector<aiNode*> nodes_chain; - std::vector<aiNode*> post_nodes_chain; - - try { - for (const Connection* con : conns) { - // ignore object-property links - if (con->PropertyName().length()) { - // really important we document why this is ignored. - FBXImporter::LogInfo("ignoring property link - no docs on why this is ignored"); - continue; //? - } - - // convert connection source object into Object base class - const Object* const object = con->SourceObject(); - if (nullptr == object) { - FBXImporter::LogError("failed to convert source object for Model link"); - continue; - } - - // FBX Model::Cube, Model::Bone001, etc elements - // This detects if we can cast the object into this model structure. - const Model* const model = dynamic_cast<const Model*>(object); - - if (nullptr != model) { - nodes_chain.clear(); - post_nodes_chain.clear(); - - aiMatrix4x4 new_abs_transform = parent->mTransformation; - std::string node_name = FixNodeName(model->Name()); - // even though there is only a single input node, the design of - // assimp (or rather: the complicated transformation chain that - // is employed by fbx) means that we may need multiple aiNode's - // to represent a fbx node's transformation. 
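The "multiple aiNode's" mentioned in the comment above are the pivot/offset helper nodes produced by the transformation chain further down; NameTransformationChainNode() concatenates the node name, the "_$AssimpFbx$" tag and the component name. A standalone illustration of the resulting names (not the removed function itself):

    #include <string>

    // Helper-node naming scheme used by the transformation chain:
    // "<node>_$AssimpFbx$_<component>".
    static std::string chain_node_name(const std::string &node, const std::string &component) {
        return node + "_$AssimpFbx$" + "_" + component;
    }
    // chain_node_name("Cube", "RotationPivot") -> "Cube_$AssimpFbx$_RotationPivot"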
- - - // generate node transforms - this includes pivot data - // if need_additional_node is true then you t - const bool need_additional_node = GenerateTransformationNodeChain(*model, node_name, nodes_chain, post_nodes_chain); - - // assert that for the current node we must have at least a single transform - ai_assert(nodes_chain.size()); - - if (need_additional_node) { - nodes_chain.push_back(new aiNode(node_name)); - } - - //setup metadata on newest node - SetupNodeMetadata(*model, *nodes_chain.back()); - - // link all nodes in a row - aiNode* last_parent = parent; - for (aiNode* child : nodes_chain) { - ai_assert(child); - - if (last_parent != parent) { - last_parent->mNumChildren = 1; - last_parent->mChildren = new aiNode*[1]; - last_parent->mChildren[0] = child; - } - - child->mParent = last_parent; - last_parent = child; - - new_abs_transform *= child->mTransformation; - } - - // attach geometry - ConvertModel(*model, nodes_chain.back(), root_node, new_abs_transform); - - // check if there will be any child nodes - const std::vector<const Connection*>& child_conns - = doc.GetConnectionsByDestinationSequenced(model->ID(), "Model"); - - // if so, link the geometric transform inverse nodes - // before we attach any child nodes - if (child_conns.size()) { - for (aiNode* postnode : post_nodes_chain) { - ai_assert(postnode); - - if (last_parent != parent) { - last_parent->mNumChildren = 1; - last_parent->mChildren = new aiNode*[1]; - last_parent->mChildren[0] = postnode; - } - - postnode->mParent = last_parent; - last_parent = postnode; - - new_abs_transform *= postnode->mTransformation; - } - } - else { - // free the nodes we allocated as we don't need them - Util::delete_fun<aiNode> deleter; - std::for_each( - post_nodes_chain.begin(), - post_nodes_chain.end(), - deleter - ); - } - - // recursion call - child nodes - ConvertNodes(model->ID(), last_parent, root_node); - - if (doc.Settings().readLights) { - ConvertLights(*model, node_name); - } - - if (doc.Settings().readCameras) { - ConvertCameras(*model, node_name); - } - - nodes.push_back(nodes_chain.front()); - nodes_chain.clear(); - } - } - - if (nodes.size()) { - parent->mChildren = new aiNode*[nodes.size()](); - parent->mNumChildren = static_cast<unsigned int>(nodes.size()); - - std::swap_ranges(nodes.begin(), nodes.end(), parent->mChildren); - } - else - { - parent->mNumChildren = 0; - parent->mChildren = nullptr; - } - - } - catch (std::exception&) { - Util::delete_fun<aiNode> deleter; - std::for_each(nodes.begin(), nodes.end(), deleter); - std::for_each(nodes_chain.begin(), nodes_chain.end(), deleter); - std::for_each(post_nodes_chain.begin(), post_nodes_chain.end(), deleter); - } - } - - - void FBXConverter::ConvertLights(const Model& model, const std::string &orig_name) { - const std::vector<const NodeAttribute*>& node_attrs = model.GetAttributes(); - for (const NodeAttribute* attr : node_attrs) { - const Light* const light = dynamic_cast<const Light*>(attr); - if (light) { - ConvertLight(*light, orig_name); - } - } - } - - void FBXConverter::ConvertCameras(const Model& model, const std::string &orig_name) { - const std::vector<const NodeAttribute*>& node_attrs = model.GetAttributes(); - for (const NodeAttribute* attr : node_attrs) { - const Camera* const cam = dynamic_cast<const Camera*>(attr); - if (cam) { - ConvertCamera(*cam, orig_name); - } - } - } - - void FBXConverter::ConvertLight(const Light& light, const std::string &orig_name) { - lights.push_back(new aiLight()); - aiLight* const out_light = lights.back(); - - 
out_light->mName.Set(orig_name); - - const float intensity = light.Intensity() / 100.0f; - const aiVector3D& col = light.Color(); - - out_light->mColorDiffuse = aiColor3D(col.x, col.y, col.z); - out_light->mColorDiffuse.r *= intensity; - out_light->mColorDiffuse.g *= intensity; - out_light->mColorDiffuse.b *= intensity; - - out_light->mColorSpecular = out_light->mColorDiffuse; - - //lights are defined along negative y direction - out_light->mPosition = aiVector3D(0.0f); - out_light->mDirection = aiVector3D(0.0f, -1.0f, 0.0f); - out_light->mUp = aiVector3D(0.0f, 0.0f, -1.0f); - - switch (light.LightType()) - { - case Light::Type_Point: - out_light->mType = aiLightSource_POINT; - break; - - case Light::Type_Directional: - out_light->mType = aiLightSource_DIRECTIONAL; - break; - - case Light::Type_Spot: - out_light->mType = aiLightSource_SPOT; - out_light->mAngleOuterCone = AI_DEG_TO_RAD(light.OuterAngle()); - out_light->mAngleInnerCone = AI_DEG_TO_RAD(light.InnerAngle()); - break; - - case Light::Type_Area: - FBXImporter::LogWarn("cannot represent area light, set to UNDEFINED"); - out_light->mType = aiLightSource_UNDEFINED; - break; - - case Light::Type_Volume: - FBXImporter::LogWarn("cannot represent volume light, set to UNDEFINED"); - out_light->mType = aiLightSource_UNDEFINED; - break; - default: - ai_assert(false); - } - - float decay = light.DecayStart(); - switch (light.DecayType()) - { - case Light::Decay_None: - out_light->mAttenuationConstant = decay; - out_light->mAttenuationLinear = 0.0f; - out_light->mAttenuationQuadratic = 0.0f; - break; - case Light::Decay_Linear: - out_light->mAttenuationConstant = 0.0f; - out_light->mAttenuationLinear = 2.0f / decay; - out_light->mAttenuationQuadratic = 0.0f; - break; - case Light::Decay_Quadratic: - out_light->mAttenuationConstant = 0.0f; - out_light->mAttenuationLinear = 0.0f; - out_light->mAttenuationQuadratic = 2.0f / (decay * decay); - break; - case Light::Decay_Cubic: - FBXImporter::LogWarn("cannot represent cubic attenuation, set to Quadratic"); - out_light->mAttenuationQuadratic = 1.0f; - break; - default: - ai_assert(false); - break; - } - } - - void FBXConverter::ConvertCamera(const Camera& cam, const std::string &orig_name) - { - cameras.push_back(new aiCamera()); - aiCamera* const out_camera = cameras.back(); - - out_camera->mName.Set(orig_name); - - out_camera->mAspect = cam.AspectWidth() / cam.AspectHeight(); - - out_camera->mPosition = aiVector3D(0.0f); - out_camera->mLookAt = aiVector3D(1.0f, 0.0f, 0.0f); - out_camera->mUp = aiVector3D(0.0f, 1.0f, 0.0f); - - out_camera->mHorizontalFOV = AI_DEG_TO_RAD(cam.FieldOfView()); - - out_camera->mClipPlaneNear = cam.NearPlane(); - out_camera->mClipPlaneFar = cam.FarPlane(); - - out_camera->mHorizontalFOV = AI_DEG_TO_RAD(cam.FieldOfView()); - out_camera->mClipPlaneNear = cam.NearPlane(); - out_camera->mClipPlaneFar = cam.FarPlane(); - } - - void FBXConverter::GetUniqueName(const std::string &name, std::string &uniqueName) - { - uniqueName = name; - auto it_pair = mNodeNames.insert({ name, 0 }); // duplicate node name instance count - unsigned int& i = it_pair.first->second; - while (!it_pair.second) - { - i++; - std::ostringstream ext; - ext << name << std::setfill('0') << std::setw(3) << i; - uniqueName = ext.str(); - it_pair = mNodeNames.insert({ uniqueName, 0 }); - } - } - - const char* FBXConverter::NameTransformationComp(TransformationComp comp) { - switch (comp) { - case TransformationComp_Translation: - return "Translation"; - case TransformationComp_RotationOffset: - return 
"RotationOffset"; - case TransformationComp_RotationPivot: - return "RotationPivot"; - case TransformationComp_PreRotation: - return "PreRotation"; - case TransformationComp_Rotation: - return "Rotation"; - case TransformationComp_PostRotation: - return "PostRotation"; - case TransformationComp_RotationPivotInverse: - return "RotationPivotInverse"; - case TransformationComp_ScalingOffset: - return "ScalingOffset"; - case TransformationComp_ScalingPivot: - return "ScalingPivot"; - case TransformationComp_Scaling: - return "Scaling"; - case TransformationComp_ScalingPivotInverse: - return "ScalingPivotInverse"; - case TransformationComp_GeometricScaling: - return "GeometricScaling"; - case TransformationComp_GeometricRotation: - return "GeometricRotation"; - case TransformationComp_GeometricTranslation: - return "GeometricTranslation"; - case TransformationComp_GeometricScalingInverse: - return "GeometricScalingInverse"; - case TransformationComp_GeometricRotationInverse: - return "GeometricRotationInverse"; - case TransformationComp_GeometricTranslationInverse: - return "GeometricTranslationInverse"; - case TransformationComp_MAXIMUM: // this is to silence compiler warnings - default: - break; - } - - ai_assert(false); - - return nullptr; - } - - const char* FBXConverter::NameTransformationCompProperty(TransformationComp comp) { - switch (comp) { - case TransformationComp_Translation: - return "Lcl Translation"; - case TransformationComp_RotationOffset: - return "RotationOffset"; - case TransformationComp_RotationPivot: - return "RotationPivot"; - case TransformationComp_PreRotation: - return "PreRotation"; - case TransformationComp_Rotation: - return "Lcl Rotation"; - case TransformationComp_PostRotation: - return "PostRotation"; - case TransformationComp_RotationPivotInverse: - return "RotationPivotInverse"; - case TransformationComp_ScalingOffset: - return "ScalingOffset"; - case TransformationComp_ScalingPivot: - return "ScalingPivot"; - case TransformationComp_Scaling: - return "Lcl Scaling"; - case TransformationComp_ScalingPivotInverse: - return "ScalingPivotInverse"; - case TransformationComp_GeometricScaling: - return "GeometricScaling"; - case TransformationComp_GeometricRotation: - return "GeometricRotation"; - case TransformationComp_GeometricTranslation: - return "GeometricTranslation"; - case TransformationComp_GeometricScalingInverse: - return "GeometricScalingInverse"; - case TransformationComp_GeometricRotationInverse: - return "GeometricRotationInverse"; - case TransformationComp_GeometricTranslationInverse: - return "GeometricTranslationInverse"; - case TransformationComp_MAXIMUM: // this is to silence compiler warnings - break; - } - - ai_assert(false); - - return nullptr; - } - - aiVector3D FBXConverter::TransformationCompDefaultValue(TransformationComp comp) - { - // XXX a neat way to solve the never-ending special cases for scaling - // would be to do everything in log space! - return comp == TransformationComp_Scaling ? 
aiVector3D(1.f, 1.f, 1.f) : aiVector3D(); - } - - void FBXConverter::GetRotationMatrix(Model::RotOrder mode, const aiVector3D& rotation, aiMatrix4x4& out) - { - if (mode == Model::RotOrder_SphericXYZ) { - FBXImporter::LogError("Unsupported RotationMode: SphericXYZ"); - out = aiMatrix4x4(); - return; - } - - const float angle_epsilon = Math::getEpsilon<float>(); - - out = aiMatrix4x4(); - - bool is_id[3] = { true, true, true }; - - aiMatrix4x4 temp[3]; - if (std::fabs(rotation.z) > angle_epsilon) { - aiMatrix4x4::RotationZ(AI_DEG_TO_RAD(rotation.z), temp[2]); - is_id[2] = false; - } - if (std::fabs(rotation.y) > angle_epsilon) { - aiMatrix4x4::RotationY(AI_DEG_TO_RAD(rotation.y), temp[1]); - is_id[1] = false; - } - if (std::fabs(rotation.x) > angle_epsilon) { - aiMatrix4x4::RotationX(AI_DEG_TO_RAD(rotation.x), temp[0]); - is_id[0] = false; - } - - int order[3] = { -1, -1, -1 }; - - // note: rotation order is inverted since we're left multiplying as is usual in assimp - switch (mode) - { - case Model::RotOrder_EulerXYZ: - order[0] = 2; - order[1] = 1; - order[2] = 0; - break; - - case Model::RotOrder_EulerXZY: - order[0] = 1; - order[1] = 2; - order[2] = 0; - break; - - case Model::RotOrder_EulerYZX: - order[0] = 0; - order[1] = 2; - order[2] = 1; - break; - - case Model::RotOrder_EulerYXZ: - order[0] = 2; - order[1] = 0; - order[2] = 1; - break; - - case Model::RotOrder_EulerZXY: - order[0] = 1; - order[1] = 0; - order[2] = 2; - break; - - case Model::RotOrder_EulerZYX: - order[0] = 0; - order[1] = 1; - order[2] = 2; - break; - - default: - ai_assert(false); - break; - } - - ai_assert(order[0] >= 0); - ai_assert(order[0] <= 2); - ai_assert(order[1] >= 0); - ai_assert(order[1] <= 2); - ai_assert(order[2] >= 0); - ai_assert(order[2] <= 2); - - if (!is_id[order[0]]) { - out = temp[order[0]]; - } - - if (!is_id[order[1]]) { - out = out * temp[order[1]]; - } - - if (!is_id[order[2]]) { - out = out * temp[order[2]]; - } - } - - bool FBXConverter::NeedsComplexTransformationChain(const Model& model) - { - const PropertyTable& props = model.Props(); - bool ok; - - const float zero_epsilon = 1e-6f; - const aiVector3D all_ones(1.0f, 1.0f, 1.0f); - for (size_t i = 0; i < TransformationComp_MAXIMUM; ++i) { - const TransformationComp comp = static_cast<TransformationComp>(i); - - if (comp == TransformationComp_Rotation || comp == TransformationComp_Scaling || comp == TransformationComp_Translation) { - continue; - } - - bool scale_compare = (comp == TransformationComp_GeometricScaling || comp == TransformationComp_Scaling); - - const aiVector3D& v = PropertyGet<aiVector3D>(props, NameTransformationCompProperty(comp), ok); - if (ok && scale_compare) { - if ((v - all_ones).SquareLength() > zero_epsilon) { - return true; - } - } else if (ok) { - if (v.SquareLength() > zero_epsilon) { - return true; - } - } - } - - return false; - } - - std::string FBXConverter::NameTransformationChainNode(const std::string& name, TransformationComp comp) - { - return name + std::string(MAGIC_NODE_TAG) + "_" + NameTransformationComp(comp); - } - - bool FBXConverter::GenerateTransformationNodeChain(const Model& model, const std::string& name, std::vector<aiNode*>& output_nodes, - std::vector<aiNode*>& post_output_nodes) { - const PropertyTable& props = model.Props(); - const Model::RotOrder rot = model.RotationOrder(); - - bool ok; - - aiMatrix4x4 chain[TransformationComp_MAXIMUM]; - - ai_assert(TransformationComp_MAXIMUM < 32); - std::uint32_t chainBits = 0; - // A node won't need a node chain if it only has these. 
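A concrete reading of the order inversion in GetRotationMatrix() above: for Model::RotOrder_EulerXYZ the code selects order = {2, 1, 0}, so the composed matrix is Rz * Ry * Rx and a column vector is rotated about X first, then Y, then Z. Expanded by hand as a sketch, skipping the identity/epsilon shortcuts of the real code:

    #include <assimp/matrix4x4.h>
    #include <assimp/vector3.h>
    #include <assimp/defs.h>  // AI_DEG_TO_RAD

    // Sketch only: EulerXYZ composition as GetRotationMatrix builds it.
    static aiMatrix4x4 euler_xyz_degrees(const aiVector3D &rotation) {
        aiMatrix4x4 rx, ry, rz;
        aiMatrix4x4::RotationX(AI_DEG_TO_RAD(rotation.x), rx);
        aiMatrix4x4::RotationY(AI_DEG_TO_RAD(rotation.y), ry);
        aiMatrix4x4::RotationZ(AI_DEG_TO_RAD(rotation.z), rz);
        return rz * ry * rx;  // X is applied to a vector first, then Y, then Z
    }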
- const std::uint32_t chainMaskSimple = (1 << TransformationComp_Translation) + (1 << TransformationComp_Scaling) + (1 << TransformationComp_Rotation); - // A node will need a node chain if it has any of these. - const std::uint32_t chainMaskComplex = ((1 << (TransformationComp_MAXIMUM)) - 1) - chainMaskSimple; - - std::fill_n(chain, static_cast<unsigned int>(TransformationComp_MAXIMUM), aiMatrix4x4()); - - // generate transformation matrices for all the different transformation components - const float zero_epsilon = Math::getEpsilon<float>(); - const aiVector3D all_ones(1.0f, 1.0f, 1.0f); - - const aiVector3D& PreRotation = PropertyGet<aiVector3D>(props, "PreRotation", ok); - if (ok && PreRotation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_PreRotation); - - GetRotationMatrix(Model::RotOrder::RotOrder_EulerXYZ, PreRotation, chain[TransformationComp_PreRotation]); - } - - const aiVector3D& PostRotation = PropertyGet<aiVector3D>(props, "PostRotation", ok); - if (ok && PostRotation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_PostRotation); - - GetRotationMatrix(Model::RotOrder::RotOrder_EulerXYZ, PostRotation, chain[TransformationComp_PostRotation]); - } - - const aiVector3D& RotationPivot = PropertyGet<aiVector3D>(props, "RotationPivot", ok); - if (ok && RotationPivot.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_RotationPivot) | (1 << TransformationComp_RotationPivotInverse); - - aiMatrix4x4::Translation(RotationPivot, chain[TransformationComp_RotationPivot]); - aiMatrix4x4::Translation(-RotationPivot, chain[TransformationComp_RotationPivotInverse]); - } - - const aiVector3D& RotationOffset = PropertyGet<aiVector3D>(props, "RotationOffset", ok); - if (ok && RotationOffset.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_RotationOffset); - - aiMatrix4x4::Translation(RotationOffset, chain[TransformationComp_RotationOffset]); - } - - const aiVector3D& ScalingOffset = PropertyGet<aiVector3D>(props, "ScalingOffset", ok); - if (ok && ScalingOffset.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_ScalingOffset); - - aiMatrix4x4::Translation(ScalingOffset, chain[TransformationComp_ScalingOffset]); - } - - const aiVector3D& ScalingPivot = PropertyGet<aiVector3D>(props, "ScalingPivot", ok); - if (ok && ScalingPivot.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_ScalingPivot) | (1 << TransformationComp_ScalingPivotInverse); - - aiMatrix4x4::Translation(ScalingPivot, chain[TransformationComp_ScalingPivot]); - aiMatrix4x4::Translation(-ScalingPivot, chain[TransformationComp_ScalingPivotInverse]); - } - - const aiVector3D& Translation = PropertyGet<aiVector3D>(props, "Lcl Translation", ok); - if (ok && Translation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_Translation); - - aiMatrix4x4::Translation(Translation, chain[TransformationComp_Translation]); - } - - const aiVector3D& Scaling = PropertyGet<aiVector3D>(props, "Lcl Scaling", ok); - if (ok && (Scaling - all_ones).SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_Scaling); - - aiMatrix4x4::Scaling(Scaling, chain[TransformationComp_Scaling]); - } - - const aiVector3D& Rotation = PropertyGet<aiVector3D>(props, "Lcl Rotation", ok); - if (ok && Rotation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_Rotation); - - 
GetRotationMatrix(rot, Rotation, chain[TransformationComp_Rotation]); - } - - const aiVector3D& GeometricScaling = PropertyGet<aiVector3D>(props, "GeometricScaling", ok); - if (ok && (GeometricScaling - all_ones).SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_GeometricScaling); - aiMatrix4x4::Scaling(GeometricScaling, chain[TransformationComp_GeometricScaling]); - aiVector3D GeometricScalingInverse = GeometricScaling; - bool canscale = true; - for (unsigned int i = 0; i < 3; ++i) { - if (std::fabs(GeometricScalingInverse[i]) > zero_epsilon) { - GeometricScalingInverse[i] = 1.0f / GeometricScaling[i]; - } - else { - FBXImporter::LogError("cannot invert geometric scaling matrix with a 0.0 scale component"); - canscale = false; - break; - } - } - if (canscale) { - chainBits = chainBits | (1 << TransformationComp_GeometricScalingInverse); - aiMatrix4x4::Scaling(GeometricScalingInverse, chain[TransformationComp_GeometricScalingInverse]); - } - } - - const aiVector3D& GeometricRotation = PropertyGet<aiVector3D>(props, "GeometricRotation", ok); - if (ok && GeometricRotation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_GeometricRotation) | (1 << TransformationComp_GeometricRotationInverse); - GetRotationMatrix(rot, GeometricRotation, chain[TransformationComp_GeometricRotation]); - GetRotationMatrix(rot, GeometricRotation, chain[TransformationComp_GeometricRotationInverse]); - chain[TransformationComp_GeometricRotationInverse].Inverse(); - } - - const aiVector3D& GeometricTranslation = PropertyGet<aiVector3D>(props, "GeometricTranslation", ok); - if (ok && GeometricTranslation.SquareLength() > zero_epsilon) { - chainBits = chainBits | (1 << TransformationComp_GeometricTranslation) | (1 << TransformationComp_GeometricTranslationInverse); - aiMatrix4x4::Translation(GeometricTranslation, chain[TransformationComp_GeometricTranslation]); - aiMatrix4x4::Translation(-GeometricTranslation, chain[TransformationComp_GeometricTranslationInverse]); - } - - // is_complex needs to be consistent with NeedsComplexTransformationChain() - // or the interplay between this code and the animation converter would - // not be guaranteed. - //ai_assert(NeedsComplexTransformationChain(model) == ((chainBits & chainMaskComplex) != 0)); - - // now, if we have more than just Translation, Scaling and Rotation, - // we need to generate a full node chain to accommodate for assimp's - // lack to express pivots and offsets. - if ((chainBits & chainMaskComplex) && doc.Settings().preservePivots) { - FBXImporter::LogInfo("generating full transformation chain for node: " + name); - - // query the anim_chain_bits dictionary to find out which chain elements - // have associated node animation channels. These can not be dropped - // even if they have identity transform in bind pose. - NodeAnimBitMap::const_iterator it = node_anim_chain_bits.find(name); - const unsigned int anim_chain_bitmask = (it == node_anim_chain_bits.end() ? 
0 : (*it).second); - - unsigned int bit = 0x1; - for (size_t i = 0; i < TransformationComp_MAXIMUM; ++i, bit <<= 1) { - const TransformationComp comp = static_cast<TransformationComp>(i); - - if ((chainBits & bit) == 0 && (anim_chain_bitmask & bit) == 0) { - continue; - } - - if (comp == TransformationComp_PostRotation) { - chain[i] = chain[i].Inverse(); - } - - aiNode* nd = new aiNode(); - nd->mName.Set(NameTransformationChainNode(name, comp)); - nd->mTransformation = chain[i]; - - // geometric inverses go in a post-node chain - if (comp == TransformationComp_GeometricScalingInverse || - comp == TransformationComp_GeometricRotationInverse || - comp == TransformationComp_GeometricTranslationInverse - ) { - post_output_nodes.push_back(nd); - } - else { - output_nodes.push_back(nd); - } - } - - ai_assert(output_nodes.size()); - return true; - } - - // else, we can just multiply the matrices together - aiNode* nd = new aiNode(); - output_nodes.push_back(nd); - - // name passed to the method is already unique - nd->mName.Set(name); - - for (const auto &transform : chain) { - nd->mTransformation = nd->mTransformation * transform; - } - return false; - } - - void FBXConverter::SetupNodeMetadata(const Model& model, aiNode& nd) - { - const PropertyTable& props = model.Props(); - DirectPropertyMap unparsedProperties = props.GetUnparsedProperties(); - - // create metadata on node - const std::size_t numStaticMetaData = 2; - aiMetadata* data = aiMetadata::Alloc(static_cast<unsigned int>(unparsedProperties.size() + numStaticMetaData)); - nd.mMetaData = data; - int index = 0; - - // find user defined properties (3ds Max) - data->Set(index++, "UserProperties", aiString(PropertyGet<std::string>(props, "UDP3DSMAX", ""))); - // preserve the info that a node was marked as Null node in the original file. - data->Set(index++, "IsNull", model.IsNull() ? 
true : false); - - // add unparsed properties to the node's metadata - for (const DirectPropertyMap::value_type& prop : unparsedProperties) { - // Interpret the property as a concrete type - if (const TypedProperty<bool>* interpreted = prop.second->As<TypedProperty<bool> >()) { - data->Set(index++, prop.first, interpreted->Value()); - } - else if (const TypedProperty<int>* interpreted = prop.second->As<TypedProperty<int> >()) { - data->Set(index++, prop.first, interpreted->Value()); - } - else if (const TypedProperty<uint64_t>* interpreted = prop.second->As<TypedProperty<uint64_t> >()) { - data->Set(index++, prop.first, interpreted->Value()); - } - else if (const TypedProperty<float>* interpreted = prop.second->As<TypedProperty<float> >()) { - data->Set(index++, prop.first, interpreted->Value()); - } - else if (const TypedProperty<std::string>* interpreted = prop.second->As<TypedProperty<std::string> >()) { - data->Set(index++, prop.first, aiString(interpreted->Value())); - } - else if (const TypedProperty<aiVector3D>* interpreted = prop.second->As<TypedProperty<aiVector3D> >()) { - data->Set(index++, prop.first, interpreted->Value()); - } - else { - ai_assert(false); - } - } - } - - void FBXConverter::ConvertModel(const Model &model, aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform) - { - const std::vector<const Geometry*>& geos = model.GetGeometry(); - - std::vector<unsigned int> meshes; - meshes.reserve(geos.size()); - - for (const Geometry* geo : geos) { - - const MeshGeometry* const mesh = dynamic_cast<const MeshGeometry*>(geo); - const LineGeometry* const line = dynamic_cast<const LineGeometry*>(geo); - if (mesh) { - const std::vector<unsigned int>& indices = ConvertMesh(*mesh, model, parent, root_node, - absolute_transform); - std::copy(indices.begin(), indices.end(), std::back_inserter(meshes)); - } - else if (line) { - const std::vector<unsigned int>& indices = ConvertLine(*line, model, parent, root_node); - std::copy(indices.begin(), indices.end(), std::back_inserter(meshes)); - } - else { - FBXImporter::LogWarn("ignoring unrecognized geometry: " + geo->Name()); - } - } - - if (meshes.size()) { - parent->mMeshes = new unsigned int[meshes.size()](); - parent->mNumMeshes = static_cast<unsigned int>(meshes.size()); - - std::swap_ranges(meshes.begin(), meshes.end(), parent->mMeshes); - } - } - - std::vector<unsigned int> - FBXConverter::ConvertMesh(const MeshGeometry &mesh, const Model &model, aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform) - { - std::vector<unsigned int> temp; - - MeshMap::const_iterator it = meshes_converted.find(&mesh); - if (it != meshes_converted.end()) { - std::copy((*it).second.begin(), (*it).second.end(), std::back_inserter(temp)); - return temp; - } - - const std::vector<aiVector3D>& vertices = mesh.GetVertices(); - const std::vector<unsigned int>& faces = mesh.GetFaceIndexCounts(); - if (vertices.empty() || faces.empty()) { - FBXImporter::LogWarn("ignoring empty geometry: " + mesh.Name()); - return temp; - } - - // one material per mesh maps easily to aiMesh. Multiple material - // meshes need to be split. 
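The split decision described in the comment above boils down to checking whether every face references the same material index; restated standalone (names illustrative):

    #include <vector>

    // Illustrative restatement: a mesh can stay a single aiMesh only if all
    // per-face material indices agree, otherwise it is split per material.
    static bool needs_material_split(const std::vector<int> &per_face_material) {
        if (per_face_material.empty()) {
            return false;
        }
        const int base = per_face_material.front();
        for (int index : per_face_material) {
            if (index != base) {
                return true;
            }
        }
        return false;
    }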
- const MatIndexArray& mindices = mesh.GetMaterialIndices(); - if (doc.Settings().readMaterials && !mindices.empty()) { - const MatIndexArray::value_type base = mindices[0]; - for (MatIndexArray::value_type index : mindices) { - if (index != base) { - return ConvertMeshMultiMaterial(mesh, model, parent, root_node, absolute_transform); - } - } - } - - // faster code-path, just copy the data - temp.push_back(ConvertMeshSingleMaterial(mesh, model, absolute_transform, parent, root_node)); - return temp; - } - - std::vector<unsigned int> FBXConverter::ConvertLine(const LineGeometry& line, const Model& model, - aiNode *parent, aiNode *root_node) - { - std::vector<unsigned int> temp; - - const std::vector<aiVector3D>& vertices = line.GetVertices(); - const std::vector<int>& indices = line.GetIndices(); - if (vertices.empty() || indices.empty()) { - FBXImporter::LogWarn("ignoring empty line: " + line.Name()); - return temp; - } - - aiMesh* const out_mesh = SetupEmptyMesh(line, root_node); - out_mesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - - // copy vertices - out_mesh->mNumVertices = static_cast<unsigned int>(vertices.size()); - out_mesh->mVertices = new aiVector3D[out_mesh->mNumVertices]; - std::copy(vertices.begin(), vertices.end(), out_mesh->mVertices); - - //Number of line segments (faces) is "Number of Points - Number of Endpoints" - //N.B.: Endpoints in FbxLine are denoted by negative indices. - //If such an Index is encountered, add 1 and multiply by -1 to get the real index. - unsigned int epcount = 0; - for (unsigned i = 0; i < indices.size(); i++) - { - if (indices[i] < 0) { - epcount++; - } - } - unsigned int pcount = static_cast<unsigned int>( indices.size() ); - unsigned int scount = out_mesh->mNumFaces = pcount - epcount; - - aiFace* fac = out_mesh->mFaces = new aiFace[scount](); - for (unsigned int i = 0; i < pcount; ++i) { - if (indices[i] < 0) continue; - aiFace& f = *fac++; - f.mNumIndices = 2; //2 == aiPrimitiveType_LINE - f.mIndices = new unsigned int[2]; - f.mIndices[0] = indices[i]; - int segid = indices[(i + 1 == pcount ? 0 : i + 1)]; //If we have reached he last point, wrap around - f.mIndices[1] = (segid < 0 ? 
(segid + 1)*-1 : segid); //Convert EndPoint Index to normal Index - } - temp.push_back(static_cast<unsigned int>(meshes.size() - 1)); - return temp; - } - - aiMesh* FBXConverter::SetupEmptyMesh(const Geometry& mesh, aiNode *parent) - { - aiMesh* const out_mesh = new aiMesh(); - meshes.push_back(out_mesh); - meshes_converted[&mesh].push_back(static_cast<unsigned int>(meshes.size() - 1)); - - // set name - std::string name = mesh.Name(); - if (name.substr(0, 10) == "Geometry::") { - name = name.substr(10); - } - - if (name.length()) { - out_mesh->mName.Set(name); - } - else - { - out_mesh->mName = parent->mName; - } - - return out_mesh; - } - - unsigned int FBXConverter::ConvertMeshSingleMaterial(const MeshGeometry &mesh, const Model &model, - const aiMatrix4x4 &absolute_transform, aiNode *parent, - aiNode *root_node) - { - const MatIndexArray& mindices = mesh.GetMaterialIndices(); - aiMesh* const out_mesh = SetupEmptyMesh(mesh, parent); - - const std::vector<aiVector3D>& vertices = mesh.GetVertices(); - const std::vector<unsigned int>& faces = mesh.GetFaceIndexCounts(); - - // copy vertices - out_mesh->mNumVertices = static_cast<unsigned int>(vertices.size()); - out_mesh->mVertices = new aiVector3D[vertices.size()]; - - std::copy(vertices.begin(), vertices.end(), out_mesh->mVertices); - - // generate dummy faces - out_mesh->mNumFaces = static_cast<unsigned int>(faces.size()); - aiFace* fac = out_mesh->mFaces = new aiFace[faces.size()](); - - unsigned int cursor = 0; - for (unsigned int pcount : faces) { - aiFace& f = *fac++; - f.mNumIndices = pcount; - f.mIndices = new unsigned int[pcount]; - switch (pcount) - { - case 1: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 2: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 3: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - break; - } - for (unsigned int i = 0; i < pcount; ++i) { - f.mIndices[i] = cursor++; - } - } - - // copy normals - const std::vector<aiVector3D>& normals = mesh.GetNormals(); - if (normals.size()) { - ai_assert(normals.size() == vertices.size()); - - out_mesh->mNormals = new aiVector3D[vertices.size()]; - std::copy(normals.begin(), normals.end(), out_mesh->mNormals); - } - - // copy tangents - assimp requires both tangents and bitangents (binormals) - // to be present, or neither of them. Compute binormals from normals - // and tangents if needed. 
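The binormal reconstruction mentioned above uses assimp's cross-product operator on aiVector3D; as a one-line sketch:

    #include <assimp/vector3.h>

    // Sketch: when the FBX file carries tangents but no binormals, the converter
    // derives them per vertex as the cross product of normal and tangent.
    static aiVector3D reconstruct_binormal(const aiVector3D &normal, const aiVector3D &tangent) {
        return normal ^ tangent;  // aiVector3D overloads ^ as the cross product
    }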
- const std::vector<aiVector3D>& tangents = mesh.GetTangents(); - const std::vector<aiVector3D>* binormals = &mesh.GetBinormals(); - - if (tangents.size()) { - std::vector<aiVector3D> tempBinormals; - if (!binormals->size()) { - if (normals.size()) { - tempBinormals.resize(normals.size()); - for (unsigned int i = 0; i < tangents.size(); ++i) { - tempBinormals[i] = normals[i] ^ tangents[i]; - } - - binormals = &tempBinormals; - } - else { - binormals = nullptr; - } - } - - if (binormals) { - ai_assert(tangents.size() == vertices.size()); - ai_assert(binormals->size() == vertices.size()); - - out_mesh->mTangents = new aiVector3D[vertices.size()]; - std::copy(tangents.begin(), tangents.end(), out_mesh->mTangents); - - out_mesh->mBitangents = new aiVector3D[vertices.size()]; - std::copy(binormals->begin(), binormals->end(), out_mesh->mBitangents); - } - } - - // copy texture coords - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - const std::vector<aiVector2D>& uvs = mesh.GetTextureCoords(i); - if (uvs.empty()) { - break; - } - - aiVector3D* out_uv = out_mesh->mTextureCoords[i] = new aiVector3D[vertices.size()]; - for (const aiVector2D& v : uvs) { - *out_uv++ = aiVector3D(v.x, v.y, 0.0f); - } - - out_mesh->mNumUVComponents[i] = 2; - } - - // copy vertex colors - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i) { - const std::vector<aiColor4D>& colors = mesh.GetVertexColors(i); - if (colors.empty()) { - break; - } - - out_mesh->mColors[i] = new aiColor4D[vertices.size()]; - std::copy(colors.begin(), colors.end(), out_mesh->mColors[i]); - } - - if (!doc.Settings().readMaterials || mindices.empty()) { - FBXImporter::LogError("no material assigned to mesh, setting default material"); - out_mesh->mMaterialIndex = GetDefaultMaterial(); - } - else { - ConvertMaterialForMesh(out_mesh, model, mesh, mindices[0]); - } - - if (doc.Settings().readWeights && mesh.DeformerSkin() != nullptr) { - ConvertWeights(out_mesh, model, mesh, absolute_transform, parent, root_node, NO_MATERIAL_SEPARATION, - nullptr); - } - - std::vector<aiAnimMesh*> animMeshes; - for (const BlendShape* blendShape : mesh.GetBlendShapes()) { - for (const BlendShapeChannel* blendShapeChannel : blendShape->BlendShapeChannels()) { - const std::vector<const ShapeGeometry*>& shapeGeometries = blendShapeChannel->GetShapeGeometries(); - for (size_t i = 0; i < shapeGeometries.size(); i++) { - aiAnimMesh *animMesh = aiCreateAnimMesh(out_mesh); - const ShapeGeometry* shapeGeometry = shapeGeometries.at(i); - const std::vector<aiVector3D>& vertices = shapeGeometry->GetVertices(); - const std::vector<aiVector3D>& normals = shapeGeometry->GetNormals(); - const std::vector<unsigned int>& indices = shapeGeometry->GetIndices(); - animMesh->mName.Set(FixAnimMeshName(shapeGeometry->Name())); - for (size_t j = 0; j < indices.size(); j++) { - unsigned int index = indices.at(j); - aiVector3D vertex = vertices.at(j); - aiVector3D normal = normals.at(j); - unsigned int count = 0; - const unsigned int* outIndices = mesh.ToOutputVertexIndex(index, count); - for (unsigned int k = 0; k < count; k++) { - unsigned int index = outIndices[k]; - animMesh->mVertices[index] += vertex; - if (animMesh->mNormals != nullptr) { - animMesh->mNormals[index] += normal; - animMesh->mNormals[index].NormalizeSafe(); - } - } - } - animMesh->mWeight = shapeGeometries.size() > 1 ? 
blendShapeChannel->DeformPercent() / 100.0f : 1.0f; - animMeshes.push_back(animMesh); - } - } - } - const size_t numAnimMeshes = animMeshes.size(); - if (numAnimMeshes > 0) { - out_mesh->mNumAnimMeshes = static_cast<unsigned int>(numAnimMeshes); - out_mesh->mAnimMeshes = new aiAnimMesh*[numAnimMeshes]; - for (size_t i = 0; i < numAnimMeshes; i++) { - out_mesh->mAnimMeshes[i] = animMeshes.at(i); - } - } - return static_cast<unsigned int>(meshes.size() - 1); - } - - std::vector<unsigned int> - FBXConverter::ConvertMeshMultiMaterial(const MeshGeometry &mesh, const Model &model, aiNode *parent, - aiNode *root_node, - const aiMatrix4x4 &absolute_transform) - { - const MatIndexArray& mindices = mesh.GetMaterialIndices(); - ai_assert(mindices.size()); - - std::set<MatIndexArray::value_type> had; - std::vector<unsigned int> indices; - - for (MatIndexArray::value_type index : mindices) { - if (had.find(index) == had.end()) { - - indices.push_back(ConvertMeshMultiMaterial(mesh, model, index, parent, root_node, absolute_transform)); - had.insert(index); - } - } - - return indices; - } - - unsigned int FBXConverter::ConvertMeshMultiMaterial(const MeshGeometry &mesh, const Model &model, - MatIndexArray::value_type index, - aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform) - { - aiMesh* const out_mesh = SetupEmptyMesh(mesh, parent); - - const MatIndexArray& mindices = mesh.GetMaterialIndices(); - const std::vector<aiVector3D>& vertices = mesh.GetVertices(); - const std::vector<unsigned int>& faces = mesh.GetFaceIndexCounts(); - - const bool process_weights = doc.Settings().readWeights && mesh.DeformerSkin() != nullptr; - - unsigned int count_faces = 0; - unsigned int count_vertices = 0; - - // count faces - std::vector<unsigned int>::const_iterator itf = faces.begin(); - for (MatIndexArray::const_iterator it = mindices.begin(), - end = mindices.end(); it != end; ++it, ++itf) - { - if ((*it) != index) { - continue; - } - ++count_faces; - count_vertices += *itf; - } - - ai_assert(count_faces); - ai_assert(count_vertices); - - // mapping from output indices to DOM indexing, needed to resolve weights or blendshapes - std::vector<unsigned int> reverseMapping; - std::map<unsigned int, unsigned int> translateIndexMap; - if (process_weights || mesh.GetBlendShapes().size() > 0) { - reverseMapping.resize(count_vertices); - } - - // allocate output data arrays, but don't fill them yet - out_mesh->mNumVertices = count_vertices; - out_mesh->mVertices = new aiVector3D[count_vertices]; - - out_mesh->mNumFaces = count_faces; - aiFace* fac = out_mesh->mFaces = new aiFace[count_faces](); - - - // allocate normals - const std::vector<aiVector3D>& normals = mesh.GetNormals(); - if (normals.size()) { - ai_assert(normals.size() == vertices.size()); - out_mesh->mNormals = new aiVector3D[vertices.size()]; - } - - // allocate tangents, binormals. - const std::vector<aiVector3D>& tangents = mesh.GetTangents(); - const std::vector<aiVector3D>* binormals = &mesh.GetBinormals(); - std::vector<aiVector3D> tempBinormals; - - if (tangents.size()) { - if (!binormals->size()) { - if (normals.size()) { - // XXX this computes the binormals for the entire mesh, not only - // the part for which we need them. 
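The two lookup tables set up above for the multi-material path point in opposite directions: reverseMapping goes from a split-mesh vertex index back to the original output index (used to resolve weights), while translateIndexMap goes the other way (used to push blend-shape deltas onto the split mesh). A compact restatement, keeping the original names but otherwise illustrative:

    #include <map>
    #include <vector>

    // Illustration of the two directions kept while splitting a mesh per material.
    struct SplitIndexMaps {
        std::vector<unsigned int> reverseMapping;                  // new index -> original index
        std::map<unsigned int, unsigned int> translateIndexMap;    // original index -> new index

        void record(unsigned int original_index, unsigned int new_index) {
            reverseMapping[new_index] = original_index;  // reverseMapping must be pre-sized
            translateIndexMap[original_index] = new_index;
        }
    };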
- tempBinormals.resize(normals.size()); - for (unsigned int i = 0; i < tangents.size(); ++i) { - tempBinormals[i] = normals[i] ^ tangents[i]; - } - - binormals = &tempBinormals; - } - else { - binormals = nullptr; - } - } - - if (binormals) { - ai_assert(tangents.size() == vertices.size() && binormals->size() == vertices.size()); - - out_mesh->mTangents = new aiVector3D[vertices.size()]; - out_mesh->mBitangents = new aiVector3D[vertices.size()]; - } - } - - // allocate texture coords - unsigned int num_uvs = 0; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i, ++num_uvs) { - const std::vector<aiVector2D>& uvs = mesh.GetTextureCoords(i); - if (uvs.empty()) { - break; - } - - out_mesh->mTextureCoords[i] = new aiVector3D[vertices.size()]; - out_mesh->mNumUVComponents[i] = 2; - } - - // allocate vertex colors - unsigned int num_vcs = 0; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i, ++num_vcs) { - const std::vector<aiColor4D>& colors = mesh.GetVertexColors(i); - if (colors.empty()) { - break; - } - - out_mesh->mColors[i] = new aiColor4D[vertices.size()]; - } - - unsigned int cursor = 0, in_cursor = 0; - - itf = faces.begin(); - for (MatIndexArray::const_iterator it = mindices.begin(), end = mindices.end(); it != end; ++it, ++itf) - { - const unsigned int pcount = *itf; - if ((*it) != index) { - in_cursor += pcount; - continue; - } - - aiFace& f = *fac++; - - f.mNumIndices = pcount; - f.mIndices = new unsigned int[pcount]; - switch (pcount) - { - case 1: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 2: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 3: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - out_mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - break; - } - for (unsigned int i = 0; i < pcount; ++i, ++cursor, ++in_cursor) { - f.mIndices[i] = cursor; - - if (reverseMapping.size()) { - reverseMapping[cursor] = in_cursor; - translateIndexMap[in_cursor] = cursor; - } - - out_mesh->mVertices[cursor] = vertices[in_cursor]; - - if (out_mesh->mNormals) { - out_mesh->mNormals[cursor] = normals[in_cursor]; - } - - if (out_mesh->mTangents) { - out_mesh->mTangents[cursor] = tangents[in_cursor]; - out_mesh->mBitangents[cursor] = (*binormals)[in_cursor]; - } - - for (unsigned int j = 0; j < num_uvs; ++j) { - const std::vector<aiVector2D>& uvs = mesh.GetTextureCoords(j); - out_mesh->mTextureCoords[j][cursor] = aiVector3D(uvs[in_cursor].x, uvs[in_cursor].y, 0.0f); - } - - for (unsigned int j = 0; j < num_vcs; ++j) { - const std::vector<aiColor4D>& cols = mesh.GetVertexColors(j); - out_mesh->mColors[j][cursor] = cols[in_cursor]; - } - } - } - - ConvertMaterialForMesh(out_mesh, model, mesh, index); - - if (process_weights) { - ConvertWeights(out_mesh, model, mesh, absolute_transform, parent, root_node, index, &reverseMapping); - } - - std::vector<aiAnimMesh*> animMeshes; - for (const BlendShape* blendShape : mesh.GetBlendShapes()) { - for (const BlendShapeChannel* blendShapeChannel : blendShape->BlendShapeChannels()) { - const std::vector<const ShapeGeometry*>& shapeGeometries = blendShapeChannel->GetShapeGeometries(); - for (size_t i = 0; i < shapeGeometries.size(); i++) { - aiAnimMesh* animMesh = aiCreateAnimMesh(out_mesh); - const ShapeGeometry* shapeGeometry = shapeGeometries.at(i); - const std::vector<aiVector3D>& vertices = shapeGeometry->GetVertices(); - const std::vector<aiVector3D>& normals = shapeGeometry->GetNormals(); - const std::vector<unsigned int>& indices = 
shapeGeometry->GetIndices(); - animMesh->mName.Set(FixAnimMeshName(shapeGeometry->Name())); - for (size_t j = 0; j < indices.size(); j++) { - unsigned int index = indices.at(j); - aiVector3D vertex = vertices.at(j); - aiVector3D normal = normals.at(j); - unsigned int count = 0; - const unsigned int* outIndices = mesh.ToOutputVertexIndex(index, count); - for (unsigned int k = 0; k < count; k++) { - unsigned int outIndex = outIndices[k]; - if (translateIndexMap.find(outIndex) == translateIndexMap.end()) - continue; - unsigned int index = translateIndexMap[outIndex]; - animMesh->mVertices[index] += vertex; - if (animMesh->mNormals != nullptr) { - animMesh->mNormals[index] += normal; - animMesh->mNormals[index].NormalizeSafe(); - } - } - } - animMesh->mWeight = shapeGeometries.size() > 1 ? blendShapeChannel->DeformPercent() / 100.0f : 1.0f; - animMeshes.push_back(animMesh); - } - } - } - - const size_t numAnimMeshes = animMeshes.size(); - if (numAnimMeshes > 0) { - out_mesh->mNumAnimMeshes = static_cast<unsigned int>(numAnimMeshes); - out_mesh->mAnimMeshes = new aiAnimMesh*[numAnimMeshes]; - for (size_t i = 0; i < numAnimMeshes; i++) { - out_mesh->mAnimMeshes[i] = animMeshes.at(i); - } - } - - return static_cast<unsigned int>(meshes.size() - 1); - } - - void FBXConverter::ConvertWeights(aiMesh *out, const Model &model, const MeshGeometry &geo, - const aiMatrix4x4 &absolute_transform, - aiNode *parent, aiNode *root_node, unsigned int materialIndex, - std::vector<unsigned int> *outputVertStartIndices) - { - ai_assert(geo.DeformerSkin()); - - std::vector<size_t> out_indices; - std::vector<size_t> index_out_indices; - std::vector<size_t> count_out_indices; - - const Skin& sk = *geo.DeformerSkin(); - - std::vector<aiBone*> bones; - - const bool no_mat_check = materialIndex == NO_MATERIAL_SEPARATION; - ai_assert(no_mat_check || outputVertStartIndices); - - try { - // iterate over the sub deformers - for (const Cluster* cluster : sk.Clusters()) { - ai_assert(cluster); - - const WeightIndexArray& indices = cluster->GetIndices(); - - const MatIndexArray& mats = geo.GetMaterialIndices(); - - const size_t no_index_sentinel = std::numeric_limits<size_t>::max(); - - count_out_indices.clear(); - index_out_indices.clear(); - out_indices.clear(); - - - // now check if *any* of these weights is contained in the output mesh, - // taking notes so we don't need to do it twice. 
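The O(log n) lookup used just below relies on the per-material reverse mapping being built in ascending order, so an original output vertex index can be remapped to its split-mesh index with a binary search. Sketch (names illustrative):

    #include <algorithm>
    #include <vector>

    // Sketch of the remap: `sorted_original_indices` is the ascending list of
    // original vertex indices kept in the split mesh, so the position of an
    // index in that list is its index within the split mesh.
    static unsigned int remap_to_split_mesh(const std::vector<unsigned int> &sorted_original_indices,
                                            unsigned int original_index) {
        const auto it = std::lower_bound(sorted_original_indices.begin(),
                                         sorted_original_indices.end(), original_index);
        return static_cast<unsigned int>(std::distance(sorted_original_indices.begin(), it));
    }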
- for (WeightIndexArray::value_type index : indices) { - - unsigned int count = 0; - const unsigned int* const out_idx = geo.ToOutputVertexIndex(index, count); - // ToOutputVertexIndex only returns nullptr if index is out of bounds - // which should never happen - ai_assert(out_idx != nullptr); - - index_out_indices.push_back(no_index_sentinel); - count_out_indices.push_back(0); - - for (unsigned int i = 0; i < count; ++i) { - if (no_mat_check || static_cast<size_t>(mats[geo.FaceForVertexIndex(out_idx[i])]) == materialIndex) { - - if (index_out_indices.back() == no_index_sentinel) { - index_out_indices.back() = out_indices.size(); - } - - if (no_mat_check) { - out_indices.push_back(out_idx[i]); - } else { - // this extra lookup is in O(logn), so the entire algorithm becomes O(nlogn) - const std::vector<unsigned int>::iterator it = std::lower_bound( - outputVertStartIndices->begin(), - outputVertStartIndices->end(), - out_idx[i] - ); - - out_indices.push_back(std::distance(outputVertStartIndices->begin(), it)); - } - - ++count_out_indices.back(); - } - } - } - - // if we found at least one, generate the output bones - // XXX this could be heavily simplified by collecting the bone - // data in a single step. - ConvertCluster(bones, cluster, out_indices, index_out_indices, - count_out_indices, absolute_transform, parent, root_node); - } - - bone_map.clear(); - } - catch (std::exception&e) { - std::for_each(bones.begin(), bones.end(), Util::delete_fun<aiBone>()); - throw; - } - - if (bones.empty()) { - out->mBones = nullptr; - out->mNumBones = 0; - return; - } else { - out->mBones = new aiBone *[bones.size()](); - out->mNumBones = static_cast<unsigned int>(bones.size()); - - std::swap_ranges(bones.begin(), bones.end(), out->mBones); - } - } - - const aiNode* FBXConverter::GetNodeByName( const aiString& name, aiNode *current_node ) - { - aiNode * iter = current_node; - //printf("Child count: %d", iter->mNumChildren); - return iter; - } - - void FBXConverter::ConvertCluster(std::vector<aiBone *> &local_mesh_bones, const Cluster *cl, - std::vector<size_t> &out_indices, std::vector<size_t> &index_out_indices, - std::vector<size_t> &count_out_indices, const aiMatrix4x4 &absolute_transform, - aiNode *parent, aiNode *root_node) { - ai_assert(cl); // make sure cluster valid - std::string deformer_name = cl->TargetNode()->Name(); - aiString bone_name = aiString(FixNodeName(deformer_name)); - - aiBone *bone = nullptr; - - if (bone_map.count(deformer_name)) { - std::cout << "retrieved bone from lookup " << bone_name.C_Str() << ". Deformer: " << deformer_name - << std::endl; - bone = bone_map[deformer_name]; - } else { - std::cout << "created new bone " << bone_name.C_Str() << ". 
Deformer: " << deformer_name << std::endl; - bone = new aiBone(); - bone->mName = bone_name; - - // store local transform link for post processing - bone->mOffsetMatrix = cl->TransformLink(); - bone->mOffsetMatrix.Inverse(); - - aiMatrix4x4 matrix = (aiMatrix4x4)absolute_transform; - - bone->mOffsetMatrix = bone->mOffsetMatrix * matrix; // * mesh_offset - - - // - // Now calculate the aiVertexWeights - // - - aiVertexWeight *cursor = nullptr; - - bone->mNumWeights = static_cast<unsigned int>(out_indices.size()); - cursor = bone->mWeights = new aiVertexWeight[out_indices.size()]; - - const size_t no_index_sentinel = std::numeric_limits<size_t>::max(); - const WeightArray& weights = cl->GetWeights(); - - const size_t c = index_out_indices.size(); - for (size_t i = 0; i < c; ++i) { - const size_t index_index = index_out_indices[i]; - - if (index_index == no_index_sentinel) { - continue; - } - - const size_t cc = count_out_indices[i]; - for (size_t j = 0; j < cc; ++j) { - // cursor runs from first element relative to the start - // or relative to the start of the next indexes. - aiVertexWeight& out_weight = *cursor++; - - out_weight.mVertexId = static_cast<unsigned int>(out_indices[index_index + j]); - out_weight.mWeight = weights[i]; - } - } - - bone_map.insert(std::pair<const std::string, aiBone *>(deformer_name, bone)); - } - - std::cout << "bone research: Indicies size: " << out_indices.size() << std::endl; - - // lookup must be populated in case something goes wrong - // this also allocates bones to mesh instance outside - local_mesh_bones.push_back(bone); - } - - void FBXConverter::ConvertMaterialForMesh(aiMesh* out, const Model& model, const MeshGeometry& geo, - MatIndexArray::value_type materialIndex) - { - // locate source materials for this mesh - const std::vector<const Material*>& mats = model.GetMaterials(); - if (static_cast<unsigned int>(materialIndex) >= mats.size() || materialIndex < 0) { - FBXImporter::LogError("material index out of bounds, setting default material"); - out->mMaterialIndex = GetDefaultMaterial(); - return; - } - - const Material* const mat = mats[materialIndex]; - MaterialMap::const_iterator it = materials_converted.find(mat); - if (it != materials_converted.end()) { - out->mMaterialIndex = (*it).second; - return; - } - - out->mMaterialIndex = ConvertMaterial(*mat, &geo); - materials_converted[mat] = out->mMaterialIndex; - } - - unsigned int FBXConverter::GetDefaultMaterial() - { - if (defaultMaterialIndex) { - return defaultMaterialIndex - 1; - } - - aiMaterial* out_mat = new aiMaterial(); - materials.push_back(out_mat); - - const aiColor3D diffuse = aiColor3D(0.8f, 0.8f, 0.8f); - out_mat->AddProperty(&diffuse, 1, AI_MATKEY_COLOR_DIFFUSE); - - aiString s; - s.Set(AI_DEFAULT_MATERIAL_NAME); - - out_mat->AddProperty(&s, AI_MATKEY_NAME); - - defaultMaterialIndex = static_cast<unsigned int>(materials.size()); - return defaultMaterialIndex - 1; - } - - - unsigned int FBXConverter::ConvertMaterial(const Material& material, const MeshGeometry* const mesh) - { - const PropertyTable& props = material.Props(); - - // generate empty output material - aiMaterial* out_mat = new aiMaterial(); - materials_converted[&material] = static_cast<unsigned int>(materials.size()); - - materials.push_back(out_mat); - - aiString str; - - // strip Material:: prefix - std::string name = material.Name(); - if (name.substr(0, 10) == "Material::") { - name = name.substr(10); - } - - // set material name if not empty - this could happen - // and there should be no key for it in this case. 
- if (name.length()) { - str.Set(name); - out_mat->AddProperty(&str, AI_MATKEY_NAME); - } - - // Set the shading mode as best we can: The FBX specification only mentions Lambert and Phong, and only Phong is mentioned in Assimp's aiShadingMode enum. - if (material.GetShadingModel() == "phong") - { - aiShadingMode shadingMode = aiShadingMode_Phong; - out_mat->AddProperty<aiShadingMode>(&shadingMode, 1, AI_MATKEY_SHADING_MODEL); - } - - // shading stuff and colors - SetShadingPropertiesCommon(out_mat, props); - SetShadingPropertiesRaw( out_mat, props, material.Textures(), mesh ); - - // texture assignments - SetTextureProperties(out_mat, material.Textures(), mesh); - SetTextureProperties(out_mat, material.LayeredTextures(), mesh); - - return static_cast<unsigned int>(materials.size() - 1); - } - - unsigned int FBXConverter::ConvertVideo(const Video& video) - { - // generate empty output texture - aiTexture* out_tex = new aiTexture(); - textures.push_back(out_tex); - - // assuming the texture is compressed - out_tex->mWidth = static_cast<unsigned int>(video.ContentLength()); // total data size - out_tex->mHeight = 0; // fixed to 0 - - // steal the data from the Video to avoid an additional copy - out_tex->pcData = reinterpret_cast<aiTexel*>(const_cast<Video&>(video).RelinquishContent()); - - // try to extract a hint from the file extension - const std::string& filename = video.RelativeFilename().empty() ? video.FileName() : video.RelativeFilename(); - std::string ext = BaseImporter::GetExtension(filename); - - if (ext == "jpeg") { - ext = "jpg"; - } - - if (ext.size() <= 3) { - memcpy(out_tex->achFormatHint, ext.c_str(), ext.size()); - } - - out_tex->mFilename.Set(filename.c_str()); - - return static_cast<unsigned int>(textures.size() - 1); - } - - aiString FBXConverter::GetTexturePath(const Texture* tex) - { - aiString path; - path.Set(tex->RelativeFilename()); - - const Video* media = tex->Media(); - if (media != nullptr) { - bool textureReady = false; //tells if our texture is ready (if it was loaded or if it was found) - unsigned int index; - - VideoMap::const_iterator it = textures_converted.find(*media); - if (it != textures_converted.end()) { - index = (*it).second; - textureReady = true; - } - else { - if (media->ContentLength() > 0) { - index = ConvertVideo(*media); - textures_converted[*media] = index; - textureReady = true; - } - } - - // setup texture reference string (copied from ColladaLoader::FindFilenameForEffectTexture), if the texture is ready - if (doc.Settings().useLegacyEmbeddedTextureNaming) { - if (textureReady) { - // TODO: check the possibility of using the flag "AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING" - // In FBX files textures are now stored internally by Assimp with their filename included - // Now Assimp can lookup through the loaded textures after all data is processed - // We need to load all textures before referencing them, as FBX file format order may reference a texture before loading it - // This may occur on this case too, it has to be studied - path.data[0] = '*'; - path.length = 1 + ASSIMP_itoa10(path.data + 1, MAXLEN - 1, index); - } - } - } - - return path; - } - - void FBXConverter::TrySetTextureProperties(aiMaterial* out_mat, const TextureMap& textures, - const std::string& propName, - aiTextureType target, const MeshGeometry* const mesh) { - TextureMap::const_iterator it = textures.find(propName); - if (it == textures.end()) { - return; - } - - const Texture* const tex = (*it).second; - if (tex != 0) - { - aiString path = 
GetTexturePath(tex); - out_mat->AddProperty(&path, _AI_MATKEY_TEXTURE_BASE, target, 0); - - aiUVTransform uvTrafo; - // XXX handle all kinds of UV transformations - uvTrafo.mScaling = tex->UVScaling(); - uvTrafo.mTranslation = tex->UVTranslation(); - out_mat->AddProperty(&uvTrafo, 1, _AI_MATKEY_UVTRANSFORM_BASE, target, 0); - - const PropertyTable& props = tex->Props(); - - int uvIndex = 0; - - bool ok; - const std::string& uvSet = PropertyGet<std::string>(props, "UVSet", ok); - if (ok) { - // "default" is the name which usually appears in the FbxFileTexture template - if (uvSet != "default" && uvSet.length()) { - // this is a bit awkward - we need to find a mesh that uses this - // material and scan its UV channels for the given UV name because - // assimp references UV channels by index, not by name. - - // XXX: the case that UV channels may appear in different orders - // in meshes is unhandled. A possible solution would be to sort - // the UV channels alphabetically, but this would have the side - // effect that the primary (first) UV channel would sometimes - // be moved, causing trouble when users read only the first - // UV channel and ignore UV channel assignments altogether. - - const unsigned int matIndex = static_cast<unsigned int>(std::distance(materials.begin(), - std::find(materials.begin(), materials.end(), out_mat) - )); - - - uvIndex = -1; - if (!mesh) - { - for (const MeshMap::value_type& v : meshes_converted) { - const MeshGeometry* const meshGeom = dynamic_cast<const MeshGeometry*> (v.first); - if (!meshGeom) { - continue; - } - - const MatIndexArray& mats = meshGeom->GetMaterialIndices(); - if (std::find(mats.begin(), mats.end(), matIndex) == mats.end()) { - continue; - } - - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (meshGeom->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = meshGeom->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - continue; - } - - if (uvIndex == -1) { - uvIndex = index; - } - else { - FBXImporter::LogWarn("the UV channel named " + uvSet + - " appears at different positions in meshes, results will be wrong"); - } - } - } - else - { - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (mesh->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = mesh->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - } - - if (uvIndex == -1) { - uvIndex = index; - } - } - - if (uvIndex == -1) { - FBXImporter::LogWarn("failed to resolve UV channel " + uvSet + ", using first UV channel"); - uvIndex = 0; - } - } - } - - out_mat->AddProperty(&uvIndex, 1, _AI_MATKEY_UVWSRC_BASE, target, 0); - } - } - - void FBXConverter::TrySetTextureProperties(aiMaterial* out_mat, const LayeredTextureMap& layeredTextures, - const std::string& propName, - aiTextureType target, const MeshGeometry* const mesh) { - LayeredTextureMap::const_iterator it = layeredTextures.find(propName); - if (it == layeredTextures.end()) { - return; - } - - int texCount = (*it).second->textureCount(); - - // Set the blend mode for layered textures - int blendmode = (*it).second->GetBlendMode(); - out_mat->AddProperty(&blendmode, 1, 
_AI_MATKEY_TEXOP_BASE, target, 0); - - for (int texIndex = 0; texIndex < texCount; texIndex++) { - - const Texture* const tex = (*it).second->getTexture(texIndex); - - aiString path = GetTexturePath(tex); - out_mat->AddProperty(&path, _AI_MATKEY_TEXTURE_BASE, target, texIndex); - - aiUVTransform uvTrafo; - // XXX handle all kinds of UV transformations - uvTrafo.mScaling = tex->UVScaling(); - uvTrafo.mTranslation = tex->UVTranslation(); - out_mat->AddProperty(&uvTrafo, 1, _AI_MATKEY_UVTRANSFORM_BASE, target, texIndex); - - const PropertyTable& props = tex->Props(); - - int uvIndex = 0; - - bool ok; - const std::string& uvSet = PropertyGet<std::string>(props, "UVSet", ok); - if (ok) { - // "default" is the name which usually appears in the FbxFileTexture template - if (uvSet != "default" && uvSet.length()) { - // this is a bit awkward - we need to find a mesh that uses this - // material and scan its UV channels for the given UV name because - // assimp references UV channels by index, not by name. - - // XXX: the case that UV channels may appear in different orders - // in meshes is unhandled. A possible solution would be to sort - // the UV channels alphabetically, but this would have the side - // effect that the primary (first) UV channel would sometimes - // be moved, causing trouble when users read only the first - // UV channel and ignore UV channel assignments altogether. - - const unsigned int matIndex = static_cast<unsigned int>(std::distance(materials.begin(), - std::find(materials.begin(), materials.end(), out_mat) - )); - - uvIndex = -1; - if (!mesh) - { - for (const MeshMap::value_type& v : meshes_converted) { - const MeshGeometry* const meshGeom = dynamic_cast<const MeshGeometry*> (v.first); - if (!meshGeom) { - continue; - } - - const MatIndexArray& mats = meshGeom->GetMaterialIndices(); - if (std::find(mats.begin(), mats.end(), matIndex) == mats.end()) { - continue; - } - - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (meshGeom->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = meshGeom->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - continue; - } - - if (uvIndex == -1) { - uvIndex = index; - } - else { - FBXImporter::LogWarn("the UV channel named " + uvSet + - " appears at different positions in meshes, results will be wrong"); - } - } - } - else - { - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (mesh->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = mesh->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - } - - if (uvIndex == -1) { - uvIndex = index; - } - } - - if (uvIndex == -1) { - FBXImporter::LogWarn("failed to resolve UV channel " + uvSet + ", using first UV channel"); - uvIndex = 0; - } - } - } - - out_mat->AddProperty(&uvIndex, 1, _AI_MATKEY_UVWSRC_BASE, target, texIndex); - } - } - - void FBXConverter::SetTextureProperties(aiMaterial* out_mat, const TextureMap& textures, const MeshGeometry* const mesh) - { - TrySetTextureProperties(out_mat, textures, "DiffuseColor", aiTextureType_DIFFUSE, mesh); - TrySetTextureProperties(out_mat, textures, "AmbientColor", aiTextureType_AMBIENT, 
mesh); - TrySetTextureProperties(out_mat, textures, "EmissiveColor", aiTextureType_EMISSIVE, mesh); - TrySetTextureProperties(out_mat, textures, "SpecularColor", aiTextureType_SPECULAR, mesh); - TrySetTextureProperties(out_mat, textures, "SpecularFactor", aiTextureType_SPECULAR, mesh); - TrySetTextureProperties(out_mat, textures, "TransparentColor", aiTextureType_OPACITY, mesh); - TrySetTextureProperties(out_mat, textures, "ReflectionColor", aiTextureType_REFLECTION, mesh); - TrySetTextureProperties(out_mat, textures, "DisplacementColor", aiTextureType_DISPLACEMENT, mesh); - TrySetTextureProperties(out_mat, textures, "NormalMap", aiTextureType_NORMALS, mesh); - TrySetTextureProperties(out_mat, textures, "Bump", aiTextureType_HEIGHT, mesh); - TrySetTextureProperties(out_mat, textures, "ShininessExponent", aiTextureType_SHININESS, mesh); - TrySetTextureProperties( out_mat, textures, "TransparencyFactor", aiTextureType_OPACITY, mesh ); - TrySetTextureProperties( out_mat, textures, "EmissiveFactor", aiTextureType_EMISSIVE, mesh ); - //Maya counterparts - TrySetTextureProperties(out_mat, textures, "Maya|DiffuseTexture", aiTextureType_DIFFUSE, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|NormalTexture", aiTextureType_NORMALS, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|SpecularTexture", aiTextureType_SPECULAR, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|FalloffTexture", aiTextureType_OPACITY, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|ReflectionMapTexture", aiTextureType_REFLECTION, mesh); - - // Maya PBR - TrySetTextureProperties(out_mat, textures, "Maya|baseColor|file", aiTextureType_BASE_COLOR, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|normalCamera|file", aiTextureType_NORMAL_CAMERA, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|emissionColor|file", aiTextureType_EMISSION_COLOR, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|metalness|file", aiTextureType_METALNESS, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|diffuseRoughness|file", aiTextureType_DIFFUSE_ROUGHNESS, mesh); - - // Maya stingray - TrySetTextureProperties(out_mat, textures, "Maya|TEX_color_map|file", aiTextureType_BASE_COLOR, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|TEX_normal_map|file", aiTextureType_NORMAL_CAMERA, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|TEX_emissive_map|file", aiTextureType_EMISSION_COLOR, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|TEX_metallic_map|file", aiTextureType_METALNESS, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|TEX_roughness_map|file", aiTextureType_DIFFUSE_ROUGHNESS, mesh); - TrySetTextureProperties(out_mat, textures, "Maya|TEX_ao_map|file", aiTextureType_AMBIENT_OCCLUSION, mesh); - } - - void FBXConverter::SetTextureProperties(aiMaterial* out_mat, const LayeredTextureMap& layeredTextures, const MeshGeometry* const mesh) - { - TrySetTextureProperties(out_mat, layeredTextures, "DiffuseColor", aiTextureType_DIFFUSE, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "AmbientColor", aiTextureType_AMBIENT, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "EmissiveColor", aiTextureType_EMISSIVE, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "SpecularColor", aiTextureType_SPECULAR, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "SpecularFactor", aiTextureType_SPECULAR, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "TransparentColor", aiTextureType_OPACITY, mesh); - 
TrySetTextureProperties(out_mat, layeredTextures, "ReflectionColor", aiTextureType_REFLECTION, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "DisplacementColor", aiTextureType_DISPLACEMENT, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "NormalMap", aiTextureType_NORMALS, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "Bump", aiTextureType_HEIGHT, mesh); - TrySetTextureProperties(out_mat, layeredTextures, "ShininessExponent", aiTextureType_SHININESS, mesh); - TrySetTextureProperties( out_mat, layeredTextures, "EmissiveFactor", aiTextureType_EMISSIVE, mesh ); - TrySetTextureProperties( out_mat, layeredTextures, "TransparencyFactor", aiTextureType_OPACITY, mesh ); - } - - aiColor3D FBXConverter::GetColorPropertyFactored(const PropertyTable& props, const std::string& colorName, - const std::string& factorName, bool& result, bool useTemplate) - { - result = true; - - bool ok; - aiVector3D BaseColor = PropertyGet<aiVector3D>(props, colorName, ok, useTemplate); - if (!ok) { - result = false; - return aiColor3D(0.0f, 0.0f, 0.0f); - } - - // if no factor name, return the colour as is - if (factorName.empty()) { - return aiColor3D(BaseColor.x, BaseColor.y, BaseColor.z); - } - - // otherwise it should be multiplied by the factor, if found. - float factor = PropertyGet<float>(props, factorName, ok, useTemplate); - if (ok) { - BaseColor *= factor; - } - return aiColor3D(BaseColor.x, BaseColor.y, BaseColor.z); - } - - aiColor3D FBXConverter::GetColorPropertyFromMaterial(const PropertyTable& props, const std::string& baseName, - bool& result) - { - return GetColorPropertyFactored(props, baseName + "Color", baseName + "Factor", result, true); - } - - aiColor3D FBXConverter::GetColorProperty(const PropertyTable& props, const std::string& colorName, - bool& result, bool useTemplate) - { - result = true; - bool ok; - const aiVector3D& ColorVec = PropertyGet<aiVector3D>(props, colorName, ok, useTemplate); - if (!ok) { - result = false; - return aiColor3D(0.0f, 0.0f, 0.0f); - } - return aiColor3D(ColorVec.x, ColorVec.y, ColorVec.z); - } - - void FBXConverter::SetShadingPropertiesCommon(aiMaterial* out_mat, const PropertyTable& props) - { - // Set shading properties. - // Modern FBX Files have two separate systems for defining these, - // with only the more comprehensive one described in the property template. - // Likely the other values are a legacy system, - // which is still always exported by the official FBX SDK. - // - // Blender's FBX import and export mostly ignore this legacy system, - // and as we only support recent versions of FBX anyway, we can do the same. 
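GetColorPropertyFactored above combines an FBX colour property with its companion scalar factor (for example "DiffuseColor" times "DiffuseFactor") when the factor exists. A standalone sketch of that composition, with hypothetical stand-ins for the property lookup:

    // Sketch: an FBX material colour is the colour property multiplied by its
    // optional factor property; a missing factor leaves the colour unchanged.
    #include <cstdio>
    #include <optional>

    struct Rgb { float r, g, b; };

    static Rgb FactoredColor(Rgb base, std::optional<float> factor) {
        if (factor) {
            base.r *= *factor;
            base.g *= *factor;
            base.b *= *factor;
        }
        return base;
    }

    int main() {
        const Rgb diffuse = FactoredColor({ 1.0f, 0.5f, 0.25f }, 0.8f); // DiffuseColor * DiffuseFactor
        std::printf("%.2f %.2f %.2f\n", diffuse.r, diffuse.g, diffuse.b); // 0.80 0.40 0.20
        return 0;
    }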
- bool ok; - - const aiColor3D& Diffuse = GetColorPropertyFromMaterial(props, "Diffuse", ok); - if (ok) { - out_mat->AddProperty(&Diffuse, 1, AI_MATKEY_COLOR_DIFFUSE); - } - - const aiColor3D& Emissive = GetColorPropertyFromMaterial(props, "Emissive", ok); - if (ok) { - out_mat->AddProperty(&Emissive, 1, AI_MATKEY_COLOR_EMISSIVE); - } - - const aiColor3D& Ambient = GetColorPropertyFromMaterial(props, "Ambient", ok); - if (ok) { - out_mat->AddProperty(&Ambient, 1, AI_MATKEY_COLOR_AMBIENT); - } - - // we store specular factor as SHININESS_STRENGTH, so just get the color - const aiColor3D& Specular = GetColorProperty(props, "SpecularColor", ok, true); - if (ok) { - out_mat->AddProperty(&Specular, 1, AI_MATKEY_COLOR_SPECULAR); - } - - // and also try to get SHININESS_STRENGTH - const float SpecularFactor = PropertyGet<float>(props, "SpecularFactor", ok, true); - if (ok) { - out_mat->AddProperty(&SpecularFactor, 1, AI_MATKEY_SHININESS_STRENGTH); - } - - // and the specular exponent - const float ShininessExponent = PropertyGet<float>(props, "ShininessExponent", ok); - if (ok) { - out_mat->AddProperty(&ShininessExponent, 1, AI_MATKEY_SHININESS); - } - - // TransparentColor / TransparencyFactor... gee thanks FBX :rolleyes: - const aiColor3D& Transparent = GetColorPropertyFactored(props, "TransparentColor", "TransparencyFactor", ok); - float CalculatedOpacity = 1.0f; - if (ok) { - out_mat->AddProperty(&Transparent, 1, AI_MATKEY_COLOR_TRANSPARENT); - // as calculated by FBX SDK 2017: - CalculatedOpacity = 1.0f - ((Transparent.r + Transparent.g + Transparent.b) / 3.0f); - } - - // try to get the transparency factor - const float TransparencyFactor = PropertyGet<float>(props, "TransparencyFactor", ok); - if (ok) { - out_mat->AddProperty(&TransparencyFactor, 1, AI_MATKEY_TRANSPARENCYFACTOR); - } - - // use of TransparencyFactor is inconsistent. - // Maya always stores it as 1.0, - // so we can't use it to set AI_MATKEY_OPACITY. - // Blender is more sensible and stores it as the alpha value. - // However both the FBX SDK and Blender always write an additional - // legacy "Opacity" field, so we can try to use that. - // - // If we can't find it, - // we can fall back to the value which the FBX SDK calculates - // from transparency colour (RGB) and factor (F) as - // 1.0 - F*((R+G+B)/3). - // - // There's no consistent way to interpret this opacity value, - // so it's up to clients to do the correct thing. 
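The opacity handling that follows implements the fallback described here: prefer the legacy "Opacity" field when present, otherwise derive opacity from the transparency colour as 1 - (R+G+B)/3. A small sketch of that decision under the same assumptions:

    // Sketch: resolve an opacity value as the comment above describes -- use the
    // legacy "Opacity" property when it exists, otherwise fall back to
    // 1 - mean(TransparentColor) as calculated by the FBX SDK.
    #include <cstdio>
    #include <optional>

    static float ResolveOpacity(std::optional<float> legacyOpacity,
                                float transparentR, float transparentG, float transparentB) {
        if (legacyOpacity) {
            return *legacyOpacity;
        }
        return 1.0f - (transparentR + transparentG + transparentB) / 3.0f;
    }

    int main() {
        std::printf("%.2f\n", ResolveOpacity(0.5f, 1.0f, 1.0f, 1.0f));         // 0.50 (legacy field wins)
        std::printf("%.2f\n", ResolveOpacity(std::nullopt, 0.3f, 0.3f, 0.3f)); // 0.70 (derived value)
        return 0;
    }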
- const float Opacity = PropertyGet<float>(props, "Opacity", ok); - if (ok) { - out_mat->AddProperty(&Opacity, 1, AI_MATKEY_OPACITY); - } - else if (CalculatedOpacity != 1.0) { - out_mat->AddProperty(&CalculatedOpacity, 1, AI_MATKEY_OPACITY); - } - - // reflection color and factor are stored separately - const aiColor3D& Reflection = GetColorProperty(props, "ReflectionColor", ok, true); - if (ok) { - out_mat->AddProperty(&Reflection, 1, AI_MATKEY_COLOR_REFLECTIVE); - } - - float ReflectionFactor = PropertyGet<float>(props, "ReflectionFactor", ok, true); - if (ok) { - out_mat->AddProperty(&ReflectionFactor, 1, AI_MATKEY_REFLECTIVITY); - } - - const float BumpFactor = PropertyGet<float>(props, "BumpFactor", ok); - if (ok) { - out_mat->AddProperty(&BumpFactor, 1, AI_MATKEY_BUMPSCALING); - } - - const float DispFactor = PropertyGet<float>(props, "DisplacementFactor", ok); - if (ok) { - out_mat->AddProperty(&DispFactor, 1, "$mat.displacementscaling", 0, 0); - } -} - - -void FBXConverter::SetShadingPropertiesRaw(aiMaterial* out_mat, const PropertyTable& props, const TextureMap& textures, const MeshGeometry* const mesh) -{ - // Add all the unparsed properties with a "$raw." prefix - - const std::string prefix = "$raw."; - - for (const DirectPropertyMap::value_type& prop : props.GetUnparsedProperties()) { - - std::string name = prefix + prop.first; - - if (const TypedProperty<aiVector3D>* interpreted = prop.second->As<TypedProperty<aiVector3D> >()) - { - out_mat->AddProperty(&interpreted->Value(), 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<aiColor3D>* interpreted = prop.second->As<TypedProperty<aiColor3D> >()) - { - out_mat->AddProperty(&interpreted->Value(), 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<aiColor4D>* interpreted = prop.second->As<TypedProperty<aiColor4D> >()) - { - out_mat->AddProperty(&interpreted->Value(), 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<float>* interpreted = prop.second->As<TypedProperty<float> >()) - { - out_mat->AddProperty(&interpreted->Value(), 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<int>* interpreted = prop.second->As<TypedProperty<int> >()) - { - out_mat->AddProperty(&interpreted->Value(), 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<bool>* interpreted = prop.second->As<TypedProperty<bool> >()) - { - int value = interpreted->Value() ? 
1 : 0; - out_mat->AddProperty(&value, 1, name.c_str(), 0, 0); - } - else if (const TypedProperty<std::string>* interpreted = prop.second->As<TypedProperty<std::string> >()) - { - const aiString value = aiString(interpreted->Value()); - out_mat->AddProperty(&value, name.c_str(), 0, 0); - } - } - - // Add the textures' properties - - for (TextureMap::const_iterator it = textures.begin(); it != textures.end(); it++) { - - std::string name = prefix + it->first; - - const Texture* const tex = (*it).second; - if (tex != nullptr) - { - aiString path; - path.Set(tex->RelativeFilename()); - - const Video* media = tex->Media(); - if (media != nullptr && media->ContentLength() > 0) { - unsigned int index; - - VideoMap::const_iterator it = textures_converted.find(*media); - if (it != textures_converted.end()) { - index = (*it).second; - } - else { - index = ConvertVideo(*media); - textures_converted[*media] = index; - } - - // setup texture reference string (copied from ColladaLoader::FindFilenameForEffectTexture) - path.data[0] = '*'; - path.length = 1 + ASSIMP_itoa10(path.data + 1, MAXLEN - 1, index); - } - - out_mat->AddProperty(&path, (name + "|file").c_str(), aiTextureType_UNKNOWN, 0); - - aiUVTransform uvTrafo; - // XXX handle all kinds of UV transformations - uvTrafo.mScaling = tex->UVScaling(); - uvTrafo.mTranslation = tex->UVTranslation(); - out_mat->AddProperty(&uvTrafo, 1, (name + "|uvtrafo").c_str(), aiTextureType_UNKNOWN, 0); - - int uvIndex = 0; - - bool uvFound = false; - const std::string& uvSet = PropertyGet<std::string>(tex->Props(), "UVSet", uvFound); - if (uvFound) { - // "default" is the name which usually appears in the FbxFileTexture template - if (uvSet != "default" && uvSet.length()) { - // this is a bit awkward - we need to find a mesh that uses this - // material and scan its UV channels for the given UV name because - // assimp references UV channels by index, not by name. - - // XXX: the case that UV channels may appear in different orders - // in meshes is unhandled. A possible solution would be to sort - // the UV channels alphabetically, but this would have the side - // effect that the primary (first) UV channel would sometimes - // be moved, causing trouble when users read only the first - // UV channel and ignore UV channel assignments altogether. 
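The block after this comment resolves a UV set name to a numeric channel index, because assimp addresses UV channels by position rather than by name. A standalone sketch of that resolution, including the fall-back-to-channel-0 behaviour (names are illustrative):

    // Sketch: find the index of a named UV channel in a mesh's ordered list of
    // channel names; unresolved names fall back to channel 0, mirroring the
    // warning path in the removed code.
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    static int ResolveUvChannel(const std::vector<std::string> &channelNames,
                                const std::string &uvSet) {
        for (std::size_t i = 0; i < channelNames.size(); ++i) {
            if (channelNames[i] == uvSet) {
                return static_cast<int>(i);
            }
        }
        std::fprintf(stderr, "did not find UV channel named %s, using first UV channel\n", uvSet.c_str());
        return 0;
    }

    int main() {
        const std::vector<std::string> channels = { "map1", "lightmap" };
        std::printf("%d\n", ResolveUvChannel(channels, "lightmap")); // 1
        std::printf("%d\n", ResolveUvChannel(channels, "missing"));  // 0 (with a warning)
        return 0;
    }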
- - std::vector<aiMaterial*>::iterator materialIt = std::find(materials.begin(), materials.end(), out_mat); - const unsigned int matIndex = static_cast<unsigned int>(std::distance(materials.begin(), materialIt)); - - uvIndex = -1; - if (!mesh) - { - for (const MeshMap::value_type& v : meshes_converted) { - const MeshGeometry* const meshGeom = dynamic_cast<const MeshGeometry*>(v.first); - if (!meshGeom) { - continue; - } - - const MatIndexArray& mats = meshGeom->GetMaterialIndices(); - if (std::find(mats.begin(), mats.end(), matIndex) == mats.end()) { - continue; - } - - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (meshGeom->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = meshGeom->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - continue; - } - - if (uvIndex == -1) { - uvIndex = index; - } - else { - FBXImporter::LogWarn("the UV channel named " + uvSet + " appears at different positions in meshes, results will be wrong"); - } - } - } - else - { - int index = -1; - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - if (mesh->GetTextureCoords(i).empty()) { - break; - } - const std::string& name = mesh->GetTextureCoordChannelName(i); - if (name == uvSet) { - index = static_cast<int>(i); - break; - } - } - if (index == -1) { - FBXImporter::LogWarn("did not find UV channel named " + uvSet + " in a mesh using this material"); - } - - if (uvIndex == -1) { - uvIndex = index; - } - } - - if (uvIndex == -1) { - FBXImporter::LogWarn("failed to resolve UV channel " + uvSet + ", using first UV channel"); - uvIndex = 0; - } - } - } - - out_mat->AddProperty(&uvIndex, 1, (name + "|uvwsrc").c_str(), aiTextureType_UNKNOWN, 0); - } - } - } - - - double FBXConverter::FrameRateToDouble(FileGlobalSettings::FrameRate fp, double customFPSVal) { - switch (fp) { - case FileGlobalSettings::FrameRate_DEFAULT: - return 1.0; - - case FileGlobalSettings::FrameRate_120: - return 120.0; - - case FileGlobalSettings::FrameRate_100: - return 100.0; - - case FileGlobalSettings::FrameRate_60: - return 60.0; - - case FileGlobalSettings::FrameRate_50: - return 50.0; - - case FileGlobalSettings::FrameRate_48: - return 48.0; - - case FileGlobalSettings::FrameRate_30: - case FileGlobalSettings::FrameRate_30_DROP: - return 30.0; - - case FileGlobalSettings::FrameRate_NTSC_DROP_FRAME: - case FileGlobalSettings::FrameRate_NTSC_FULL_FRAME: - return 29.9700262; - - case FileGlobalSettings::FrameRate_PAL: - return 25.0; - - case FileGlobalSettings::FrameRate_CINEMA: - return 24.0; - - case FileGlobalSettings::FrameRate_1000: - return 1000.0; - - case FileGlobalSettings::FrameRate_CINEMA_ND: - return 23.976; - - case FileGlobalSettings::FrameRate_CUSTOM: - return customFPSVal; - - case FileGlobalSettings::FrameRate_MAX: // this is to silence compiler warnings - break; - } - - ai_assert(false); - - return -1.0f; - } - - - void FBXConverter::ConvertAnimations() - { - // first of all determine framerate - const FileGlobalSettings::FrameRate fps = doc.GlobalSettings().TimeMode(); - const float custom = doc.GlobalSettings().CustomFrameRate(); - anim_fps = FrameRateToDouble(fps, custom); - - const std::vector<const AnimationStack*>& animations = doc.AnimationStacks(); - for (const AnimationStack* stack : animations) { - ConvertAnimationStack(*stack); - } - } - - std::string 
FBXConverter::FixNodeName(const std::string& name) { - // strip Model:: prefix, avoiding ambiguities (i.e. don't strip if - // this causes ambiguities, well possible between empty identifiers, - // such as "Model::" and ""). Make sure the behaviour is consistent - // across multiple calls to FixNodeName(). - if (name.substr(0, 7) == "Model::") { - std::string temp = name.substr(7); - return temp; - } - - return name; - } - - std::string FBXConverter::FixAnimMeshName(const std::string& name) { - if (name.length()) { - size_t indexOf = name.find_first_of("::"); - if (indexOf != std::string::npos && indexOf < name.size() - 2) { - return name.substr(indexOf + 2); - } - } - return name.length() ? name : "AnimMesh"; - } - - void FBXConverter::ConvertAnimationStack(const AnimationStack& st) - { - const AnimationLayerList& layers = st.Layers(); - if (layers.empty()) { - return; - } - - aiAnimation* const anim = new aiAnimation(); - animations.push_back(anim); - - // strip AnimationStack:: prefix - std::string name = st.Name(); - if (name.substr(0, 16) == "AnimationStack::") { - name = name.substr(16); - } - else if (name.substr(0, 11) == "AnimStack::") { - name = name.substr(11); - } - - anim->mName.Set(name); - - // need to find all nodes for which we need to generate node animations - - // it may happen that we need to merge multiple layers, though. - NodeMap node_map; - - // reverse mapping from curves to layers, much faster than querying - // the FBX DOM for it. - LayerMap layer_map; - - const char* prop_whitelist[] = { - "Lcl Scaling", - "Lcl Rotation", - "Lcl Translation", - "DeformPercent" - }; - - std::map<std::string, morphAnimData*> morphAnimDatas; - - for (const AnimationLayer* layer : layers) { - ai_assert(layer); - const AnimationCurveNodeList& nodes = layer->Nodes(prop_whitelist, 4); - for (const AnimationCurveNode* node : nodes) { - ai_assert(node); - const Model* const model = dynamic_cast<const Model*>(node->Target()); - if (model) { - const std::string& name = FixNodeName(model->Name()); - node_map[name].push_back(node); - layer_map[node] = layer; - continue; - } - const BlendShapeChannel* const bsc = dynamic_cast<const BlendShapeChannel*>(node->Target()); - if (bsc) { - ProcessMorphAnimDatas(&morphAnimDatas, bsc, node); - } - } - } - - // generate node animations - std::vector<aiNodeAnim*> node_anims; - - double min_time = 1e10; - double max_time = -1e10; - - int64_t start_time = st.LocalStart(); - int64_t stop_time = st.LocalStop(); - bool has_local_startstop = start_time != 0 || stop_time != 0; - if (!has_local_startstop) { - // no time range given, so accept every keyframe and use the actual min/max time - // the numbers are INT64_MIN/MAX, the 20000 is for safety because GenerateNodeAnimations uses an epsilon of 10000 - start_time = -9223372036854775807ll + 20000; - stop_time = 9223372036854775807ll - 20000; - } - - try { - for (const NodeMap::value_type& kv : node_map) { - GenerateNodeAnimations(node_anims, - kv.first, - kv.second, - layer_map, - start_time, stop_time, - max_time, - min_time); - } - } - catch (std::exception&) { - std::for_each(node_anims.begin(), node_anims.end(), Util::delete_fun<aiNodeAnim>()); - throw; - } - - if (node_anims.size() || morphAnimDatas.size()) { - if (node_anims.size()) { - anim->mChannels = new aiNodeAnim*[node_anims.size()](); - anim->mNumChannels = static_cast<unsigned int>(node_anims.size()); - std::swap_ranges(node_anims.begin(), node_anims.end(), anim->mChannels); - } - if (morphAnimDatas.size()) { - unsigned int numMorphMeshChannels 
= static_cast<unsigned int>(morphAnimDatas.size()); - anim->mMorphMeshChannels = new aiMeshMorphAnim*[numMorphMeshChannels]; - anim->mNumMorphMeshChannels = numMorphMeshChannels; - unsigned int i = 0; - for (auto morphAnimIt : morphAnimDatas) { - morphAnimData* animData = morphAnimIt.second; - unsigned int numKeys = static_cast<unsigned int>(animData->size()); - aiMeshMorphAnim* meshMorphAnim = new aiMeshMorphAnim(); - meshMorphAnim->mName.Set(morphAnimIt.first); - meshMorphAnim->mNumKeys = numKeys; - meshMorphAnim->mKeys = new aiMeshMorphKey[numKeys]; - unsigned int j = 0; - for (auto animIt : *animData) { - morphKeyData* keyData = animIt.second; - unsigned int numValuesAndWeights = static_cast<unsigned int>(keyData->values.size()); - meshMorphAnim->mKeys[j].mNumValuesAndWeights = numValuesAndWeights; - meshMorphAnim->mKeys[j].mValues = new unsigned int[numValuesAndWeights]; - meshMorphAnim->mKeys[j].mWeights = new double[numValuesAndWeights]; - meshMorphAnim->mKeys[j].mTime = CONVERT_FBX_TIME(animIt.first) * anim_fps; - for (unsigned int k = 0; k < numValuesAndWeights; k++) { - meshMorphAnim->mKeys[j].mValues[k] = keyData->values.at(k); - meshMorphAnim->mKeys[j].mWeights[k] = keyData->weights.at(k); - } - j++; - } - anim->mMorphMeshChannels[i++] = meshMorphAnim; - } - } - } - else { - // empty animations would fail validation, so drop them - delete anim; - animations.pop_back(); - FBXImporter::LogInfo("ignoring empty AnimationStack (using IK?): " + name); - return; - } - - double start_time_fps = has_local_startstop ? (CONVERT_FBX_TIME(start_time) * anim_fps) : min_time; - double stop_time_fps = has_local_startstop ? (CONVERT_FBX_TIME(stop_time) * anim_fps) : max_time; - - // adjust relative timing for animation - for (unsigned int c = 0; c < anim->mNumChannels; c++) { - aiNodeAnim* channel = anim->mChannels[c]; - for (uint32_t i = 0; i < channel->mNumPositionKeys; i++) { - channel->mPositionKeys[i].mTime -= start_time_fps; - } - for (uint32_t i = 0; i < channel->mNumRotationKeys; i++) { - channel->mRotationKeys[i].mTime -= start_time_fps; - } - for (uint32_t i = 0; i < channel->mNumScalingKeys; i++) { - channel->mScalingKeys[i].mTime -= start_time_fps; - } - } - for (unsigned int c = 0; c < anim->mNumMorphMeshChannels; c++) { - aiMeshMorphAnim* channel = anim->mMorphMeshChannels[c]; - for (uint32_t i = 0; i < channel->mNumKeys; i++) { - channel->mKeys[i].mTime -= start_time_fps; - } - } - - // for some mysterious reason, mDuration is simply the maximum key -- the - // validator always assumes animations to start at zero. 
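The rebasing loops above subtract the stack's start time from every key so the clip starts at zero, after converting FBX ticks to the output time base. A minimal sketch of that conversion and rebase, assuming FBX's 46,186,158,000 ticks per second and illustrative names:

    // Sketch: convert FBX tick values to frames at the animation frame rate and
    // shift them so the first key of the stack lands at time 0, as the loops
    // above do for position/rotation/scaling and morph keys.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const double kFbxTicksPerSecond = 46186158000.0; // assumption: standard FBX tick rate

    static double TicksToFrames(int64_t ticks, double fps) {
        return (static_cast<double>(ticks) / kFbxTicksPerSecond) * fps;
    }

    int main() {
        const double fps = 30.0;
        const int64_t startTick = 46186158000;               // stack starts at 1 s
        std::vector<double> keyTimes = {
            TicksToFrames(46186158000, fps),                 // 30.0 frames
            TicksToFrames(2 * 46186158000, fps),             // 60.0 frames
        };
        const double startFrames = TicksToFrames(startTick, fps);
        for (double &t : keyTimes) {
            t -= startFrames;                                // rebase so the clip starts at 0
        }
        std::printf("%.1f %.1f\n", keyTimes[0], keyTimes[1]); // 0.0 30.0
        return 0;
    }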
- anim->mDuration = stop_time_fps - start_time_fps; - anim->mTicksPerSecond = anim_fps; - } - - // ------------------------------------------------------------------------------------------------ - void FBXConverter::ProcessMorphAnimDatas(std::map<std::string, morphAnimData*>* morphAnimDatas, const BlendShapeChannel* bsc, const AnimationCurveNode* node) { - std::vector<const Connection*> bscConnections = doc.GetConnectionsBySourceSequenced(bsc->ID(), "Deformer"); - for (const Connection* bscConnection : bscConnections) { - auto bs = dynamic_cast<const BlendShape*>(bscConnection->DestinationObject()); - if (bs) { - auto channelIt = std::find(bs->BlendShapeChannels().begin(), bs->BlendShapeChannels().end(), bsc); - if (channelIt != bs->BlendShapeChannels().end()) { - auto channelIndex = static_cast<unsigned int>(std::distance(bs->BlendShapeChannels().begin(), channelIt)); - std::vector<const Connection*> bsConnections = doc.GetConnectionsBySourceSequenced(bs->ID(), "Geometry"); - for (const Connection* bsConnection : bsConnections) { - auto geo = dynamic_cast<const Geometry*>(bsConnection->DestinationObject()); - if (geo) { - std::vector<const Connection*> geoConnections = doc.GetConnectionsBySourceSequenced(geo->ID(), "Model"); - for (const Connection* geoConnection : geoConnections) { - auto model = dynamic_cast<const Model*>(geoConnection->DestinationObject()); - if (model) { - auto geoIt = std::find(model->GetGeometry().begin(), model->GetGeometry().end(), geo); - auto geoIndex = static_cast<unsigned int>(std::distance(model->GetGeometry().begin(), geoIt)); - auto name = aiString(FixNodeName(model->Name() + "*")); - name.length = 1 + ASSIMP_itoa10(name.data + name.length, MAXLEN - 1, geoIndex); - morphAnimData* animData; - auto animIt = morphAnimDatas->find(name.C_Str()); - if (animIt == morphAnimDatas->end()) { - animData = new morphAnimData(); - morphAnimDatas->insert(std::make_pair(name.C_Str(), animData)); - } - else { - animData = animIt->second; - } - for (std::pair<std::string, const AnimationCurve*> curvesIt : node->Curves()) { - if (curvesIt.first == "d|DeformPercent") { - const AnimationCurve* animationCurve = curvesIt.second; - const KeyTimeList& keys = animationCurve->GetKeys(); - const KeyValueList& values = animationCurve->GetValues(); - unsigned int k = 0; - for (auto key : keys) { - morphKeyData* keyData; - auto keyIt = animData->find(key); - if (keyIt == animData->end()) { - keyData = new morphKeyData(); - animData->insert(std::make_pair(key, keyData)); - } - else { - keyData = keyIt->second; - } - keyData->values.push_back(channelIndex); - keyData->weights.push_back(values.at(k) / 100.0f); - k++; - } - } - } - } - } - } - } - } - } - } - } - - // ------------------------------------------------------------------------------------------------ -#ifdef ASSIMP_BUILD_DEBUG - // ------------------------------------------------------------------------------------------------ - // sanity check whether the input is ok - static void validateAnimCurveNodes(const std::vector<const AnimationCurveNode*>& curves, - bool strictMode) { - const Object* target(nullptr); - for (const AnimationCurveNode* node : curves) { - if (!target) { - target = node->Target(); - } - if (node->Target() != target) { - FBXImporter::LogWarn("Node target is nullptr type."); - } - if (strictMode) { - ai_assert(node->Target() == target); - } - } - } -#endif // ASSIMP_BUILD_DEBUG - - // ------------------------------------------------------------------------------------------------ - void 
FBXConverter::GenerateNodeAnimations(std::vector<aiNodeAnim*>& node_anims, - const std::string& fixed_name, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time) - { - - NodeMap node_property_map; - ai_assert(curves.size()); - -#ifdef ASSIMP_BUILD_DEBUG - validateAnimCurveNodes(curves, doc.Settings().strictMode); -#endif - const AnimationCurveNode* curve_node = nullptr; - for (const AnimationCurveNode* node : curves) { - ai_assert(node); - - if (node->TargetProperty().empty()) { - FBXImporter::LogWarn("target property for animation curve not set: " + node->Name()); - continue; - } - - curve_node = node; - if (node->Curves().empty()) { - FBXImporter::LogWarn("no animation curves assigned to AnimationCurveNode: " + node->Name()); - continue; - } - - node_property_map[node->TargetProperty()].push_back(node); - } - - ai_assert(curve_node); - ai_assert(curve_node->TargetAsModel()); - - const Model& target = *curve_node->TargetAsModel(); - - // check for all possible transformation components - NodeMap::const_iterator chain[TransformationComp_MAXIMUM]; - - bool has_any = false; - bool has_complex = false; - - for (size_t i = 0; i < TransformationComp_MAXIMUM; ++i) { - const TransformationComp comp = static_cast<TransformationComp>(i); - - // inverse pivots don't exist in the input, we just generate them - if (comp == TransformationComp_RotationPivotInverse || comp == TransformationComp_ScalingPivotInverse) { - chain[i] = node_property_map.end(); - continue; - } - - chain[i] = node_property_map.find(NameTransformationCompProperty(comp)); - if (chain[i] != node_property_map.end()) { - - // check if this curves contains redundant information by looking - // up the corresponding node's transformation chain. - if (doc.Settings().optimizeEmptyAnimationCurves && - IsRedundantAnimationData(target, comp, (*chain[i]).second)) { - - FBXImporter::LogDebug("dropping redundant animation channel for node " + target.Name()); - continue; - } - - has_any = true; - - if (comp != TransformationComp_Rotation && comp != TransformationComp_Scaling && comp != TransformationComp_Translation) - { - has_complex = true; - } - } - } - - if (!has_any) { - FBXImporter::LogWarn("ignoring node animation, did not find any transformation key frames"); - return; - } - - // this needs to play nicely with GenerateTransformationNodeChain() which will - // be invoked _later_ (animations come first). If this node has only rotation, - // scaling and translation _and_ there are no animated other components either, - // we can use a single node and also a single node animation channel. - if (!has_complex && !NeedsComplexTransformationChain(target)) { - - aiNodeAnim* const nd = GenerateSimpleNodeAnim(fixed_name, target, chain, - node_property_map.end(), - layer_map, - start, stop, - max_time, - min_time, - true // input is TRS order, assimp is SRT - ); - - ai_assert(nd); - if (nd->mNumPositionKeys == 0 && nd->mNumRotationKeys == 0 && nd->mNumScalingKeys == 0) { - delete nd; - } - else { - node_anims.push_back(nd); - } - return; - } - - // otherwise, things get gruesome and we need separate animation channels - // for each part of the transformation chain. Remember which channels - // we generated and pass this information to the node conversion - // code to avoid nodes that have identity transform, but non-identity - // animations, being dropped. 
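The chunk that follows records, per node, which transformation components received their own animation channel, so that the later node-chain generation does not drop pieces that are identity in the bind pose but animated. A toy sketch of that bitmask bookkeeping, with an illustrative component enum in place of the full TransformationComp list:

    // Sketch: remember which transformation components of a node produced an
    // animation channel, one bit per component, keyed by the node name.
    #include <cstdio>
    #include <map>
    #include <string>

    enum TransformComp { CompTranslation = 0, CompRotation, CompScaling, CompMaximum }; // illustrative subset

    int main() {
        std::map<std::string, unsigned int> node_anim_chain_bits;

        const bool hasChannel[CompMaximum] = { true, false, true }; // e.g. translation + scaling animated
        unsigned int flags = 0, bit = 0x1;
        for (int i = 0; i < CompMaximum; ++i, bit <<= 1) {
            if (hasChannel[i]) {
                flags |= bit;
            }
        }
        node_anim_chain_bits["Cube"] = flags;

        std::printf("flags for Cube: 0x%x\n", node_anim_chain_bits["Cube"]); // 0x5
        return 0;
    }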
- unsigned int flags = 0, bit = 0x1; - for (size_t i = 0; i < TransformationComp_MAXIMUM; ++i, bit <<= 1) { - const TransformationComp comp = static_cast<TransformationComp>(i); - - if (chain[i] != node_property_map.end()) { - flags |= bit; - - ai_assert(comp != TransformationComp_RotationPivotInverse); - ai_assert(comp != TransformationComp_ScalingPivotInverse); - - const std::string& chain_name = NameTransformationChainNode(fixed_name, comp); - - aiNodeAnim* na = nullptr; - switch (comp) - { - case TransformationComp_Rotation: - case TransformationComp_PreRotation: - case TransformationComp_PostRotation: - case TransformationComp_GeometricRotation: - na = GenerateRotationNodeAnim(chain_name, - target, - (*chain[i]).second, - layer_map, - start, stop, - max_time, - min_time); - - break; - - case TransformationComp_RotationOffset: - case TransformationComp_RotationPivot: - case TransformationComp_ScalingOffset: - case TransformationComp_ScalingPivot: - case TransformationComp_Translation: - case TransformationComp_GeometricTranslation: - na = GenerateTranslationNodeAnim(chain_name, - target, - (*chain[i]).second, - layer_map, - start, stop, - max_time, - min_time); - - // pivoting requires us to generate an implicit inverse channel to undo the pivot translation - if (comp == TransformationComp_RotationPivot) { - const std::string& invName = NameTransformationChainNode(fixed_name, - TransformationComp_RotationPivotInverse); - - aiNodeAnim* const inv = GenerateTranslationNodeAnim(invName, - target, - (*chain[i]).second, - layer_map, - start, stop, - max_time, - min_time, - true); - - ai_assert(inv); - if (inv->mNumPositionKeys == 0 && inv->mNumRotationKeys == 0 && inv->mNumScalingKeys == 0) { - delete inv; - } - else { - node_anims.push_back(inv); - } - - ai_assert(TransformationComp_RotationPivotInverse > i); - flags |= bit << (TransformationComp_RotationPivotInverse - i); - } - else if (comp == TransformationComp_ScalingPivot) { - const std::string& invName = NameTransformationChainNode(fixed_name, - TransformationComp_ScalingPivotInverse); - - aiNodeAnim* const inv = GenerateTranslationNodeAnim(invName, - target, - (*chain[i]).second, - layer_map, - start, stop, - max_time, - min_time, - true); - - ai_assert(inv); - if (inv->mNumPositionKeys == 0 && inv->mNumRotationKeys == 0 && inv->mNumScalingKeys == 0) { - delete inv; - } - else { - node_anims.push_back(inv); - } - - ai_assert(TransformationComp_RotationPivotInverse > i); - flags |= bit << (TransformationComp_RotationPivotInverse - i); - } - - break; - - case TransformationComp_Scaling: - case TransformationComp_GeometricScaling: - na = GenerateScalingNodeAnim(chain_name, - target, - (*chain[i]).second, - layer_map, - start, stop, - max_time, - min_time); - - break; - - default: - ai_assert(false); - } - - ai_assert(na); - if (na->mNumPositionKeys == 0 && na->mNumRotationKeys == 0 && na->mNumScalingKeys == 0) { - delete na; - } - else { - node_anims.push_back(na); - } - continue; - } - } - - node_anim_chain_bits[fixed_name] = flags; - } - - - bool FBXConverter::IsRedundantAnimationData(const Model& target, - TransformationComp comp, - const std::vector<const AnimationCurveNode*>& curves) { - ai_assert(curves.size()); - - // look for animation nodes with - // * sub channels for all relevant components set - // * one key/value pair per component - // * combined values match up the corresponding value in the bind pose node transformation - // only such nodes are 'redundant' for this function. 
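IsRedundantAnimationData, whose body follows, treats a channel as redundant when each of d|X, d|Y and d|Z carries exactly one key and the combined value matches the node's static bind-pose value within epsilon. A standalone sketch of that comparison with plain types:

    // Sketch: a one-key XYZ curve triple that merely restates the bind-pose value
    // carries no information, so it can be dropped (the "redundant" case above).
    #include <cstdio>
    #include <vector>

    struct Vec3 { float x, y, z; };

    static bool IsRedundantSingleKey(const std::vector<float> &vx,
                                     const std::vector<float> &vy,
                                     const std::vector<float> &vz,
                                     Vec3 staticValue, float epsilon = 1e-6f) {
        if (vx.size() != 1 || vy.size() != 1 || vz.size() != 1) {
            return false; // more than one key: the channel really animates something
        }
        const float dx = vx[0] - staticValue.x;
        const float dy = vy[0] - staticValue.y;
        const float dz = vz[0] - staticValue.z;
        return (dx * dx + dy * dy + dz * dz) < epsilon;
    }

    int main() {
        std::printf("%d\n", IsRedundantSingleKey({ 1.f }, { 2.f }, { 3.f }, { 1.f, 2.f, 3.f })); // 1
        std::printf("%d\n", IsRedundantSingleKey({ 1.f }, { 2.f }, { 4.f }, { 1.f, 2.f, 3.f })); // 0
        return 0;
    }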
- - if (curves.size() > 1) { - return false; - } - - const AnimationCurveNode& nd = *curves.front(); - const AnimationCurveMap& sub_curves = nd.Curves(); - - const AnimationCurveMap::const_iterator dx = sub_curves.find("d|X"); - const AnimationCurveMap::const_iterator dy = sub_curves.find("d|Y"); - const AnimationCurveMap::const_iterator dz = sub_curves.find("d|Z"); - - if (dx == sub_curves.end() || dy == sub_curves.end() || dz == sub_curves.end()) { - return false; - } - - const KeyValueList& vx = (*dx).second->GetValues(); - const KeyValueList& vy = (*dy).second->GetValues(); - const KeyValueList& vz = (*dz).second->GetValues(); - - if (vx.size() != 1 || vy.size() != 1 || vz.size() != 1) { - return false; - } - - const aiVector3D dyn_val = aiVector3D(vx[0], vy[0], vz[0]); - const aiVector3D& static_val = PropertyGet<aiVector3D>(target.Props(), - NameTransformationCompProperty(comp), - TransformationCompDefaultValue(comp) - ); - - const float epsilon = Math::getEpsilon<float>(); - return (dyn_val - static_val).SquareLength() < epsilon; - } - - - aiNodeAnim* FBXConverter::GenerateRotationNodeAnim(const std::string& name, - const Model& target, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time) - { - std::unique_ptr<aiNodeAnim> na(new aiNodeAnim()); - na->mNodeName.Set(name); - - ConvertRotationKeys(na.get(), curves, layer_map, start, stop, max_time, min_time, target.RotationOrder()); - - // dummy scaling key - na->mScalingKeys = new aiVectorKey[1]; - na->mNumScalingKeys = 1; - - na->mScalingKeys[0].mTime = 0.; - na->mScalingKeys[0].mValue = aiVector3D(1.0f, 1.0f, 1.0f); - - // dummy position key - na->mPositionKeys = new aiVectorKey[1]; - na->mNumPositionKeys = 1; - - na->mPositionKeys[0].mTime = 0.; - na->mPositionKeys[0].mValue = aiVector3D(); - - return na.release(); - } - - aiNodeAnim* FBXConverter::GenerateScalingNodeAnim(const std::string& name, - const Model& /*target*/, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time) - { - std::unique_ptr<aiNodeAnim> na(new aiNodeAnim()); - na->mNodeName.Set(name); - - ConvertScaleKeys(na.get(), curves, layer_map, start, stop, max_time, min_time); - - // dummy rotation key - na->mRotationKeys = new aiQuatKey[1]; - na->mNumRotationKeys = 1; - - na->mRotationKeys[0].mTime = 0.; - na->mRotationKeys[0].mValue = aiQuaternion(); - - // dummy position key - na->mPositionKeys = new aiVectorKey[1]; - na->mNumPositionKeys = 1; - - na->mPositionKeys[0].mTime = 0.; - na->mPositionKeys[0].mValue = aiVector3D(); - - return na.release(); - } - - aiNodeAnim* FBXConverter::GenerateTranslationNodeAnim(const std::string& name, - const Model& /*target*/, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time, - bool inverse) { - std::unique_ptr<aiNodeAnim> na(new aiNodeAnim()); - na->mNodeName.Set(name); - - ConvertTranslationKeys(na.get(), curves, layer_map, start, stop, max_time, min_time); - - if (inverse) { - for (unsigned int i = 0; i < na->mNumPositionKeys; ++i) { - na->mPositionKeys[i].mValue *= -1.0f; - } - } - - // dummy scaling key - na->mScalingKeys = new aiVectorKey[1]; - na->mNumScalingKeys = 1; - - na->mScalingKeys[0].mTime = 0.; - na->mScalingKeys[0].mValue = aiVector3D(1.0f, 1.0f, 1.0f); - - // dummy rotation key - na->mRotationKeys = new 
aiQuatKey[1]; - na->mNumRotationKeys = 1; - - na->mRotationKeys[0].mTime = 0.; - na->mRotationKeys[0].mValue = aiQuaternion(); - - return na.release(); - } - - aiNodeAnim* FBXConverter::GenerateSimpleNodeAnim(const std::string& name, - const Model& target, - NodeMap::const_iterator chain[TransformationComp_MAXIMUM], - NodeMap::const_iterator iter_end, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time, - bool reverse_order) - - { - std::unique_ptr<aiNodeAnim> na(new aiNodeAnim()); - na->mNodeName.Set(name); - - const PropertyTable& props = target.Props(); - - // need to convert from TRS order to SRT? - if (reverse_order) { - - aiVector3D def_scale = PropertyGet(props, "Lcl Scaling", aiVector3D(1.f, 1.f, 1.f)); - aiVector3D def_translate = PropertyGet(props, "Lcl Translation", aiVector3D(0.f, 0.f, 0.f)); - aiVector3D def_rot = PropertyGet(props, "Lcl Rotation", aiVector3D(0.f, 0.f, 0.f)); - - KeyFrameListList scaling; - KeyFrameListList translation; - KeyFrameListList rotation; - - if (chain[TransformationComp_Scaling] != iter_end) { - scaling = GetKeyframeList((*chain[TransformationComp_Scaling]).second, start, stop); - } - - if (chain[TransformationComp_Translation] != iter_end) { - translation = GetKeyframeList((*chain[TransformationComp_Translation]).second, start, stop); - } - - if (chain[TransformationComp_Rotation] != iter_end) { - rotation = GetKeyframeList((*chain[TransformationComp_Rotation]).second, start, stop); - } - - KeyFrameListList joined; - joined.insert(joined.end(), scaling.begin(), scaling.end()); - joined.insert(joined.end(), translation.begin(), translation.end()); - joined.insert(joined.end(), rotation.begin(), rotation.end()); - - const KeyTimeList& times = GetKeyTimeList(joined); - - aiQuatKey* out_quat = new aiQuatKey[times.size()]; - aiVectorKey* out_scale = new aiVectorKey[times.size()]; - aiVectorKey* out_translation = new aiVectorKey[times.size()]; - - if (times.size()) - { - ConvertTransformOrder_TRStoSRT(out_quat, out_scale, out_translation, - scaling, - translation, - rotation, - times, - max_time, - min_time, - target.RotationOrder(), - def_scale, - def_translate, - def_rot); - } - - // XXX remove duplicates / redundant keys which this operation did - // likely produce if not all three channels were equally dense. - - na->mNumScalingKeys = static_cast<unsigned int>(times.size()); - na->mNumRotationKeys = na->mNumScalingKeys; - na->mNumPositionKeys = na->mNumScalingKeys; - - na->mScalingKeys = out_scale; - na->mRotationKeys = out_quat; - na->mPositionKeys = out_translation; - } - else { - - // if a particular transformation is not given, grab it from - // the corresponding node to meet the semantics of aiNodeAnim, - // which requires all of rotation, scaling and translation - // to be set. 
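When a transformation has no curves, the code after this comment still emits a single key holding the node's static value, because aiNodeAnim requires position, rotation and scaling to all be populated. A tiny sketch of that one-default-key fill, with plain vectors standing in for the assimp key arrays:

    // Sketch: if no scaling (or translation) curves exist, emit exactly one key at
    // t = 0 carrying the value taken from the node's properties, so every channel
    // of the node animation is populated.
    #include <cstdio>
    #include <vector>

    struct Vec3 { float x, y, z; };
    struct VectorKey { double time; Vec3 value; };

    static std::vector<VectorKey> KeysOrDefault(const std::vector<VectorKey> &converted,
                                                Vec3 staticValue) {
        if (!converted.empty()) {
            return converted;              // curves existed and were converted normally
        }
        return { { 0.0, staticValue } };   // dummy key from e.g. "Lcl Scaling"
    }

    int main() {
        const std::vector<VectorKey> scaling = KeysOrDefault({}, { 1.f, 1.f, 1.f });
        std::printf("%zu key(s), first = (%.0f, %.0f, %.0f)\n",
                    scaling.size(), scaling[0].value.x, scaling[0].value.y, scaling[0].value.z);
        return 0;
    }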
- if (chain[TransformationComp_Scaling] != iter_end) { - ConvertScaleKeys(na.get(), (*chain[TransformationComp_Scaling]).second, - layer_map, - start, stop, - max_time, - min_time); - } - else { - na->mScalingKeys = new aiVectorKey[1]; - na->mNumScalingKeys = 1; - - na->mScalingKeys[0].mTime = 0.; - na->mScalingKeys[0].mValue = PropertyGet(props, "Lcl Scaling", - aiVector3D(1.f, 1.f, 1.f)); - } - - if (chain[TransformationComp_Rotation] != iter_end) { - ConvertRotationKeys(na.get(), (*chain[TransformationComp_Rotation]).second, - layer_map, - start, stop, - max_time, - min_time, - target.RotationOrder()); - } - else { - na->mRotationKeys = new aiQuatKey[1]; - na->mNumRotationKeys = 1; - - na->mRotationKeys[0].mTime = 0.; - na->mRotationKeys[0].mValue = EulerToQuaternion( - PropertyGet(props, "Lcl Rotation", aiVector3D(0.f, 0.f, 0.f)), - target.RotationOrder()); - } - - if (chain[TransformationComp_Translation] != iter_end) { - ConvertTranslationKeys(na.get(), (*chain[TransformationComp_Translation]).second, - layer_map, - start, stop, - max_time, - min_time); - } - else { - na->mPositionKeys = new aiVectorKey[1]; - na->mNumPositionKeys = 1; - - na->mPositionKeys[0].mTime = 0.; - na->mPositionKeys[0].mValue = PropertyGet(props, "Lcl Translation", - aiVector3D(0.f, 0.f, 0.f)); - } - - } - return na.release(); - } - - FBXConverter::KeyFrameListList FBXConverter::GetKeyframeList(const std::vector<const AnimationCurveNode*>& nodes, int64_t start, int64_t stop) - { - KeyFrameListList inputs; - inputs.reserve(nodes.size() * 3); - - //give some breathing room for rounding errors - int64_t adj_start = start - 10000; - int64_t adj_stop = stop + 10000; - - for (const AnimationCurveNode* node : nodes) { - ai_assert(node); - - const AnimationCurveMap& curves = node->Curves(); - for (const AnimationCurveMap::value_type& kv : curves) { - - unsigned int mapto; - if (kv.first == "d|X") { - mapto = 0; - } - else if (kv.first == "d|Y") { - mapto = 1; - } - else if (kv.first == "d|Z") { - mapto = 2; - } - else { - FBXImporter::LogWarn("ignoring scale animation curve, did not recognize target component"); - continue; - } - - const AnimationCurve* const curve = kv.second; - ai_assert(curve->GetKeys().size() == curve->GetValues().size() && curve->GetKeys().size()); - - //get values within the start/stop time window - std::shared_ptr<KeyTimeList> Keys(new KeyTimeList()); - std::shared_ptr<KeyValueList> Values(new KeyValueList()); - const size_t count = curve->GetKeys().size(); - Keys->reserve(count); - Values->reserve(count); - for (size_t n = 0; n < count; n++) - { - int64_t k = curve->GetKeys().at(n); - if (k >= adj_start && k <= adj_stop) - { - Keys->push_back(k); - Values->push_back(curve->GetValues().at(n)); - } - } - - inputs.push_back(std::make_tuple(Keys, Values, mapto)); - } - } - return inputs; // pray for NRVO :-) - } - - - KeyTimeList FBXConverter::GetKeyTimeList(const KeyFrameListList& inputs) { - ai_assert(!inputs.empty()); - - // reserve some space upfront - it is likely that the key-frame lists - // have matching time values, so max(of all key-frame lists) should - // be a good estimate. 
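GetKeyTimeList, whose merge loop follows, builds the union of all key times by repeatedly taking the smallest pending tick across the input lists and then advancing every list positioned at that tick. A standalone sketch of that k-way merge over plain vectors:

    // Sketch: merge several sorted key-time lists into one deduplicated, ascending
    // list of ticks -- the same strategy as the loop below, minus the FBX types.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <vector>

    static std::vector<int64_t> MergeKeyTimes(const std::vector<std::vector<int64_t>> &inputs) {
        std::vector<int64_t> merged;
        std::vector<std::size_t> next(inputs.size(), 0);

        for (;;) {
            int64_t minTick = std::numeric_limits<int64_t>::max();
            for (std::size_t i = 0; i < inputs.size(); ++i) {
                if (next[i] < inputs[i].size() && inputs[i][next[i]] < minTick) {
                    minTick = inputs[i][next[i]];
                }
            }
            if (minTick == std::numeric_limits<int64_t>::max()) {
                break; // every list exhausted
            }
            merged.push_back(minTick);
            for (std::size_t i = 0; i < inputs.size(); ++i) {
                while (next[i] < inputs[i].size() && inputs[i][next[i]] == minTick) {
                    ++next[i]; // skip duplicates of the tick just emitted
                }
            }
        }
        return merged;
    }

    int main() {
        const std::vector<int64_t> out = MergeKeyTimes({ { 0, 10, 20 }, { 10, 15 }, { 5, 20 } });
        for (int64_t t : out) {
            std::printf("%lld ", static_cast<long long>(t));
        }
        std::printf("\n"); // 0 5 10 15 20
        return 0;
    }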
- KeyTimeList keys; - - size_t estimate = 0; - for (const KeyFrameList& kfl : inputs) { - estimate = std::max(estimate, std::get<0>(kfl)->size()); - } - - keys.reserve(estimate); - - std::vector<unsigned int> next_pos; - next_pos.resize(inputs.size(), 0); - - const size_t count = inputs.size(); - while (true) { - - int64_t min_tick = std::numeric_limits<int64_t>::max(); - for (size_t i = 0; i < count; ++i) { - const KeyFrameList& kfl = inputs[i]; - - if (std::get<0>(kfl)->size() > next_pos[i] && std::get<0>(kfl)->at(next_pos[i]) < min_tick) { - min_tick = std::get<0>(kfl)->at(next_pos[i]); - } - } - - if (min_tick == std::numeric_limits<int64_t>::max()) { - break; - } - keys.push_back(min_tick); - - for (size_t i = 0; i < count; ++i) { - const KeyFrameList& kfl = inputs[i]; - - - while (std::get<0>(kfl)->size() > next_pos[i] && std::get<0>(kfl)->at(next_pos[i]) == min_tick) { - ++next_pos[i]; - } - } - } - - return keys; - } - - void FBXConverter::InterpolateKeys(aiVectorKey* valOut, const KeyTimeList& keys, const KeyFrameListList& inputs, - const aiVector3D& def_value, - double& max_time, - double& min_time) { - ai_assert(!keys.empty()); - ai_assert(nullptr != valOut); - - std::vector<unsigned int> next_pos; - const size_t count(inputs.size()); - - next_pos.resize(inputs.size(), 0); - - for (KeyTimeList::value_type time : keys) { - ai_real result[3] = { def_value.x, def_value.y, def_value.z }; - - for (size_t i = 0; i < count; ++i) { - const KeyFrameList& kfl = inputs[i]; - - const size_t ksize = std::get<0>(kfl)->size(); - if (ksize == 0) { - continue; - } - if (ksize > next_pos[i] && std::get<0>(kfl)->at(next_pos[i]) == time) { - ++next_pos[i]; - } - - const size_t id0 = next_pos[i] > 0 ? next_pos[i] - 1 : 0; - const size_t id1 = next_pos[i] == ksize ? ksize - 1 : next_pos[i]; - - // use lerp for interpolation - const KeyValueList::value_type valueA = std::get<1>(kfl)->at(id0); - const KeyValueList::value_type valueB = std::get<1>(kfl)->at(id1); - - const KeyTimeList::value_type timeA = std::get<0>(kfl)->at(id0); - const KeyTimeList::value_type timeB = std::get<0>(kfl)->at(id1); - - const ai_real factor = timeB == timeA ? ai_real(0.) 
: static_cast<ai_real>((time - timeA)) / (timeB - timeA); - const ai_real interpValue = static_cast<ai_real>(valueA + (valueB - valueA) * factor); - - result[std::get<2>(kfl)] = interpValue; - } - - // magic value to convert fbx times to seconds - valOut->mTime = CONVERT_FBX_TIME(time) * anim_fps; - - min_time = std::min(min_time, valOut->mTime); - max_time = std::max(max_time, valOut->mTime); - - valOut->mValue.x = result[0]; - valOut->mValue.y = result[1]; - valOut->mValue.z = result[2]; - - ++valOut; - } - } - - void FBXConverter::InterpolateKeys(aiQuatKey* valOut, const KeyTimeList& keys, const KeyFrameListList& inputs, - const aiVector3D& def_value, - double& maxTime, - double& minTime, - Model::RotOrder order) - { - ai_assert(!keys.empty()); - ai_assert(nullptr != valOut); - - std::unique_ptr<aiVectorKey[]> temp(new aiVectorKey[keys.size()]); - InterpolateKeys(temp.get(), keys, inputs, def_value, maxTime, minTime); - - aiMatrix4x4 m; - - aiQuaternion lastq; - - for (size_t i = 0, c = keys.size(); i < c; ++i) { - - valOut[i].mTime = temp[i].mTime; - - GetRotationMatrix(order, temp[i].mValue, m); - aiQuaternion quat = aiQuaternion(aiMatrix3x3(m)); - - // take shortest path by checking the inner product - // http://www.3dkingdoms.com/weekly/weekly.php?a=36 - if (quat.x * lastq.x + quat.y * lastq.y + quat.z * lastq.z + quat.w * lastq.w < 0) - { - quat.x = -quat.x; - quat.y = -quat.y; - quat.z = -quat.z; - quat.w = -quat.w; - } - lastq = quat; - - valOut[i].mValue = quat; - } - } - - void FBXConverter::ConvertTransformOrder_TRStoSRT(aiQuatKey* out_quat, aiVectorKey* out_scale, - aiVectorKey* out_translation, - const KeyFrameListList& scaling, - const KeyFrameListList& translation, - const KeyFrameListList& rotation, - const KeyTimeList& times, - double& maxTime, - double& minTime, - Model::RotOrder order, - const aiVector3D& def_scale, - const aiVector3D& def_translate, - const aiVector3D& def_rotation) - { - if (rotation.size()) { - InterpolateKeys(out_quat, times, rotation, def_rotation, maxTime, minTime, order); - } - else { - for (size_t i = 0; i < times.size(); ++i) { - out_quat[i].mTime = CONVERT_FBX_TIME(times[i]) * anim_fps; - out_quat[i].mValue = EulerToQuaternion(def_rotation, order); - } - } - - if (scaling.size()) { - InterpolateKeys(out_scale, times, scaling, def_scale, maxTime, minTime); - } - else { - for (size_t i = 0; i < times.size(); ++i) { - out_scale[i].mTime = CONVERT_FBX_TIME(times[i]) * anim_fps; - out_scale[i].mValue = def_scale; - } - } - - if (translation.size()) { - InterpolateKeys(out_translation, times, translation, def_translate, maxTime, minTime); - } - else { - for (size_t i = 0; i < times.size(); ++i) { - out_translation[i].mTime = CONVERT_FBX_TIME(times[i]) * anim_fps; - out_translation[i].mValue = def_translate; - } - } - - const size_t count = times.size(); - for (size_t i = 0; i < count; ++i) { - aiQuaternion& r = out_quat[i].mValue; - aiVector3D& s = out_scale[i].mValue; - aiVector3D& t = out_translation[i].mValue; - - aiMatrix4x4 mat, temp; - aiMatrix4x4::Translation(t, mat); - mat *= aiMatrix4x4(r.GetMatrix()); - mat *= aiMatrix4x4::Scaling(s, temp); - - mat.Decompose(s, r, t); - } - } - - aiQuaternion FBXConverter::EulerToQuaternion(const aiVector3D& rot, Model::RotOrder order) - { - aiMatrix4x4 m; - GetRotationMatrix(order, rot, m); - - return aiQuaternion(aiMatrix3x3(m)); - } - - void FBXConverter::ConvertScaleKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, const LayerMap& /*layers*/, - int64_t start, int64_t stop, - 
double& maxTime, - double& minTime) - { - ai_assert(nodes.size()); - - // XXX for now, assume scale should be blended geometrically (i.e. two - // layers should be multiplied with each other). There is a FBX - // property in the layer to specify the behaviour, though. - - const KeyFrameListList& inputs = GetKeyframeList(nodes, start, stop); - const KeyTimeList& keys = GetKeyTimeList(inputs); - - na->mNumScalingKeys = static_cast<unsigned int>(keys.size()); - na->mScalingKeys = new aiVectorKey[keys.size()]; - if (keys.size() > 0) { - InterpolateKeys(na->mScalingKeys, keys, inputs, aiVector3D(1.0f, 1.0f, 1.0f), maxTime, minTime); - } - } - - void FBXConverter::ConvertTranslationKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, - const LayerMap& /*layers*/, - int64_t start, int64_t stop, - double& maxTime, - double& minTime) - { - ai_assert(nodes.size()); - - // XXX see notes in ConvertScaleKeys() - const KeyFrameListList& inputs = GetKeyframeList(nodes, start, stop); - const KeyTimeList& keys = GetKeyTimeList(inputs); - - na->mNumPositionKeys = static_cast<unsigned int>(keys.size()); - na->mPositionKeys = new aiVectorKey[keys.size()]; - if (keys.size() > 0) - InterpolateKeys(na->mPositionKeys, keys, inputs, aiVector3D(0.0f, 0.0f, 0.0f), maxTime, minTime); - } - - void FBXConverter::ConvertRotationKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, - const LayerMap& /*layers*/, - int64_t start, int64_t stop, - double& maxTime, - double& minTime, - Model::RotOrder order) - { - ai_assert(nodes.size()); - - // XXX see notes in ConvertScaleKeys() - const std::vector< KeyFrameList >& inputs = GetKeyframeList(nodes, start, stop); - const KeyTimeList& keys = GetKeyTimeList(inputs); - - na->mNumRotationKeys = static_cast<unsigned int>(keys.size()); - na->mRotationKeys = new aiQuatKey[keys.size()]; - if (!keys.empty()) { - InterpolateKeys(na->mRotationKeys, keys, inputs, aiVector3D(0.0f, 0.0f, 0.0f), maxTime, minTime, order); - } - } - - void FBXConverter::ConvertGlobalSettings() { - if (nullptr == out) { - return; - } - - out->mMetaData = aiMetadata::Alloc(15); - out->mMetaData->Set(0, "UpAxis", doc.GlobalSettings().UpAxis()); - out->mMetaData->Set(1, "UpAxisSign", doc.GlobalSettings().UpAxisSign()); - out->mMetaData->Set(2, "FrontAxis", doc.GlobalSettings().FrontAxis()); - out->mMetaData->Set(3, "FrontAxisSign", doc.GlobalSettings().FrontAxisSign()); - out->mMetaData->Set(4, "CoordAxis", doc.GlobalSettings().CoordAxis()); - out->mMetaData->Set(5, "CoordAxisSign", doc.GlobalSettings().CoordAxisSign()); - out->mMetaData->Set(6, "OriginalUpAxis", doc.GlobalSettings().OriginalUpAxis()); - out->mMetaData->Set(7, "OriginalUpAxisSign", doc.GlobalSettings().OriginalUpAxisSign()); - out->mMetaData->Set(8, "UnitScaleFactor", (double)doc.GlobalSettings().UnitScaleFactor()); - out->mMetaData->Set(9, "OriginalUnitScaleFactor", doc.GlobalSettings().OriginalUnitScaleFactor()); - out->mMetaData->Set(10, "AmbientColor", doc.GlobalSettings().AmbientColor()); - out->mMetaData->Set(11, "FrameRate", (int)doc.GlobalSettings().TimeMode()); - out->mMetaData->Set(12, "TimeSpanStart", doc.GlobalSettings().TimeSpanStart()); - out->mMetaData->Set(13, "TimeSpanStop", doc.GlobalSettings().TimeSpanStop()); - out->mMetaData->Set(14, "CustomFrameRate", doc.GlobalSettings().CustomFrameRate()); - } - - void FBXConverter::TransferDataToScene() - { - ai_assert(!out->mMeshes); - ai_assert(!out->mNumMeshes); - - // note: the trailing () ensures initialization with nullptr - not - // many C++ 
users seem to know this, so pointing it out to avoid - // confusion why this code works. - - if (meshes.size()) { - out->mMeshes = new aiMesh*[meshes.size()](); - out->mNumMeshes = static_cast<unsigned int>(meshes.size()); - - std::swap_ranges(meshes.begin(), meshes.end(), out->mMeshes); - } - - if (materials.size()) { - out->mMaterials = new aiMaterial*[materials.size()](); - out->mNumMaterials = static_cast<unsigned int>(materials.size()); - - std::swap_ranges(materials.begin(), materials.end(), out->mMaterials); - } - - if (animations.size()) { - out->mAnimations = new aiAnimation*[animations.size()](); - out->mNumAnimations = static_cast<unsigned int>(animations.size()); - - std::swap_ranges(animations.begin(), animations.end(), out->mAnimations); - } - - if (lights.size()) { - out->mLights = new aiLight*[lights.size()](); - out->mNumLights = static_cast<unsigned int>(lights.size()); - - std::swap_ranges(lights.begin(), lights.end(), out->mLights); - } - - if (cameras.size()) { - out->mCameras = new aiCamera*[cameras.size()](); - out->mNumCameras = static_cast<unsigned int>(cameras.size()); - - std::swap_ranges(cameras.begin(), cameras.end(), out->mCameras); - } - - if (textures.size()) { - out->mTextures = new aiTexture*[textures.size()](); - out->mNumTextures = static_cast<unsigned int>(textures.size()); - - std::swap_ranges(textures.begin(), textures.end(), out->mTextures); - } - } - - void FBXConverter::ConvertOrphantEmbeddedTextures() - { - // in C++14 it could be: - // for (auto&& [id, object] : objects) - for (auto&& id_and_object : doc.Objects()) - { - auto&& id = std::get<0>(id_and_object); - auto&& object = std::get<1>(id_and_object); - // If an object doesn't have parent - if (doc.ConnectionsBySource().count(id) == 0) - { - const Texture* realTexture = nullptr; - try - { - const auto& element = object->GetElement(); - const Token& key = element.KeyToken(); - const char* obtype = key.begin(); - const size_t length = static_cast<size_t>(key.end() - key.begin()); - if (strncmp(obtype, "Texture", length) == 0) - { - const Texture* texture = static_cast<const Texture*>(object->Get()); - if (texture->Media() && texture->Media()->ContentLength() > 0) - { - realTexture = texture; - } - } - } - catch (...) - { - // do nothing - } - if (realTexture) - { - const Video* media = realTexture->Media(); - unsigned int index = ConvertVideo(*media); - textures_converted[*media] = index; - } - } - } - } - - // ------------------------------------------------------------------------------------------------ - void ConvertToAssimpScene(aiScene* out, const Document& doc, bool removeEmptyBones) - { - FBXConverter converter(out, doc, removeEmptyBones); - } - - } // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXConverter.h b/thirdparty/assimp/code/FBX/FBXConverter.h deleted file mode 100644 index 46693bdca6..0000000000 --- a/thirdparty/assimp/code/FBX/FBXConverter.h +++ /dev/null @@ -1,491 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXDConverter.h - * @brief FBX DOM to aiScene conversion - */ -#ifndef INCLUDED_AI_FBX_CONVERTER_H -#define INCLUDED_AI_FBX_CONVERTER_H - -#include "FBXParser.h" -#include "FBXMeshGeometry.h" -#include "FBXDocument.h" -#include "FBXUtil.h" -#include "FBXProperties.h" -#include "FBXImporter.h" - -#include <assimp/anim.h> -#include <assimp/material.h> -#include <assimp/light.h> -#include <assimp/texture.h> -#include <assimp/camera.h> -#include <assimp/StringComparison.h> -#include <unordered_map> -#include <unordered_set> - -struct aiScene; -struct aiNode; -struct aiMaterial; - -struct morphKeyData { - std::vector<unsigned int> values; - std::vector<float> weights; -}; -typedef std::map<int64_t, morphKeyData*> morphAnimData; - -namespace Assimp { -namespace FBX { - -class Document; -/** - * Convert a FBX #Document to #aiScene - * @param out Empty scene to be populated - * @param doc Parsed FBX document - * @param removeEmptyBones Will remove bones, which do not have any references to vertices. 
- */ -void ConvertToAssimpScene(aiScene* out, const Document& doc, bool removeEmptyBones); - -/** Dummy class to encapsulate the conversion process */ -class FBXConverter { -public: - /** - * The different parts that make up the final local transformation of a fbx-node - */ - enum TransformationComp { - TransformationComp_GeometricScalingInverse = 0, - TransformationComp_GeometricRotationInverse, - TransformationComp_GeometricTranslationInverse, - TransformationComp_Translation, - TransformationComp_RotationOffset, - TransformationComp_RotationPivot, - TransformationComp_PreRotation, - TransformationComp_Rotation, - TransformationComp_PostRotation, - TransformationComp_RotationPivotInverse, - TransformationComp_ScalingOffset, - TransformationComp_ScalingPivot, - TransformationComp_Scaling, - TransformationComp_ScalingPivotInverse, - TransformationComp_GeometricTranslation, - TransformationComp_GeometricRotation, - TransformationComp_GeometricScaling, - - TransformationComp_MAXIMUM - }; - -public: - FBXConverter(aiScene* out, const Document& doc, bool removeEmptyBones); - ~FBXConverter(); - -private: - // ------------------------------------------------------------------------------------------------ - // find scene root and trigger recursive scene conversion - void ConvertRootNode(); - - // ------------------------------------------------------------------------------------------------ - // collect and assign child nodes - void ConvertNodes(uint64_t id, aiNode *parent, aiNode *root_node); - - // ------------------------------------------------------------------------------------------------ - void ConvertLights(const Model& model, const std::string &orig_name ); - - // ------------------------------------------------------------------------------------------------ - void ConvertCameras(const Model& model, const std::string &orig_name ); - - // ------------------------------------------------------------------------------------------------ - void ConvertLight( const Light& light, const std::string &orig_name ); - - // ------------------------------------------------------------------------------------------------ - void ConvertCamera( const Camera& cam, const std::string &orig_name ); - - // ------------------------------------------------------------------------------------------------ - void GetUniqueName( const std::string &name, std::string& uniqueName ); - - // ------------------------------------------------------------------------------------------------ - // this returns unified names usable within assimp identifiers (i.e. no space characters - - // while these would be allowed, they are a potential trouble spot so better not use them). 
- const char* NameTransformationComp(TransformationComp comp); - - // ------------------------------------------------------------------------------------------------ - // Returns an unique name for a node or traverses up a hierarchy until a non-empty name is found and - // then makes this name unique - std::string MakeUniqueNodeName(const Model* const model, const aiNode& parent); - - // ------------------------------------------------------------------------------------------------ - // note: this returns the REAL fbx property names - const char* NameTransformationCompProperty(TransformationComp comp); - - // ------------------------------------------------------------------------------------------------ - aiVector3D TransformationCompDefaultValue(TransformationComp comp); - - // ------------------------------------------------------------------------------------------------ - void GetRotationMatrix(Model::RotOrder mode, const aiVector3D& rotation, aiMatrix4x4& out); - // ------------------------------------------------------------------------------------------------ - /** - * checks if a node has more than just scaling, rotation and translation components - */ - bool NeedsComplexTransformationChain(const Model& model); - - // ------------------------------------------------------------------------------------------------ - // note: name must be a FixNodeName() result - std::string NameTransformationChainNode(const std::string& name, TransformationComp comp); - - // ------------------------------------------------------------------------------------------------ - /** - * note: memory for output_nodes will be managed by the caller - */ - bool GenerateTransformationNodeChain(const Model& model, const std::string& name, std::vector<aiNode*>& output_nodes, std::vector<aiNode*>& post_output_nodes); - - // ------------------------------------------------------------------------------------------------ - void SetupNodeMetadata(const Model& model, aiNode& nd); - - // ------------------------------------------------------------------------------------------------ - void ConvertModel(const Model &model, aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform); - - // ------------------------------------------------------------------------------------------------ - // MeshGeometry -> aiMesh, return mesh index + 1 or 0 if the conversion failed - std::vector<unsigned int> - ConvertMesh(const MeshGeometry &mesh, const Model &model, aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform); - - // ------------------------------------------------------------------------------------------------ - std::vector<unsigned int> ConvertLine(const LineGeometry& line, const Model& model, - aiNode *parent, aiNode *root_node); - - // ------------------------------------------------------------------------------------------------ - aiMesh* SetupEmptyMesh(const Geometry& mesh, aiNode *parent); - - // ------------------------------------------------------------------------------------------------ - unsigned int ConvertMeshSingleMaterial(const MeshGeometry &mesh, const Model &model, - const aiMatrix4x4 &absolute_transform, aiNode *parent, - aiNode *root_node); - - // ------------------------------------------------------------------------------------------------ - std::vector<unsigned int> - ConvertMeshMultiMaterial(const MeshGeometry &mesh, const Model &model, aiNode *parent, aiNode *root_node, - const aiMatrix4x4 &absolute_transform); - - // 
------------------------------------------------------------------------------------------------ - unsigned int ConvertMeshMultiMaterial(const MeshGeometry &mesh, const Model &model, MatIndexArray::value_type index, - aiNode *parent, aiNode *root_node, const aiMatrix4x4 &absolute_transform); - - // ------------------------------------------------------------------------------------------------ - static const unsigned int NO_MATERIAL_SEPARATION = /* std::numeric_limits<unsigned int>::max() */ - static_cast<unsigned int>(-1); - - // ------------------------------------------------------------------------------------------------ - /** - * - if materialIndex == NO_MATERIAL_SEPARATION, materials are not taken into - * account when determining which weights to include. - * - outputVertStartIndices is only used when a material index is specified, it gives for - * each output vertex the DOM index it maps to. - */ - void ConvertWeights(aiMesh *out, const Model &model, const MeshGeometry &geo, const aiMatrix4x4 &absolute_transform, - aiNode *parent = NULL, aiNode *root_node = NULL, - unsigned int materialIndex = NO_MATERIAL_SEPARATION, - std::vector<unsigned int> *outputVertStartIndices = NULL); - // lookup - static const aiNode* GetNodeByName( const aiString& name, aiNode *current_node ); - // ------------------------------------------------------------------------------------------------ - void ConvertCluster(std::vector<aiBone *> &local_mesh_bones, const Cluster *cl, - std::vector<size_t> &out_indices, std::vector<size_t> &index_out_indices, - std::vector<size_t> &count_out_indices, const aiMatrix4x4 &absolute_transform, - aiNode *parent, aiNode *root_node); - - // ------------------------------------------------------------------------------------------------ - void ConvertMaterialForMesh(aiMesh* out, const Model& model, const MeshGeometry& geo, - MatIndexArray::value_type materialIndex); - - // ------------------------------------------------------------------------------------------------ - unsigned int GetDefaultMaterial(); - - // ------------------------------------------------------------------------------------------------ - // Material -> aiMaterial - unsigned int ConvertMaterial(const Material& material, const MeshGeometry* const mesh); - - // ------------------------------------------------------------------------------------------------ - // Video -> aiTexture - unsigned int ConvertVideo(const Video& video); - - // ------------------------------------------------------------------------------------------------ - // convert embedded texture if necessary and return actual texture path - aiString GetTexturePath(const Texture* tex); - - // ------------------------------------------------------------------------------------------------ - void TrySetTextureProperties(aiMaterial* out_mat, const TextureMap& textures, - const std::string& propName, - aiTextureType target, const MeshGeometry* const mesh); - - // ------------------------------------------------------------------------------------------------ - void TrySetTextureProperties(aiMaterial* out_mat, const LayeredTextureMap& layeredTextures, - const std::string& propName, - aiTextureType target, const MeshGeometry* const mesh); - - // ------------------------------------------------------------------------------------------------ - void SetTextureProperties(aiMaterial* out_mat, const TextureMap& textures, const MeshGeometry* const mesh); - - // 
------------------------------------------------------------------------------------------------ - void SetTextureProperties(aiMaterial* out_mat, const LayeredTextureMap& layeredTextures, const MeshGeometry* const mesh); - - // ------------------------------------------------------------------------------------------------ - aiColor3D GetColorPropertyFromMaterial(const PropertyTable& props, const std::string& baseName, - bool& result); - aiColor3D GetColorPropertyFactored(const PropertyTable& props, const std::string& colorName, - const std::string& factorName, bool& result, bool useTemplate = true); - aiColor3D GetColorProperty(const PropertyTable& props, const std::string& colorName, - bool& result, bool useTemplate = true); - - // ------------------------------------------------------------------------------------------------ - void SetShadingPropertiesCommon(aiMaterial* out_mat, const PropertyTable& props); - void SetShadingPropertiesRaw(aiMaterial* out_mat, const PropertyTable& props, const TextureMap& textures, const MeshGeometry* const mesh); - - // ------------------------------------------------------------------------------------------------ - // get the number of fps for a FrameRate enumerated value - static double FrameRateToDouble(FileGlobalSettings::FrameRate fp, double customFPSVal = -1.0); - - // ------------------------------------------------------------------------------------------------ - // convert animation data to aiAnimation et al - void ConvertAnimations(); - - // ------------------------------------------------------------------------------------------------ - // takes a fbx node name and returns the identifier to be used in the assimp output scene. - // the function is guaranteed to provide consistent results over multiple invocations - // UNLESS RenameNode() is called for a particular node name. - std::string FixNodeName(const std::string& name); - std::string FixAnimMeshName(const std::string& name); - - typedef std::map<const AnimationCurveNode*, const AnimationLayer*> LayerMap; - - // XXX: better use multi_map .. 
- typedef std::map<std::string, std::vector<const AnimationCurveNode*> > NodeMap; - - // ------------------------------------------------------------------------------------------------ - void ConvertAnimationStack(const AnimationStack& st); - - // ------------------------------------------------------------------------------------------------ - void ProcessMorphAnimDatas(std::map<std::string, morphAnimData*>* morphAnimDatas, const BlendShapeChannel* bsc, const AnimationCurveNode* node); - - // ------------------------------------------------------------------------------------------------ - void GenerateNodeAnimations(std::vector<aiNodeAnim*>& node_anims, - const std::string& fixed_name, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time); - - // ------------------------------------------------------------------------------------------------ - bool IsRedundantAnimationData(const Model& target, - TransformationComp comp, - const std::vector<const AnimationCurveNode*>& curves); - - // ------------------------------------------------------------------------------------------------ - aiNodeAnim* GenerateRotationNodeAnim(const std::string& name, - const Model& target, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time); - - // ------------------------------------------------------------------------------------------------ - aiNodeAnim* GenerateScalingNodeAnim(const std::string& name, - const Model& /*target*/, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time); - - // ------------------------------------------------------------------------------------------------ - aiNodeAnim* GenerateTranslationNodeAnim(const std::string& name, - const Model& /*target*/, - const std::vector<const AnimationCurveNode*>& curves, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time, - bool inverse = false); - - // ------------------------------------------------------------------------------------------------ - // generate node anim, extracting only Rotation, Scaling and Translation from the given chain - aiNodeAnim* GenerateSimpleNodeAnim(const std::string& name, - const Model& target, - NodeMap::const_iterator chain[TransformationComp_MAXIMUM], - NodeMap::const_iterator iter_end, - const LayerMap& layer_map, - int64_t start, int64_t stop, - double& max_time, - double& min_time, - bool reverse_order = false); - - // key (time), value, mapto (component index) - typedef std::tuple<std::shared_ptr<KeyTimeList>, std::shared_ptr<KeyValueList>, unsigned int > KeyFrameList; - typedef std::vector<KeyFrameList> KeyFrameListList; - - // ------------------------------------------------------------------------------------------------ - KeyFrameListList GetKeyframeList(const std::vector<const AnimationCurveNode*>& nodes, int64_t start, int64_t stop); - - // ------------------------------------------------------------------------------------------------ - KeyTimeList GetKeyTimeList(const KeyFrameListList& inputs); - - // ------------------------------------------------------------------------------------------------ - void InterpolateKeys(aiVectorKey* valOut, const KeyTimeList& keys, const KeyFrameListList& inputs, - const aiVector3D& def_value, - double& max_time, - 
double& min_time); - - // ------------------------------------------------------------------------------------------------ - void InterpolateKeys(aiQuatKey* valOut, const KeyTimeList& keys, const KeyFrameListList& inputs, - const aiVector3D& def_value, - double& maxTime, - double& minTime, - Model::RotOrder order); - - // ------------------------------------------------------------------------------------------------ - void ConvertTransformOrder_TRStoSRT(aiQuatKey* out_quat, aiVectorKey* out_scale, - aiVectorKey* out_translation, - const KeyFrameListList& scaling, - const KeyFrameListList& translation, - const KeyFrameListList& rotation, - const KeyTimeList& times, - double& maxTime, - double& minTime, - Model::RotOrder order, - const aiVector3D& def_scale, - const aiVector3D& def_translate, - const aiVector3D& def_rotation); - - // ------------------------------------------------------------------------------------------------ - // euler xyz -> quat - aiQuaternion EulerToQuaternion(const aiVector3D& rot, Model::RotOrder order); - - // ------------------------------------------------------------------------------------------------ - void ConvertScaleKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, const LayerMap& /*layers*/, - int64_t start, int64_t stop, - double& maxTime, - double& minTime); - - // ------------------------------------------------------------------------------------------------ - void ConvertTranslationKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, - const LayerMap& /*layers*/, - int64_t start, int64_t stop, - double& maxTime, - double& minTime); - - // ------------------------------------------------------------------------------------------------ - void ConvertRotationKeys(aiNodeAnim* na, const std::vector<const AnimationCurveNode*>& nodes, - const LayerMap& /*layers*/, - int64_t start, int64_t stop, - double& maxTime, - double& minTime, - Model::RotOrder order); - - void ConvertGlobalSettings(); - - // ------------------------------------------------------------------------------------------------ - // copy generated meshes, animations, lights, cameras and textures to the output scene - void TransferDataToScene(); - - // ------------------------------------------------------------------------------------------------ - // FBX file could have embedded textures not connected to anything - void ConvertOrphantEmbeddedTextures(); - -private: - // 0: not assigned yet, others: index is value - 1 - unsigned int defaultMaterialIndex; - - std::vector<aiMesh*> meshes; - std::vector<aiMaterial*> materials; - std::vector<aiAnimation*> animations; - std::vector<aiLight*> lights; - std::vector<aiCamera*> cameras; - std::vector<aiTexture*> textures; - - using MaterialMap = std::fbx_unordered_map<const Material*, unsigned int>; - MaterialMap materials_converted; - - using VideoMap = std::fbx_unordered_map<const Video, unsigned int>; - VideoMap textures_converted; - - using MeshMap = std::fbx_unordered_map<const Geometry*, std::vector<unsigned int> >; - MeshMap meshes_converted; - - // fixed node name -> which trafo chain components have animations? 
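// materials_converted, textures_converted and meshes_converted above act as
// memoization caches: an FBX material, video or geometry that several nodes
// reference is converted once and its output index reused afterwards. A
// minimal sketch of that lookup-or-convert pattern (GetOrConvertMaterial and
// DoConvertMaterial are illustrative names, not functions from the original
// file):
//
//     unsigned int GetOrConvertMaterial(const Material *mat) {
//         const MaterialMap::const_iterator it = materials_converted.find(mat);
//         if (it != materials_converted.end()) {
//             return it->second;                     // already converted, reuse index
//         }
//         const unsigned int index = DoConvertMaterial(mat); // appends to 'materials'
//         materials_converted[mat] = index;
//         return index;
//     }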
- using NodeAnimBitMap = std::fbx_unordered_map<std::string, unsigned int> ; - NodeAnimBitMap node_anim_chain_bits; - - // number of nodes with the same name - using NodeNameCache = std::fbx_unordered_map<std::string, unsigned int>; - NodeNameCache mNodeNames; - - // Deformer name is not the same as a bone name - it does contain the bone name though :) - // Deformer names in FBX are always unique in an FBX file. - std::map<const std::string, aiBone *> bone_map; - - double anim_fps; - - aiScene* const out; - const FBX::Document& doc; - - static void BuildBoneList(aiNode *current_node, const aiNode *root_node, const aiScene *scene, - std::vector<aiBone*>& bones); - - void BuildBoneStack(aiNode *current_node, const aiNode *root_node, const aiScene *scene, - const std::vector<aiBone *> &bones, - std::map<aiBone *, aiNode *> &bone_stack, - std::vector<aiNode*> &node_stack ); - - static void BuildNodeList(aiNode *current_node, std::vector<aiNode *> &nodes); - - static aiNode *GetNodeFromStack(const aiString &node_name, std::vector<aiNode *> &nodes); - - static aiNode *GetArmatureRoot(aiNode *bone_node, std::vector<aiBone*> &bone_list); - - static bool IsBoneNode(const aiString &bone_name, std::vector<aiBone *> &bones); -}; - -} -} - -#endif // INCLUDED_AI_FBX_CONVERTER_H diff --git a/thirdparty/assimp/code/FBX/FBXDeformer.cpp b/thirdparty/assimp/code/FBX/FBXDeformer.cpp deleted file mode 100644 index 6927553450..0000000000 --- a/thirdparty/assimp/code/FBX/FBXDeformer.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXNoteAttribute.cpp - * @brief Assimp::FBX::NodeAttribute (and subclasses) implementation - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXMeshGeometry.h" -#include "FBXImporter.h" -#include "FBXDocumentUtil.h" - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -Deformer::Deformer(uint64_t id, const Element& element, const Document& doc, const std::string& name) - : Object(id,element,name) -{ - const Scope& sc = GetRequiredScope(element); - - const std::string& classname = ParseTokenAsString(GetRequiredToken(element,2)); - props = GetPropertyTable(doc,"Deformer.Fbx" + classname,element,sc,true); -} - - -// ------------------------------------------------------------------------------------------------ -Deformer::~Deformer() -{ - -} - - -// ------------------------------------------------------------------------------------------------ -Cluster::Cluster(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Deformer(id,element,doc,name) -, node() -{ - const Scope& sc = GetRequiredScope(element); - - const Element* const Indexes = sc["Indexes"]; - const Element* const Weights = sc["Weights"]; - - const Element& Transform = GetRequiredElement(sc,"Transform",&element); - const Element& TransformLink = GetRequiredElement(sc,"TransformLink",&element); - - transform = ReadMatrix(Transform); - transformLink = ReadMatrix(TransformLink); - - // it is actually possible that there be Deformer's with no weights - if (!!Indexes != !!Weights) { - DOMError("either Indexes or Weights are missing from Cluster",&element); - } - - if(Indexes) { - ParseVectorDataArray(indices,*Indexes); - ParseVectorDataArray(weights,*Weights); - } - - if(indices.size() != weights.size()) { - DOMError("sizes of index and weight array don't match up",&element); - } - - // read assigned node - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"Model"); - for(const Connection* con : conns) { - const Model* const mod = ProcessSimpleConnection<Model>(*con, false, "Model -> Cluster", element); - if(mod) { - node = mod; - break; - } - } - - if (!node) { - DOMError("failed to read target Node for Cluster",&element); - } -} - - -// ------------------------------------------------------------------------------------------------ -Cluster::~Cluster() -{ - -} - - -// ------------------------------------------------------------------------------------------------ -Skin::Skin(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Deformer(id,element,doc,name) -, accuracy( 0.0f ) { - const Scope& sc = GetRequiredScope(element); - - const Element* const Link_DeformAcuracy = sc["Link_DeformAcuracy"]; - if(Link_DeformAcuracy) { - accuracy = ParseTokenAsFloat(GetRequiredToken(*Link_DeformAcuracy,0)); - } - - // resolve assigned clusters - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"Deformer"); - - clusters.reserve(conns.size()); - for(const Connection* con : conns) { - - const Cluster* const cluster = ProcessSimpleConnection<Cluster>(*con, false, "Cluster -> Skin", element); - if(cluster) { - clusters.push_back(cluster); - continue; - } - } -} - - -// 
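// The Cluster constructor above reads two parallel arrays, Indexes and
// Weights, and the DOMError checks guarantee they stay the same length. When
// a cluster later becomes an aiBone, those pairs turn into aiVertexWeight
// entries; a minimal sketch of that step, ignoring the control-point to
// output-vertex remapping the real converter performs (FillBoneWeights is an
// illustrative name, not a function from the original file):
static void FillBoneWeights(aiBone &bone,
                            const std::vector<unsigned int> &indices,
                            const std::vector<float> &weights) {
    ai_assert(indices.size() == weights.size()); // enforced by the Cluster ctor
    bone.mNumWeights = static_cast<unsigned int>(indices.size());
    bone.mWeights = new aiVertexWeight[indices.size()];
    for (size_t i = 0; i < indices.size(); ++i) {
        bone.mWeights[i].mVertexId = indices[i];
        bone.mWeights[i].mWeight = weights[i];
    }
}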
------------------------------------------------------------------------------------------------ -Skin::~Skin() -{ - -} -// ------------------------------------------------------------------------------------------------ -BlendShape::BlendShape(uint64_t id, const Element& element, const Document& doc, const std::string& name) - : Deformer(id, element, doc, name) -{ - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(), "Deformer"); - blendShapeChannels.reserve(conns.size()); - for (const Connection* con : conns) { - const BlendShapeChannel* const bspc = ProcessSimpleConnection<BlendShapeChannel>(*con, false, "BlendShapeChannel -> BlendShape", element); - if (bspc) { - blendShapeChannels.push_back(bspc); - continue; - } - } -} -// ------------------------------------------------------------------------------------------------ -BlendShape::~BlendShape() -{ - -} -// ------------------------------------------------------------------------------------------------ -BlendShapeChannel::BlendShapeChannel(uint64_t id, const Element& element, const Document& doc, const std::string& name) - : Deformer(id, element, doc, name) -{ - const Scope& sc = GetRequiredScope(element); - const Element* const DeformPercent = sc["DeformPercent"]; - if (DeformPercent) { - percent = ParseTokenAsFloat(GetRequiredToken(*DeformPercent, 0)); - } - const Element* const FullWeights = sc["FullWeights"]; - if (FullWeights) { - ParseVectorDataArray(fullWeights, *FullWeights); - } - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(), "Geometry"); - shapeGeometries.reserve(conns.size()); - for (const Connection* con : conns) { - const ShapeGeometry* const sg = ProcessSimpleConnection<ShapeGeometry>(*con, false, "Shape -> BlendShapeChannel", element); - if (sg) { - shapeGeometries.push_back(sg); - continue; - } - } -} -// ------------------------------------------------------------------------------------------------ -BlendShapeChannel::~BlendShapeChannel() -{ - -} -// ------------------------------------------------------------------------------------------------ -} -} -#endif - diff --git a/thirdparty/assimp/code/FBX/FBXDocument.cpp b/thirdparty/assimp/code/FBX/FBXDocument.cpp deleted file mode 100644 index 506fd978dd..0000000000 --- a/thirdparty/assimp/code/FBX/FBXDocument.cpp +++ /dev/null @@ -1,718 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the* - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXDocument.cpp - * @brief Implementation of the FBX DOM classes - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXDocument.h" -#include "FBXMeshGeometry.h" -#include "FBXParser.h" -#include "FBXUtil.h" -#include "FBXImporter.h" -#include "FBXImportSettings.h" -#include "FBXDocumentUtil.h" -#include "FBXProperties.h" - -#include <memory> -#include <functional> -#include <map> - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -LazyObject::LazyObject(uint64_t id, const Element& element, const Document& doc) -: doc(doc) -, element(element) -, id(id) -, flags() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -LazyObject::~LazyObject() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -const Object* LazyObject::Get(bool dieOnError) -{ - if(IsBeingConstructed() || FailedToConstruct()) { - return nullptr; - } - - if (object.get()) { - return object.get(); - } - - const Token& key = element.KeyToken(); - const TokenList& tokens = element.Tokens(); - - if(tokens.size() < 3) { - DOMError("expected at least 3 tokens: id, name and class tag",&element); - } - - const char* err; - std::string name = ParseTokenAsString(*tokens[1],err); - if (err) { - DOMError(err,&element); - } - - // small fix for binary reading: binary fbx files don't use - // prefixes such as Model:: in front of their names. The - // loading code expects this at many places, though! - // so convert the binary representation (a 0x0001) to the - // double colon notation. - if(tokens[1]->IsBinary()) { - for (size_t i = 0; i < name.length(); ++i) { - if (name[i] == 0x0 && name[i+1] == 0x1) { - name = name.substr(i+2) + "::" + name.substr(0,i); - } - } - } - - const std::string classtag = ParseTokenAsString(*tokens[2],err); - if (err) { - DOMError(err,&element); - } - - // prevent recursive calls - flags |= BEING_CONSTRUCTED; - - try { - // this needs to be relatively fast since it happens a lot, - // so avoid constructing strings all the time. 
- const char* obtype = key.begin(); - const size_t length = static_cast<size_t>(key.end()-key.begin()); - - // For debugging - //dumpObjectClassInfo( objtype, classtag ); - - if (!strncmp(obtype,"Geometry",length)) { - if (!strcmp(classtag.c_str(),"Mesh")) { - object.reset(new MeshGeometry(id,element,name,doc)); - } - if (!strcmp(classtag.c_str(), "Shape")) { - object.reset(new ShapeGeometry(id, element, name, doc)); - } - if (!strcmp(classtag.c_str(), "Line")) { - object.reset(new LineGeometry(id, element, name, doc)); - } - } - else if (!strncmp(obtype,"NodeAttribute",length)) { - if (!strcmp(classtag.c_str(),"Camera")) { - object.reset(new Camera(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(),"CameraSwitcher")) { - object.reset(new CameraSwitcher(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(),"Light")) { - object.reset(new Light(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(),"Null")) { - object.reset(new Null(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(),"LimbNode")) { - object.reset(new LimbNode(id,element,doc,name)); - } - } - else if (!strncmp(obtype,"Deformer",length)) { - if (!strcmp(classtag.c_str(),"Cluster")) { - object.reset(new Cluster(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(),"Skin")) { - object.reset(new Skin(id,element,doc,name)); - } - else if (!strcmp(classtag.c_str(), "BlendShape")) { - object.reset(new BlendShape(id, element, doc, name)); - } - else if (!strcmp(classtag.c_str(), "BlendShapeChannel")) { - object.reset(new BlendShapeChannel(id, element, doc, name)); - } - } - else if ( !strncmp( obtype, "Model", length ) ) { - // FK and IK effectors are not supported - if ( strcmp( classtag.c_str(), "IKEffector" ) && strcmp( classtag.c_str(), "FKEffector" ) ) { - object.reset( new Model( id, element, doc, name ) ); - } - } - else if (!strncmp(obtype,"Material",length)) { - object.reset(new Material(id,element,doc,name)); - } - else if (!strncmp(obtype,"Texture",length)) { - object.reset(new Texture(id,element,doc,name)); - } - else if (!strncmp(obtype,"LayeredTexture",length)) { - object.reset(new LayeredTexture(id,element,doc,name)); - } - else if (!strncmp(obtype,"Video",length)) { - object.reset(new Video(id,element,doc,name)); - } - else if (!strncmp(obtype,"AnimationStack",length)) { - object.reset(new AnimationStack(id,element,name,doc)); - } - else if (!strncmp(obtype,"AnimationLayer",length)) { - object.reset(new AnimationLayer(id,element,name,doc)); - } - // note: order matters for these two - else if (!strncmp(obtype,"AnimationCurve",length)) { - object.reset(new AnimationCurve(id,element,name,doc)); - } - else if (!strncmp(obtype,"AnimationCurveNode",length)) { - object.reset(new AnimationCurveNode(id,element,name,doc)); - } - } - catch(std::exception& ex) { - flags &= ~BEING_CONSTRUCTED; - flags |= FAILED_TO_CONSTRUCT; - - if(dieOnError || doc.Settings().strictMode) { - throw; - } - - // note: the error message is already formatted, so raw logging is ok - if(!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_ERROR(ex.what()); - } - return NULL; - } - - if (!object.get()) { - //DOMError("failed to convert element to DOM object, class: " + classtag + ", name: " + name,&element); - } - - flags &= ~BEING_CONSTRUCTED; - return object.get(); -} - -// ------------------------------------------------------------------------------------------------ -Object::Object(uint64_t id, const Element& element, const std::string& name) -: element(element) -, name(name) -, id(id) -{ - // empty -} - 
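// As the comment near the top of LazyObject::Get() above explains, binary FBX
// files store object names as "name\0\1ClassName" rather than the ASCII
// "ClassName::name" form the rest of the loader expects, and the small loop
// there rewrites one form into the other. The same transformation in
// isolation (FixBinaryObjectName is an illustrative name, not a function from
// the original file):
static std::string FixBinaryObjectName(const std::string &name) {
    for (size_t i = 0; i + 1 < name.length(); ++i) {
        if (name[i] == '\x00' && name[i + 1] == '\x01') {
            // "name\0\1Class" becomes "Class::name"
            return name.substr(i + 2) + "::" + name.substr(0, i);
        }
    }
    return name; // ASCII files already carry the "Class::" prefix
}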
-// ------------------------------------------------------------------------------------------------ -Object::~Object() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -FileGlobalSettings::FileGlobalSettings(const Document& doc, std::shared_ptr<const PropertyTable> props) -: props(props) -, doc(doc) -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -FileGlobalSettings::~FileGlobalSettings() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -Document::Document(const Parser& parser, const ImportSettings& settings) -: settings(settings) -, parser(parser) -{ - // Cannot use array default initialization syntax because vc8 fails on it - for (auto &timeStamp : creationTimeStamp) { - timeStamp = 0; - } - - ReadHeader(); - ReadPropertyTemplates(); - - ReadGlobalSettings(); - - // This order is important, connections need parsed objects to check - // whether connections are ok or not. Objects may not be evaluated yet, - // though, since this may require valid connections. - ReadObjects(); - ReadConnections(); -} - -// ------------------------------------------------------------------------------------------------ -Document::~Document() -{ - for(ObjectMap::value_type& v : objects) { - delete v.second; - } - - for(ConnectionMap::value_type& v : src_connections) { - delete v.second; - } - // |dest_connections| contain the same Connection objects as the |src_connections| -} - -// ------------------------------------------------------------------------------------------------ -static const unsigned int LowerSupportedVersion = 7100; -static const unsigned int UpperSupportedVersion = 7400; - -void Document::ReadHeader() { - // Read ID objects from "Objects" section - const Scope& sc = parser.GetRootScope(); - const Element* const ehead = sc["FBXHeaderExtension"]; - if(!ehead || !ehead->Compound()) { - DOMError("no FBXHeaderExtension dictionary found"); - } - - const Scope& shead = *ehead->Compound(); - fbxVersion = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(shead,"FBXVersion",ehead),0)); - - // While we may have some success with newer files, we don't support - // the older 6.n fbx format - if(fbxVersion < LowerSupportedVersion ) { - DOMError("unsupported, old format version, supported are only FBX 2011, FBX 2012 and FBX 2013"); - } - if(fbxVersion > UpperSupportedVersion ) { - if(Settings().strictMode) { - DOMError("unsupported, newer format version, supported are only FBX 2011, FBX 2012 and FBX 2013" - " (turn off strict mode to try anyhow) "); - } - else { - DOMWarning("unsupported, newer format version, supported are only FBX 2011, FBX 2012 and FBX 2013," - " trying to read it nevertheless"); - } - } - - const Element* const ecreator = shead["Creator"]; - if(ecreator) { - creator = ParseTokenAsString(GetRequiredToken(*ecreator,0)); - } - - const Element* const etimestamp = shead["CreationTimeStamp"]; - if(etimestamp && etimestamp->Compound()) { - const Scope& stimestamp = *etimestamp->Compound(); - creationTimeStamp[0] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Year"),0)); - creationTimeStamp[1] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Month"),0)); - creationTimeStamp[2] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Day"),0)); - creationTimeStamp[3] = 
ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Hour"),0)); - creationTimeStamp[4] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Minute"),0)); - creationTimeStamp[5] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Second"),0)); - creationTimeStamp[6] = ParseTokenAsInt(GetRequiredToken(GetRequiredElement(stimestamp,"Millisecond"),0)); - } -} - -// ------------------------------------------------------------------------------------------------ -void Document::ReadGlobalSettings() -{ - const Scope& sc = parser.GetRootScope(); - const Element* const ehead = sc["GlobalSettings"]; - if ( nullptr == ehead || !ehead->Compound() ) { - DOMWarning( "no GlobalSettings dictionary found" ); - globals.reset(new FileGlobalSettings(*this, std::make_shared<const PropertyTable>())); - return; - } - - std::shared_ptr<const PropertyTable> props = GetPropertyTable( *this, "", *ehead, *ehead->Compound(), true ); - - //double v = PropertyGet<float>( *props.get(), std::string("UnitScaleFactor"), 1.0 ); - - if(!props) { - DOMError("GlobalSettings dictionary contains no property table"); - } - - globals.reset(new FileGlobalSettings(*this, props)); -} - -// ------------------------------------------------------------------------------------------------ -void Document::ReadObjects() -{ - // read ID objects from "Objects" section - const Scope& sc = parser.GetRootScope(); - const Element* const eobjects = sc["Objects"]; - if(!eobjects || !eobjects->Compound()) { - DOMError("no Objects dictionary found"); - } - - // add a dummy entry to represent the Model::RootNode object (id 0), - // which is only indirectly defined in the input file - objects[0] = new LazyObject(0L, *eobjects, *this); - - const Scope& sobjects = *eobjects->Compound(); - for(const ElementMap::value_type& el : sobjects.Elements()) { - - // extract ID - const TokenList& tok = el.second->Tokens(); - - if (tok.empty()) { - DOMError("expected ID after object key",el.second); - } - - const char* err; - const uint64_t id = ParseTokenAsID(*tok[0], err); - if(err) { - DOMError(err,el.second); - } - - // id=0 is normally implicit - if(id == 0L) { - DOMError("encountered object with implicitly defined id 0",el.second); - } - - if(objects.find(id) != objects.end()) { - DOMWarning("encountered duplicate object id, ignoring first occurrence",el.second); - } - - objects[id] = new LazyObject(id, *el.second, *this); - - // grab all animation stacks upfront since there is no listing of them - if(!strcmp(el.first.c_str(),"AnimationStack")) { - animationStacks.push_back(id); - } - } -} - -// ------------------------------------------------------------------------------------------------ -void Document::ReadPropertyTemplates() -{ - const Scope& sc = parser.GetRootScope(); - // read property templates from "Definitions" section - const Element* const edefs = sc["Definitions"]; - if(!edefs || !edefs->Compound()) { - DOMWarning("no Definitions dictionary found"); - return; - } - - const Scope& sdefs = *edefs->Compound(); - const ElementCollection otypes = sdefs.GetCollection("ObjectType"); - for(ElementMap::const_iterator it = otypes.first; it != otypes.second; ++it) { - const Element& el = *(*it).second; - const Scope* sc = el.Compound(); - if(!sc) { - DOMWarning("expected nested scope in ObjectType, ignoring",&el); - continue; - } - - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - DOMWarning("expected name for ObjectType element, ignoring",&el); - continue; - } - - const std::string& oname = 
ParseTokenAsString(*tok[0]); - - const ElementCollection templs = sc->GetCollection("PropertyTemplate"); - for(ElementMap::const_iterator it = templs.first; it != templs.second; ++it) { - const Element& el = *(*it).second; - const Scope* sc = el.Compound(); - if(!sc) { - DOMWarning("expected nested scope in PropertyTemplate, ignoring",&el); - continue; - } - - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - DOMWarning("expected name for PropertyTemplate element, ignoring",&el); - continue; - } - - const std::string& pname = ParseTokenAsString(*tok[0]); - - const Element* Properties70 = (*sc)["Properties70"]; - if(Properties70) { - std::shared_ptr<const PropertyTable> props = std::make_shared<const PropertyTable>( - *Properties70,std::shared_ptr<const PropertyTable>(static_cast<const PropertyTable*>(NULL)) - ); - - templates[oname+"."+pname] = props; - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -void Document::ReadConnections() -{ - const Scope& sc = parser.GetRootScope(); - // read property templates from "Definitions" section - const Element* const econns = sc["Connections"]; - if(!econns || !econns->Compound()) { - DOMError("no Connections dictionary found"); - } - - uint64_t insertionOrder = 0l; - const Scope& sconns = *econns->Compound(); - const ElementCollection conns = sconns.GetCollection("C"); - for(ElementMap::const_iterator it = conns.first; it != conns.second; ++it) { - const Element& el = *(*it).second; - const std::string& type = ParseTokenAsString(GetRequiredToken(el,0)); - - // PP = property-property connection, ignored for now - // (tokens: "PP", ID1, "Property1", ID2, "Property2") - if ( type == "PP" ) { - continue; - } - - const uint64_t src = ParseTokenAsID(GetRequiredToken(el,1)); - const uint64_t dest = ParseTokenAsID(GetRequiredToken(el,2)); - - // OO = object-object connection - // OP = object-property connection, in which case the destination property follows the object ID - const std::string& prop = (type == "OP" ? 
ParseTokenAsString(GetRequiredToken(el,3)) : ""); - - if(objects.find(src) == objects.end()) { - DOMWarning("source object for connection does not exist",&el); - continue; - } - - // dest may be 0 (root node) but we added a dummy object before - if(objects.find(dest) == objects.end()) { - DOMWarning("destination object for connection does not exist",&el); - continue; - } - - // add new connection - const Connection* const c = new Connection(insertionOrder++,src,dest,prop,*this); - src_connections.insert(ConnectionMap::value_type(src,c)); - dest_connections.insert(ConnectionMap::value_type(dest,c)); - } -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<const AnimationStack*>& Document::AnimationStacks() const -{ - if (!animationStacksResolved.empty() || animationStacks.empty()) { - return animationStacksResolved; - } - - animationStacksResolved.reserve(animationStacks.size()); - for(uint64_t id : animationStacks) { - LazyObject* const lazy = GetObject(id); - const AnimationStack* stack; - if(!lazy || !(stack = lazy->Get<AnimationStack>())) { - DOMWarning("failed to read AnimationStack object"); - continue; - } - animationStacksResolved.push_back(stack); - } - - return animationStacksResolved; -} - -// ------------------------------------------------------------------------------------------------ -LazyObject* Document::GetObject(uint64_t id) const -{ - ObjectMap::const_iterator it = objects.find(id); - return it == objects.end() ? nullptr : (*it).second; -} - -#define MAX_CLASSNAMES 6 - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsSequenced(uint64_t id, const ConnectionMap& conns) const -{ - std::vector<const Connection*> temp; - - const std::pair<ConnectionMap::const_iterator,ConnectionMap::const_iterator> range = - conns.equal_range(id); - - temp.reserve(std::distance(range.first,range.second)); - for (ConnectionMap::const_iterator it = range.first; it != range.second; ++it) { - temp.push_back((*it).second); - } - - std::sort(temp.begin(), temp.end(), std::mem_fn(&Connection::Compare)); - - return temp; // NRVO should handle this -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsSequenced(uint64_t id, bool is_src, - const ConnectionMap& conns, - const char* const* classnames, - size_t count) const - -{ - ai_assert(classnames); - ai_assert( count != 0 ); - ai_assert( count <= MAX_CLASSNAMES); - - size_t lengths[MAX_CLASSNAMES]; - - const size_t c = count; - for (size_t i = 0; i < c; ++i) { - lengths[ i ] = strlen(classnames[i]); - } - - std::vector<const Connection*> temp; - const std::pair<ConnectionMap::const_iterator,ConnectionMap::const_iterator> range = - conns.equal_range(id); - - temp.reserve(std::distance(range.first,range.second)); - for (ConnectionMap::const_iterator it = range.first; it != range.second; ++it) { - const Token& key = (is_src - ? 
(*it).second->LazyDestinationObject() - : (*it).second->LazySourceObject() - ).GetElement().KeyToken(); - - const char* obtype = key.begin(); - - for (size_t i = 0; i < c; ++i) { - ai_assert(classnames[i]); - if(static_cast<size_t>(std::distance(key.begin(),key.end())) == lengths[i] && !strncmp(classnames[i],obtype,lengths[i])) { - obtype = nullptr; - break; - } - } - - if(obtype) { - continue; - } - - temp.push_back((*it).second); - } - - std::sort(temp.begin(), temp.end(), std::mem_fn(&Connection::Compare)); - return temp; // NRVO should handle this -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsBySourceSequenced(uint64_t source) const -{ - return GetConnectionsSequenced(source, ConnectionsBySource()); -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsBySourceSequenced(uint64_t src, const char* classname) const -{ - const char* arr[] = {classname}; - return GetConnectionsBySourceSequenced(src, arr,1); -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsBySourceSequenced(uint64_t source, - const char* const* classnames, size_t count) const -{ - return GetConnectionsSequenced(source, true, ConnectionsBySource(),classnames, count); -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsByDestinationSequenced(uint64_t dest, - const char* classname) const -{ - const char* arr[] = {classname}; - return GetConnectionsByDestinationSequenced(dest, arr,1); -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsByDestinationSequenced(uint64_t dest) const -{ - return GetConnectionsSequenced(dest, ConnectionsByDestination()); -} - -// ------------------------------------------------------------------------------------------------ -std::vector<const Connection*> Document::GetConnectionsByDestinationSequenced(uint64_t dest, - const char* const* classnames, size_t count) const - -{ - return GetConnectionsSequenced(dest, false, ConnectionsByDestination(),classnames, count); -} - -// ------------------------------------------------------------------------------------------------ -Connection::Connection(uint64_t insertionOrder, uint64_t src, uint64_t dest, const std::string& prop, - const Document& doc) - -: insertionOrder(insertionOrder) -, prop(prop) -, src(src) -, dest(dest) -, doc(doc) -{ - ai_assert(doc.Objects().find(src) != doc.Objects().end()); - // dest may be 0 (root node) - ai_assert(!dest || doc.Objects().find(dest) != doc.Objects().end()); -} - -// ------------------------------------------------------------------------------------------------ -Connection::~Connection() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -LazyObject& Connection::LazySourceObject() const -{ - LazyObject* const lazy = doc.GetObject(src); - ai_assert(lazy); - return *lazy; -} - -// ------------------------------------------------------------------------------------------------ -LazyObject& Connection::LazyDestinationObject() const -{ - LazyObject* const lazy = doc.GetObject(dest); - ai_assert(lazy); - 
return *lazy; -} - -// ------------------------------------------------------------------------------------------------ -const Object* Connection::SourceObject() const -{ - LazyObject* const lazy = doc.GetObject(src); - ai_assert(lazy); - return lazy->Get(); -} - -// ------------------------------------------------------------------------------------------------ -const Object* Connection::DestinationObject() const -{ - LazyObject* const lazy = doc.GetObject(dest); - ai_assert(lazy); - return lazy->Get(); -} - -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXDocument.h b/thirdparty/assimp/code/FBX/FBXDocument.h deleted file mode 100644 index a60d7d9efa..0000000000 --- a/thirdparty/assimp/code/FBX/FBXDocument.h +++ /dev/null @@ -1,1215 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXDocument.h - * @brief FBX DOM - */ -#ifndef INCLUDED_AI_FBX_DOCUMENT_H -#define INCLUDED_AI_FBX_DOCUMENT_H - -#include <numeric> -#include <stdint.h> -#include <assimp/mesh.h> -#include "FBXProperties.h" -#include "FBXParser.h" - -#define _AI_CONCAT(a,b) a ## b -#define AI_CONCAT(a,b) _AI_CONCAT(a,b) - -namespace Assimp { -namespace FBX { - -class Parser; -class Object; -struct ImportSettings; - -class PropertyTable; -class Document; -class Material; -class ShapeGeometry; -class LineGeometry; -class Geometry; - -class Video; - -class AnimationCurve; -class AnimationCurveNode; -class AnimationLayer; -class AnimationStack; - -class BlendShapeChannel; -class BlendShape; -class Skin; -class Cluster; - - -/** Represents a delay-parsed FBX objects. Many objects in the scene - * are not needed by assimp, so it makes no sense to parse them - * upfront. 
*/ -class LazyObject { -public: - LazyObject(uint64_t id, const Element& element, const Document& doc); - - ~LazyObject(); - - const Object* Get(bool dieOnError = false); - - template <typename T> - const T* Get(bool dieOnError = false) { - const Object* const ob = Get(dieOnError); - return ob ? dynamic_cast<const T*>(ob) : NULL; - } - - uint64_t ID() const { - return id; - } - - bool IsBeingConstructed() const { - return (flags & BEING_CONSTRUCTED) != 0; - } - - bool FailedToConstruct() const { - return (flags & FAILED_TO_CONSTRUCT) != 0; - } - - const Element& GetElement() const { - return element; - } - - const Document& GetDocument() const { - return doc; - } - -private: - const Document& doc; - const Element& element; - std::unique_ptr<const Object> object; - - const uint64_t id; - - enum Flags { - BEING_CONSTRUCTED = 0x1, - FAILED_TO_CONSTRUCT = 0x2 - }; - - unsigned int flags; -}; - -/** Base class for in-memory (DOM) representations of FBX objects */ -class Object { -public: - Object(uint64_t id, const Element& element, const std::string& name); - - virtual ~Object(); - - const Element& SourceElement() const { - return element; - } - - const std::string& Name() const { - return name; - } - - uint64_t ID() const { - return id; - } - -protected: - const Element& element; - const std::string name; - const uint64_t id; -}; - -/** DOM class for generic FBX NoteAttribute blocks. NoteAttribute's just hold a property table, - * fixed members are added by deriving classes. */ -class NodeAttribute : public Object { -public: - NodeAttribute(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~NodeAttribute(); - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - -private: - std::shared_ptr<const PropertyTable> props; -}; - -/** DOM base class for FBX camera settings attached to a node */ -class CameraSwitcher : public NodeAttribute { -public: - CameraSwitcher(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~CameraSwitcher(); - - int CameraID() const { - return cameraId; - } - - const std::string& CameraName() const { - return cameraName; - } - - const std::string& CameraIndexName() const { - return cameraIndexName; - } - -private: - int cameraId; - std::string cameraName; - std::string cameraIndexName; -}; - -#define fbx_stringize(a) #a - -#define fbx_simple_property(name, type, default_value) \ - type name() const { \ - return PropertyGet<type>(Props(), fbx_stringize(name), (default_value)); \ - } - -// XXX improve logging -#define fbx_simple_enum_property(name, type, default_value) \ - type name() const { \ - const int ival = PropertyGet<int>(Props(), fbx_stringize(name), static_cast<int>(default_value)); \ - if (ival < 0 || ival >= AI_CONCAT(type, _MAX)) { \ - ai_assert(static_cast<int>(default_value) >= 0 && static_cast<int>(default_value) < AI_CONCAT(type, _MAX)); \ - return static_cast<type>(default_value); \ - } \ - return static_cast<type>(ival); \ -} - - -/** DOM base class for FBX cameras attached to a node */ -class Camera : public NodeAttribute { -public: - Camera(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Camera(); - - fbx_simple_property(Position, aiVector3D, aiVector3D(0,0,0)) - fbx_simple_property(UpVector, aiVector3D, aiVector3D(0,1,0)) - fbx_simple_property(InterestPosition, aiVector3D, aiVector3D(0,0,0)) - - fbx_simple_property(AspectWidth, float, 1.0f) - fbx_simple_property(AspectHeight, 
float, 1.0f) - fbx_simple_property(FilmWidth, float, 1.0f) - fbx_simple_property(FilmHeight, float, 1.0f) - - fbx_simple_property(NearPlane, float, 0.1f) - fbx_simple_property(FarPlane, float, 100.0f) - - fbx_simple_property(FilmAspectRatio, float, 1.0f) - fbx_simple_property(ApertureMode, int, 0) - - fbx_simple_property(FieldOfView, float, 1.0f) - fbx_simple_property(FocalLength, float, 1.0f) -}; - -/** DOM base class for FBX null markers attached to a node */ -class Null : public NodeAttribute { -public: - Null(uint64_t id, const Element& element, const Document& doc, const std::string& name); - virtual ~Null(); -}; - -/** DOM base class for FBX limb node markers attached to a node */ -class LimbNode : public NodeAttribute { -public: - LimbNode(uint64_t id, const Element& element, const Document& doc, const std::string& name); - virtual ~LimbNode(); -}; - -/** DOM base class for FBX lights attached to a node */ -class Light : public NodeAttribute { -public: - Light(uint64_t id, const Element& element, const Document& doc, const std::string& name); - virtual ~Light(); - - enum Type - { - Type_Point, - Type_Directional, - Type_Spot, - Type_Area, - Type_Volume, - - Type_MAX // end-of-enum sentinel - }; - - enum Decay - { - Decay_None, - Decay_Linear, - Decay_Quadratic, - Decay_Cubic, - - Decay_MAX // end-of-enum sentinel - }; - - fbx_simple_property(Color, aiVector3D, aiVector3D(1,1,1)) - fbx_simple_enum_property(LightType, Type, 0) - fbx_simple_property(CastLightOnObject, bool, false) - fbx_simple_property(DrawVolumetricLight, bool, true) - fbx_simple_property(DrawGroundProjection, bool, true) - fbx_simple_property(DrawFrontFacingVolumetricLight, bool, false) - fbx_simple_property(Intensity, float, 100.0f) - fbx_simple_property(InnerAngle, float, 0.0f) - fbx_simple_property(OuterAngle, float, 45.0f) - fbx_simple_property(Fog, int, 50) - fbx_simple_enum_property(DecayType, Decay, 2) - fbx_simple_property(DecayStart, float, 1.0f) - fbx_simple_property(FileName, std::string, "") - - fbx_simple_property(EnableNearAttenuation, bool, false) - fbx_simple_property(NearAttenuationStart, float, 0.0f) - fbx_simple_property(NearAttenuationEnd, float, 0.0f) - fbx_simple_property(EnableFarAttenuation, bool, false) - fbx_simple_property(FarAttenuationStart, float, 0.0f) - fbx_simple_property(FarAttenuationEnd, float, 0.0f) - - fbx_simple_property(CastShadows, bool, true) - fbx_simple_property(ShadowColor, aiVector3D, aiVector3D(0,0,0)) - - fbx_simple_property(AreaLightShape, int, 0) - - fbx_simple_property(LeftBarnDoor, float, 20.0f) - fbx_simple_property(RightBarnDoor, float, 20.0f) - fbx_simple_property(TopBarnDoor, float, 20.0f) - fbx_simple_property(BottomBarnDoor, float, 20.0f) - fbx_simple_property(EnableBarnDoor, bool, true) -}; - -/** DOM base class for FBX models (even though its semantics are more "node" than "model" */ -class Model : public Object { -public: - enum RotOrder { - RotOrder_EulerXYZ = 0, - RotOrder_EulerXZY, - RotOrder_EulerYZX, - RotOrder_EulerYXZ, - RotOrder_EulerZXY, - RotOrder_EulerZYX, - - RotOrder_SphericXYZ, - - RotOrder_MAX // end-of-enum sentinel - }; - - enum TransformInheritance { - TransformInheritance_RrSs = 0, - TransformInheritance_RSrs, - TransformInheritance_Rrs, - - TransformInheritance_MAX // end-of-enum sentinel - }; - - Model(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Model(); - - fbx_simple_property(QuaternionInterpolate, int, 0) - - fbx_simple_property(RotationOffset, aiVector3D, aiVector3D()) - 
fbx_simple_property(RotationPivot, aiVector3D, aiVector3D()) - fbx_simple_property(ScalingOffset, aiVector3D, aiVector3D()) - fbx_simple_property(ScalingPivot, aiVector3D, aiVector3D()) - fbx_simple_property(TranslationActive, bool, false) - - fbx_simple_property(TranslationMin, aiVector3D, aiVector3D()) - fbx_simple_property(TranslationMax, aiVector3D, aiVector3D()) - - fbx_simple_property(TranslationMinX, bool, false) - fbx_simple_property(TranslationMaxX, bool, false) - fbx_simple_property(TranslationMinY, bool, false) - fbx_simple_property(TranslationMaxY, bool, false) - fbx_simple_property(TranslationMinZ, bool, false) - fbx_simple_property(TranslationMaxZ, bool, false) - - fbx_simple_enum_property(RotationOrder, RotOrder, 0) - fbx_simple_property(RotationSpaceForLimitOnly, bool, false) - fbx_simple_property(RotationStiffnessX, float, 0.0f) - fbx_simple_property(RotationStiffnessY, float, 0.0f) - fbx_simple_property(RotationStiffnessZ, float, 0.0f) - fbx_simple_property(AxisLen, float, 0.0f) - - fbx_simple_property(PreRotation, aiVector3D, aiVector3D()) - fbx_simple_property(PostRotation, aiVector3D, aiVector3D()) - fbx_simple_property(RotationActive, bool, false) - - fbx_simple_property(RotationMin, aiVector3D, aiVector3D()) - fbx_simple_property(RotationMax, aiVector3D, aiVector3D()) - - fbx_simple_property(RotationMinX, bool, false) - fbx_simple_property(RotationMaxX, bool, false) - fbx_simple_property(RotationMinY, bool, false) - fbx_simple_property(RotationMaxY, bool, false) - fbx_simple_property(RotationMinZ, bool, false) - fbx_simple_property(RotationMaxZ, bool, false) - fbx_simple_enum_property(InheritType, TransformInheritance, 0) - - fbx_simple_property(ScalingActive, bool, false) - fbx_simple_property(ScalingMin, aiVector3D, aiVector3D()) - fbx_simple_property(ScalingMax, aiVector3D, aiVector3D(1.f,1.f,1.f)) - fbx_simple_property(ScalingMinX, bool, false) - fbx_simple_property(ScalingMaxX, bool, false) - fbx_simple_property(ScalingMinY, bool, false) - fbx_simple_property(ScalingMaxY, bool, false) - fbx_simple_property(ScalingMinZ, bool, false) - fbx_simple_property(ScalingMaxZ, bool, false) - - fbx_simple_property(GeometricTranslation, aiVector3D, aiVector3D()) - fbx_simple_property(GeometricRotation, aiVector3D, aiVector3D()) - fbx_simple_property(GeometricScaling, aiVector3D, aiVector3D(1.f, 1.f, 1.f)) - - fbx_simple_property(MinDampRangeX, float, 0.0f) - fbx_simple_property(MinDampRangeY, float, 0.0f) - fbx_simple_property(MinDampRangeZ, float, 0.0f) - fbx_simple_property(MaxDampRangeX, float, 0.0f) - fbx_simple_property(MaxDampRangeY, float, 0.0f) - fbx_simple_property(MaxDampRangeZ, float, 0.0f) - - fbx_simple_property(MinDampStrengthX, float, 0.0f) - fbx_simple_property(MinDampStrengthY, float, 0.0f) - fbx_simple_property(MinDampStrengthZ, float, 0.0f) - fbx_simple_property(MaxDampStrengthX, float, 0.0f) - fbx_simple_property(MaxDampStrengthY, float, 0.0f) - fbx_simple_property(MaxDampStrengthZ, float, 0.0f) - - fbx_simple_property(PreferredAngleX, float, 0.0f) - fbx_simple_property(PreferredAngleY, float, 0.0f) - fbx_simple_property(PreferredAngleZ, float, 0.0f) - - fbx_simple_property(Show, bool, true) - fbx_simple_property(LODBox, bool, false) - fbx_simple_property(Freeze, bool, false) - - const std::string& Shading() const { - return shading; - } - - const std::string& Culling() const { - return culling; - } - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - /** Get material links */ - const std::vector<const 
Material*>& GetMaterials() const { - return materials; - } - - /** Get geometry links */ - const std::vector<const Geometry*>& GetGeometry() const { - return geometry; - } - - /** Get node attachments */ - const std::vector<const NodeAttribute*>& GetAttributes() const { - return attributes; - } - - /** convenience method to check if the node has a Null node marker */ - bool IsNull() const; - -private: - void ResolveLinks(const Element& element, const Document& doc); - -private: - std::vector<const Material*> materials; - std::vector<const Geometry*> geometry; - std::vector<const NodeAttribute*> attributes; - - std::string shading; - std::string culling; - std::shared_ptr<const PropertyTable> props; -}; - -/** DOM class for generic FBX textures */ -class Texture : public Object { -public: - Texture(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Texture(); - - const std::string& Type() const { - return type; - } - - const std::string& FileName() const { - return fileName; - } - - const std::string& RelativeFilename() const { - return relativeFileName; - } - - const std::string& AlphaSource() const { - return alphaSource; - } - - const aiVector2D& UVTranslation() const { - return uvTrans; - } - - const aiVector2D& UVScaling() const { - return uvScaling; - } - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - // return a 4-tuple - const unsigned int* Crop() const { - return crop; - } - - const Video* Media() const { - return media; - } - -private: - aiVector2D uvTrans; - aiVector2D uvScaling; - - std::string type; - std::string relativeFileName; - std::string fileName; - std::string alphaSource; - std::shared_ptr<const PropertyTable> props; - - unsigned int crop[4]; - - const Video* media; -}; - -/** DOM class for layered FBX textures */ -class LayeredTexture : public Object { -public: - LayeredTexture(uint64_t id, const Element& element, const Document& doc, const std::string& name); - virtual ~LayeredTexture(); - - // Can only be called after construction of the layered texture object due to construction flag. 
- void fillTexture(const Document& doc); - - enum BlendMode { - BlendMode_Translucent, - BlendMode_Additive, - BlendMode_Modulate, - BlendMode_Modulate2, - BlendMode_Over, - BlendMode_Normal, - BlendMode_Dissolve, - BlendMode_Darken, - BlendMode_ColorBurn, - BlendMode_LinearBurn, - BlendMode_DarkerColor, - BlendMode_Lighten, - BlendMode_Screen, - BlendMode_ColorDodge, - BlendMode_LinearDodge, - BlendMode_LighterColor, - BlendMode_SoftLight, - BlendMode_HardLight, - BlendMode_VividLight, - BlendMode_LinearLight, - BlendMode_PinLight, - BlendMode_HardMix, - BlendMode_Difference, - BlendMode_Exclusion, - BlendMode_Subtract, - BlendMode_Divide, - BlendMode_Hue, - BlendMode_Saturation, - BlendMode_Color, - BlendMode_Luminosity, - BlendMode_Overlay, - BlendMode_BlendModeCount - }; - - const Texture* getTexture(int index=0) const - { - return textures[index]; - - } - int textureCount() const { - return static_cast<int>(textures.size()); - } - BlendMode GetBlendMode() const - { - return blendMode; - } - float Alpha() - { - return alpha; - } -private: - std::vector<const Texture*> textures; - BlendMode blendMode; - float alpha; -}; - -typedef std::fbx_unordered_map<std::string, const Texture*> TextureMap; -typedef std::fbx_unordered_map<std::string, const LayeredTexture*> LayeredTextureMap; - - -/** DOM class for generic FBX videos */ -class Video : public Object { -public: - Video(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Video(); - - const std::string& Type() const { - return type; - } - - const std::string& FileName() const { - return fileName; - } - - const std::string& RelativeFilename() const { - return relativeFileName; - } - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - const uint8_t* Content() const { - ai_assert(content); - return content; - } - - uint64_t ContentLength() const { - return contentLength; - } - - uint8_t* RelinquishContent() { - uint8_t* ptr = content; - content = 0; - return ptr; - } - - bool operator==(const Video& other) const - { - return ( - type == other.type - && relativeFileName == other.relativeFileName - && fileName == other.fileName - ); - } - - bool operator<(const Video& other) const - { - return std::tie(type, relativeFileName, fileName) < std::tie(other.type, other.relativeFileName, other.fileName); - } - -private: - std::string type; - std::string relativeFileName; - std::string fileName; - std::shared_ptr<const PropertyTable> props; - - uint64_t contentLength; - uint8_t* content; -}; - -/** DOM class for generic FBX materials */ -class Material : public Object { -public: - Material(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Material(); - - const std::string& GetShadingModel() const { - return shading; - } - - bool IsMultilayer() const { - return multilayer; - } - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - const TextureMap& Textures() const { - return textures; - } - - const LayeredTextureMap& LayeredTextures() const { - return layeredTextures; - } - -private: - std::string shading; - bool multilayer; - std::shared_ptr<const PropertyTable> props; - - TextureMap textures; - LayeredTextureMap layeredTextures; -}; - -typedef std::vector<int64_t> KeyTimeList; -typedef std::vector<float> KeyValueList; - -/** Represents a FBX animation curve (i.e. 
a 1-dimensional set of keyframes and values therefor) */ -class AnimationCurve : public Object { -public: - AnimationCurve(uint64_t id, const Element& element, const std::string& name, const Document& doc); - virtual ~AnimationCurve(); - - /** get list of keyframe positions (time). - * Invariant: |GetKeys()| > 0 */ - const KeyTimeList& GetKeys() const { - return keys; - } - - /** get list of keyframe values. - * Invariant: |GetKeys()| == |GetValues()| && |GetKeys()| > 0*/ - const KeyValueList& GetValues() const { - return values; - } - - const std::vector<float>& GetAttributes() const { - return attributes; - } - - const std::vector<unsigned int>& GetFlags() const { - return flags; - } - -private: - KeyTimeList keys; - KeyValueList values; - std::vector<float> attributes; - std::vector<unsigned int> flags; -}; - -// property-name -> animation curve -typedef std::map<std::string, const AnimationCurve*> AnimationCurveMap; - -/** Represents a FBX animation curve (i.e. a mapping from single animation curves to nodes) */ -class AnimationCurveNode : public Object { -public: - /* the optional white list specifies a list of property names for which the caller - wants animations for. If the curve node does not match one of these, std::range_error - will be thrown. */ - AnimationCurveNode(uint64_t id, const Element& element, const std::string& name, const Document& doc, - const char* const * target_prop_whitelist = NULL, size_t whitelist_size = 0); - - virtual ~AnimationCurveNode(); - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - - const AnimationCurveMap& Curves() const; - - /** Object the curve is assigned to, this can be NULL if the - * target object has no DOM representation or could not - * be read for other reasons.*/ - const Object* Target() const { - return target; - } - - const Model* TargetAsModel() const { - return dynamic_cast<const Model*>(target); - } - - const NodeAttribute* TargetAsNodeAttribute() const { - return dynamic_cast<const NodeAttribute*>(target); - } - - /** Property of Target() that is being animated*/ - const std::string& TargetProperty() const { - return prop; - } - -private: - const Object* target; - std::shared_ptr<const PropertyTable> props; - mutable AnimationCurveMap curves; - - std::string prop; - const Document& doc; -}; - -typedef std::vector<const AnimationCurveNode*> AnimationCurveNodeList; - -/** Represents a FBX animation layer (i.e. a list of node animations) */ -class AnimationLayer : public Object { -public: - AnimationLayer(uint64_t id, const Element& element, const std::string& name, const Document& doc); - virtual ~AnimationLayer(); - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - /* the optional white list specifies a list of property names for which the caller - wants animations for. Curves not matching this list will not be added to the - animation layer. */ - AnimationCurveNodeList Nodes(const char* const * target_prop_whitelist = nullptr, size_t whitelist_size = 0) const; - -private: - std::shared_ptr<const PropertyTable> props; - const Document& doc; -}; - -typedef std::vector<const AnimationLayer*> AnimationLayerList; - -/** Represents a FBX animation stack (i.e. 
a list of animation layers) */ -class AnimationStack : public Object { -public: - AnimationStack(uint64_t id, const Element& element, const std::string& name, const Document& doc); - virtual ~AnimationStack(); - - fbx_simple_property(LocalStart, int64_t, 0L) - fbx_simple_property(LocalStop, int64_t, 0L) - fbx_simple_property(ReferenceStart, int64_t, 0L) - fbx_simple_property(ReferenceStop, int64_t, 0L) - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - const AnimationLayerList& Layers() const { - return layers; - } - -private: - std::shared_ptr<const PropertyTable> props; - AnimationLayerList layers; -}; - - -/** DOM class for deformers */ -class Deformer : public Object { -public: - Deformer(uint64_t id, const Element& element, const Document& doc, const std::string& name); - virtual ~Deformer(); - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - -private: - std::shared_ptr<const PropertyTable> props; -}; - -typedef std::vector<float> WeightArray; -typedef std::vector<unsigned int> WeightIndexArray; - - -/** DOM class for BlendShapeChannel deformers */ -class BlendShapeChannel : public Deformer { -public: - BlendShapeChannel(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~BlendShapeChannel(); - - float DeformPercent() const { - return percent; - } - - const WeightArray& GetFullWeights() const { - return fullWeights; - } - - const std::vector<const ShapeGeometry*>& GetShapeGeometries() const { - return shapeGeometries; - } - -private: - float percent; - WeightArray fullWeights; - std::vector<const ShapeGeometry*> shapeGeometries; -}; - -/** DOM class for BlendShape deformers */ -class BlendShape : public Deformer { -public: - BlendShape(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~BlendShape(); - - const std::vector<const BlendShapeChannel*>& BlendShapeChannels() const { - return blendShapeChannels; - } - -private: - std::vector<const BlendShapeChannel*> blendShapeChannels; -}; - -/** DOM class for skin deformer clusters (aka sub-deformers) */ -class Cluster : public Deformer { -public: - Cluster(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Cluster(); - - /** get the list of deformer weights associated with this cluster. - * Use #GetIndices() to get the associated vertices. Both arrays - * have the same size (and may also be empty). */ - const WeightArray& GetWeights() const { - return weights; - } - - /** get indices into the vertex data of the geometry associated - * with this cluster. Use #GetWeights() to get the associated weights. - * Both arrays have the same size (and may also be empty). 
*/ - const WeightIndexArray& GetIndices() const { - return indices; - } - - /** */ - const aiMatrix4x4& Transform() const { - return transform; - } - - const aiMatrix4x4& TransformLink() const { - return transformLink; - } - - const Model* TargetNode() const { - return node; - } - -private: - WeightArray weights; - WeightIndexArray indices; - - aiMatrix4x4 transform; - aiMatrix4x4 transformLink; - - const Model* node; -}; - -/** DOM class for skin deformers */ -class Skin : public Deformer { -public: - Skin(uint64_t id, const Element& element, const Document& doc, const std::string& name); - - virtual ~Skin(); - - float DeformAccuracy() const { - return accuracy; - } - - const std::vector<const Cluster*>& Clusters() const { - return clusters; - } - -private: - float accuracy; - std::vector<const Cluster*> clusters; -}; - -/** Represents a link between two FBX objects. */ -class Connection { -public: - Connection(uint64_t insertionOrder, uint64_t src, uint64_t dest, const std::string& prop, const Document& doc); - - ~Connection(); - - // note: a connection ensures that the source and dest objects exist, but - // not that they have DOM representations, so the return value of one of - // these functions can still be NULL. - const Object* SourceObject() const; - const Object* DestinationObject() const; - - // these, however, are always guaranteed to be valid - LazyObject& LazySourceObject() const; - LazyObject& LazyDestinationObject() const; - - - /** return the name of the property the connection is attached to. - * this is an empty string for object to object (OO) connections. */ - const std::string& PropertyName() const { - return prop; - } - - uint64_t InsertionOrder() const { - return insertionOrder; - } - - int CompareTo(const Connection* c) const { - ai_assert( nullptr != c ); - - // note: can't subtract because this would overflow uint64_t - if(InsertionOrder() > c->InsertionOrder()) { - return 1; - } - else if(InsertionOrder() < c->InsertionOrder()) { - return -1; - } - return 0; - } - - bool Compare(const Connection* c) const { - ai_assert( nullptr != c ); - - return InsertionOrder() < c->InsertionOrder(); - } - -public: - uint64_t insertionOrder; - const std::string prop; - - uint64_t src, dest; - const Document& doc; -}; - -// XXX again, unique_ptr would be useful. shared_ptr is too -// bloated since the objects have a well-defined single owner -// during their entire lifetime (Document). FBX files have -// up to many thousands of objects (most of which we never use), -// so the memory overhead for them should be kept at a minimum. -typedef std::fbx_unordered_map<uint64_t, LazyObject*> ObjectMap; -typedef std::fbx_unordered_map<std::string, std::shared_ptr<const PropertyTable> > PropertyTemplateMap; - -typedef std::fbx_unordered_multimap<uint64_t, const Connection*> ConnectionMap; - -/** DOM class for global document settings, a single instance per document can - * be accessed via Document.Globals(). 
*/ -class FileGlobalSettings { -public: - FileGlobalSettings(const Document& doc, std::shared_ptr<const PropertyTable> props); - - ~FileGlobalSettings(); - - const PropertyTable& Props() const { - ai_assert(props.get()); - return *props.get(); - } - - const Document& GetDocument() const { - return doc; - } - - fbx_simple_property(UpAxis, int, 1) - fbx_simple_property(UpAxisSign, int, 1) - fbx_simple_property(FrontAxis, int, 2) - fbx_simple_property(FrontAxisSign, int, 1) - fbx_simple_property(CoordAxis, int, 0) - fbx_simple_property(CoordAxisSign, int, 1) - fbx_simple_property(OriginalUpAxis, int, 0) - fbx_simple_property(OriginalUpAxisSign, int, 1) - fbx_simple_property(UnitScaleFactor, float, 1) - fbx_simple_property(OriginalUnitScaleFactor, float, 1) - fbx_simple_property(AmbientColor, aiVector3D, aiVector3D(0,0,0)) - fbx_simple_property(DefaultCamera, std::string, "") - - - enum FrameRate { - FrameRate_DEFAULT = 0, - FrameRate_120 = 1, - FrameRate_100 = 2, - FrameRate_60 = 3, - FrameRate_50 = 4, - FrameRate_48 = 5, - FrameRate_30 = 6, - FrameRate_30_DROP = 7, - FrameRate_NTSC_DROP_FRAME = 8, - FrameRate_NTSC_FULL_FRAME = 9, - FrameRate_PAL = 10, - FrameRate_CINEMA = 11, - FrameRate_1000 = 12, - FrameRate_CINEMA_ND = 13, - FrameRate_CUSTOM = 14, - - FrameRate_MAX// end-of-enum sentinel - }; - - fbx_simple_enum_property(TimeMode, FrameRate, FrameRate_DEFAULT) - fbx_simple_property(TimeSpanStart, uint64_t, 0L) - fbx_simple_property(TimeSpanStop, uint64_t, 0L) - fbx_simple_property(CustomFrameRate, float, -1.0f) - -private: - std::shared_ptr<const PropertyTable> props; - const Document& doc; -}; - -/** DOM root for a FBX file */ -class Document { -public: - Document(const Parser& parser, const ImportSettings& settings); - - ~Document(); - - LazyObject* GetObject(uint64_t id) const; - - bool IsBinary() const { - return parser.IsBinary(); - } - - unsigned int FBXVersion() const { - return fbxVersion; - } - - const std::string& Creator() const { - return creator; - } - - // elements (in this order): Year, Month, Day, Hour, Second, Millisecond - const unsigned int* CreationTimeStamp() const { - return creationTimeStamp; - } - - const FileGlobalSettings& GlobalSettings() const { - ai_assert(globals.get()); - return *globals.get(); - } - - const PropertyTemplateMap& Templates() const { - return templates; - } - - const ObjectMap& Objects() const { - return objects; - } - - const ImportSettings& Settings() const { - return settings; - } - - const ConnectionMap& ConnectionsBySource() const { - return src_connections; - } - - const ConnectionMap& ConnectionsByDestination() const { - return dest_connections; - } - - // note: the implicit rule in all DOM classes is to always resolve - // from destination to source (since the FBX object hierarchy is, - // with very few exceptions, a DAG, this avoids cycles). In all - // cases that may involve back-facing edges in the object graph, - // use LazyObject::IsBeingConstructed() to check. 
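A minimal sketch of the destination-to-source rule noted in the comment above, assuming a Document instance `doc` and a model id `model_id` (both placeholders, not taken from this file): the caller asks for all connections arriving at the model, filters them by key class name, and resolves each source object lazily through the connection.

    // Sketch only: collect Geometry objects wired into a model node.
    const char* const classnames[] = { "Geometry" };
    const std::vector<const Connection*> conns =
        doc.GetConnectionsByDestinationSequenced(model_id, classnames, 1);

    std::vector<const Geometry*> found;
    for (const Connection* con : conns) {
        if (con->PropertyName().length()) {
            continue; // OP connection carrying a property name, not a plain OO link
        }
        // SourceObject() may still be NULL if the source has no DOM representation.
        const Geometry* const geo = dynamic_cast<const Geometry*>(con->SourceObject());
        if (geo) {
            found.push_back(geo);
        }
    }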
- - std::vector<const Connection*> GetConnectionsBySourceSequenced(uint64_t source) const; - std::vector<const Connection*> GetConnectionsByDestinationSequenced(uint64_t dest) const; - - std::vector<const Connection*> GetConnectionsBySourceSequenced(uint64_t source, const char* classname) const; - std::vector<const Connection*> GetConnectionsByDestinationSequenced(uint64_t dest, const char* classname) const; - - std::vector<const Connection*> GetConnectionsBySourceSequenced(uint64_t source, - const char* const* classnames, size_t count) const; - std::vector<const Connection*> GetConnectionsByDestinationSequenced(uint64_t dest, - const char* const* classnames, - size_t count) const; - - const std::vector<const AnimationStack*>& AnimationStacks() const; - -private: - std::vector<const Connection*> GetConnectionsSequenced(uint64_t id, const ConnectionMap&) const; - std::vector<const Connection*> GetConnectionsSequenced(uint64_t id, bool is_src, - const ConnectionMap&, - const char* const* classnames, - size_t count) const; - void ReadHeader(); - void ReadObjects(); - void ReadPropertyTemplates(); - void ReadConnections(); - void ReadGlobalSettings(); - -private: - const ImportSettings& settings; - - ObjectMap objects; - const Parser& parser; - - PropertyTemplateMap templates; - ConnectionMap src_connections; - ConnectionMap dest_connections; - - unsigned int fbxVersion; - std::string creator; - unsigned int creationTimeStamp[7]; - - std::vector<uint64_t> animationStacks; - mutable std::vector<const AnimationStack*> animationStacksResolved; - - std::unique_ptr<FileGlobalSettings> globals; -}; - -} // Namespace FBX -} // Namespace Assimp - -namespace std -{ - template <> - struct hash<const Assimp::FBX::Video> - { - std::size_t operator()(const Assimp::FBX::Video& video) const - { - using std::size_t; - using std::hash; - using std::string; - - size_t res = 17; - res = res * 31 + hash<string>()(video.Name()); - res = res * 31 + hash<string>()(video.RelativeFilename()); - res = res * 31 + hash<string>()(video.Type()); - - return res; - } - }; -} - -#endif // INCLUDED_AI_FBX_DOCUMENT_H diff --git a/thirdparty/assimp/code/FBX/FBXDocumentUtil.cpp b/thirdparty/assimp/code/FBX/FBXDocumentUtil.cpp deleted file mode 100644 index f84691479a..0000000000 --- a/thirdparty/assimp/code/FBX/FBXDocumentUtil.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXDocumentUtil.cpp - * @brief Implementation of the FBX DOM utility functions declared in FBXDocumentUtil.h - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXUtil.h" -#include "FBXDocumentUtil.h" -#include "FBXProperties.h" - - -namespace Assimp { -namespace FBX { -namespace Util { - -// ------------------------------------------------------------------------------------------------ -// signal DOM construction error, this is always unrecoverable. Throws DeadlyImportError. -void DOMError(const std::string& message, const Token& token) -{ - throw DeadlyImportError(Util::AddTokenText("FBX-DOM",message,&token)); -} - -// ------------------------------------------------------------------------------------------------ -void DOMError(const std::string& message, const Element* element /*= NULL*/) -{ - if(element) { - DOMError(message,element->KeyToken()); - } - throw DeadlyImportError("FBX-DOM " + message); -} - - -// ------------------------------------------------------------------------------------------------ -// print warning, do return -void DOMWarning(const std::string& message, const Token& token) -{ - if(DefaultLogger::get()) { - ASSIMP_LOG_WARN(Util::AddTokenText("FBX-DOM",message,&token)); - } -} - -// ------------------------------------------------------------------------------------------------ -void DOMWarning(const std::string& message, const Element* element /*= NULL*/) -{ - if(element) { - DOMWarning(message,element->KeyToken()); - return; - } - if(DefaultLogger::get()) { - ASSIMP_LOG_WARN("FBX-DOM: " + message); - } -} - - -// ------------------------------------------------------------------------------------------------ -// fetch a property table and the corresponding property template -std::shared_ptr<const PropertyTable> GetPropertyTable(const Document& doc, - const std::string& templateName, - const Element &element, - const Scope& sc, - bool no_warn /*= false*/) -{ - const Element* const Properties70 = sc["Properties70"]; - std::shared_ptr<const PropertyTable> templateProps = std::shared_ptr<const PropertyTable>( - static_cast<const PropertyTable*>(NULL)); - - if(templateName.length()) { - PropertyTemplateMap::const_iterator it = doc.Templates().find(templateName); - if(it != doc.Templates().end()) { - templateProps = (*it).second; - } - } - - if(!Properties70 || !Properties70->Compound()) { - if(!no_warn) { - DOMWarning("property table (Properties70) not found",&element); - } - if(templateProps) { - return templateProps; - } - else { - return std::make_shared<const PropertyTable>(); - } - } - return std::make_shared<const PropertyTable>(*Properties70,templateProps); -} -} // !Util -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXDocumentUtil.h b/thirdparty/assimp/code/FBX/FBXDocumentUtil.h deleted file mode 100644 index 2450109e59..0000000000 --- 
a/thirdparty/assimp/code/FBX/FBXDocumentUtil.h +++ /dev/null @@ -1,120 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2012, assimp team -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXDocumentUtil.h - * @brief FBX internal utilities used by the DOM reading code - */ -#ifndef INCLUDED_AI_FBX_DOCUMENT_UTIL_H -#define INCLUDED_AI_FBX_DOCUMENT_UTIL_H - -#include <assimp/defs.h> -#include <string> -#include <memory> -#include "FBXDocument.h" - -struct Token; -struct Element; - -namespace Assimp { -namespace FBX { -namespace Util { - -/* DOM/Parse error reporting - does not return */ -AI_WONT_RETURN void DOMError(const std::string& message, const Token& token) AI_WONT_RETURN_SUFFIX; -AI_WONT_RETURN void DOMError(const std::string& message, const Element* element = NULL) AI_WONT_RETURN_SUFFIX; - -// does return -void DOMWarning(const std::string& message, const Token& token); -void DOMWarning(const std::string& message, const Element* element = NULL); - - -// fetch a property table and the corresponding property template -std::shared_ptr<const PropertyTable> GetPropertyTable(const Document& doc, - const std::string& templateName, - const Element &element, - const Scope& sc, - bool no_warn = false); - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -const T* ProcessSimpleConnection(const Connection& con, - bool is_object_property_conn, - const char* name, - const Element& element, - const char** propNameOut = nullptr) -{ - if (is_object_property_conn && !con.PropertyName().length()) { - DOMWarning("expected incoming " + std::string(name) + - " link to be an object-object connection, ignoring", - &element - ); - return nullptr; - } - else if (!is_object_property_conn && con.PropertyName().length()) { - DOMWarning("expected incoming " + std::string(name) + - " link to be an object-property 
connection, ignoring", - &element - ); - return nullptr; - } - - if(is_object_property_conn && propNameOut) { - // note: this is ok, the return value of PropertyValue() is guaranteed to - // remain valid and unchanged as long as the document exists. - *propNameOut = con.PropertyName().c_str(); - } - - const Object* const ob = con.SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for incoming " + std::string(name) + - " link, ignoring", - &element); - return nullptr; - } - - return dynamic_cast<const T*>(ob); -} - -} //!Util -} //!FBX -} //!Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXExportNode.cpp b/thirdparty/assimp/code/FBX/FBXExportNode.cpp deleted file mode 100644 index 06c89cee46..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExportNode.cpp +++ /dev/null @@ -1,571 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#ifndef ASSIMP_BUILD_NO_EXPORT -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include "FBXExportNode.h" -#include "FBXCommon.h" - -#include <assimp/StreamWriter.h> // StreamWriterLE -#include <assimp/Exceptional.h> // DeadlyExportError -#include <assimp/ai_assert.h> -#include <assimp/StringUtils.h> // ai_snprintf - -#include <string> -#include <ostream> -#include <sstream> // ostringstream -#include <memory> // shared_ptr - -namespace Assimp { -// AddP70<type> helpers... there's no usable pattern here, -// so all are defined as separate functions. -// Even "animatable" properties are often completely different -// from the standard (nonanimated) property definition, -// so they are specified with an 'A' suffix. 
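As a usage sketch (the property names, values, and the owning `object_node` are placeholders), a caller assembling a Properties70 block chains these helpers on a node; in the ASCII output each call boils down to one "P" record whose type columns mirror the AddProperties arguments used in the definitions that follow.

    FBX::Node p70("Properties70");
    p70.AddP70int("DefaultAttributeIndex", 0);            // -> P: "DefaultAttributeIndex", "int", "Integer", "",0
    p70.AddP70double("FocalLength", 50.0);                 // -> P: "FocalLength", "double", "Number", "",50
    p70.AddP70numberA("FieldOfView", 40.0);                // -> P: "FieldOfView", "Number", "", "A",40  (animatable 'A' variant)
    p70.AddP70vectorA("Lcl Translation", 0.0, 0.0, 0.0);   // -> P: "Lcl Translation", "Vector", "", "A",0,0,0
    object_node.AddChild(p70);                             // attach to the owning object node (placeholder)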
- -void FBX::Node::AddP70int( - const std::string& name, int32_t value -) { - FBX::Node n("P"); - n.AddProperties(name, "int", "Integer", "", value); - AddChild(n); -} - -void FBX::Node::AddP70bool( - const std::string& name, bool value -) { - FBX::Node n("P"); - n.AddProperties(name, "bool", "", "", int32_t(value)); - AddChild(n); -} - -void FBX::Node::AddP70double( - const std::string& name, double value -) { - FBX::Node n("P"); - n.AddProperties(name, "double", "Number", "", value); - AddChild(n); -} - -void FBX::Node::AddP70numberA( - const std::string& name, double value -) { - FBX::Node n("P"); - n.AddProperties(name, "Number", "", "A", value); - AddChild(n); -} - -void FBX::Node::AddP70color( - const std::string& name, double r, double g, double b -) { - FBX::Node n("P"); - n.AddProperties(name, "ColorRGB", "Color", "", r, g, b); - AddChild(n); -} - -void FBX::Node::AddP70colorA( - const std::string& name, double r, double g, double b -) { - FBX::Node n("P"); - n.AddProperties(name, "Color", "", "A", r, g, b); - AddChild(n); -} - -void FBX::Node::AddP70vector( - const std::string& name, double x, double y, double z -) { - FBX::Node n("P"); - n.AddProperties(name, "Vector3D", "Vector", "", x, y, z); - AddChild(n); -} - -void FBX::Node::AddP70vectorA( - const std::string& name, double x, double y, double z -) { - FBX::Node n("P"); - n.AddProperties(name, "Vector", "", "A", x, y, z); - AddChild(n); -} - -void FBX::Node::AddP70string( - const std::string& name, const std::string& value -) { - FBX::Node n("P"); - n.AddProperties(name, "KString", "", "", value); - AddChild(n); -} - -void FBX::Node::AddP70enum( - const std::string& name, int32_t value -) { - FBX::Node n("P"); - n.AddProperties(name, "enum", "", "", value); - AddChild(n); -} - -void FBX::Node::AddP70time( - const std::string& name, int64_t value -) { - FBX::Node n("P"); - n.AddProperties(name, "KTime", "Time", "", value); - AddChild(n); -} - - -// public member functions for writing nodes to stream - -void FBX::Node::Dump( - std::shared_ptr<Assimp::IOStream> outfile, - bool binary, int indent -) { - if (binary) { - Assimp::StreamWriterLE outstream(outfile); - DumpBinary(outstream); - } else { - std::ostringstream ss; - DumpAscii(ss, indent); - std::string s = ss.str(); - outfile->Write(s.c_str(), s.size(), 1); - } -} - -void FBX::Node::Dump( - Assimp::StreamWriterLE &outstream, - bool binary, int indent -) { - if (binary) { - DumpBinary(outstream); - } else { - std::ostringstream ss; - DumpAscii(ss, indent); - outstream.PutString(ss.str()); - } -} - - -// public member functions for low-level writing - -void FBX::Node::Begin( - Assimp::StreamWriterLE &s, - bool binary, int indent -) { - if (binary) { - BeginBinary(s); - } else { - // assume we're at the correct place to start already - (void)indent; - std::ostringstream ss; - BeginAscii(ss, indent); - s.PutString(ss.str()); - } -} - -void FBX::Node::DumpProperties( - Assimp::StreamWriterLE& s, - bool binary, int indent -) { - if (binary) { - DumpPropertiesBinary(s); - } else { - std::ostringstream ss; - DumpPropertiesAscii(ss, indent); - s.PutString(ss.str()); - } -} - -void FBX::Node::EndProperties( - Assimp::StreamWriterLE &s, - bool binary, int indent -) { - EndProperties(s, binary, indent, properties.size()); -} - -void FBX::Node::EndProperties( - Assimp::StreamWriterLE &s, - bool binary, int indent, - size_t num_properties -) { - if (binary) { - EndPropertiesBinary(s, num_properties); - } else { - // nothing to do - (void)indent; - } -} - -void 
FBX::Node::BeginChildren( - Assimp::StreamWriterLE &s, - bool binary, int indent -) { - if (binary) { - // nothing to do - } else { - std::ostringstream ss; - BeginChildrenAscii(ss, indent); - s.PutString(ss.str()); - } -} - -void FBX::Node::DumpChildren( - Assimp::StreamWriterLE& s, - bool binary, int indent -) { - if (binary) { - DumpChildrenBinary(s); - } else { - std::ostringstream ss; - DumpChildrenAscii(ss, indent); - if (ss.tellp() > 0) - s.PutString(ss.str()); - } -} - -void FBX::Node::End( - Assimp::StreamWriterLE &s, - bool binary, int indent, - bool has_children -) { - if (binary) { - EndBinary(s, has_children); - } else { - std::ostringstream ss; - EndAscii(ss, indent, has_children); - if (ss.tellp() > 0) - s.PutString(ss.str()); - } -} - - -// public member functions for writing to binary fbx - -void FBX::Node::DumpBinary(Assimp::StreamWriterLE &s) -{ - // write header section (with placeholders for some things) - BeginBinary(s); - - // write properties - DumpPropertiesBinary(s); - - // go back and fill in property related placeholders - EndPropertiesBinary(s, properties.size()); - - // write children - DumpChildrenBinary(s); - - // finish, filling in end offset placeholder - EndBinary(s, force_has_children || !children.empty()); -} - - -// public member functions for writing to ascii fbx - -void FBX::Node::DumpAscii(std::ostream &s, int indent) -{ - // write name - BeginAscii(s, indent); - - // write properties - DumpPropertiesAscii(s, indent); - - if (force_has_children || !children.empty()) { - // begin children (with a '{') - BeginChildrenAscii(s, indent + 1); - // write children - DumpChildrenAscii(s, indent + 1); - } - - // finish (also closing the children bracket '}') - EndAscii(s, indent, force_has_children || !children.empty()); -} - - -// private member functions for low-level writing to fbx - -void FBX::Node::BeginBinary(Assimp::StreamWriterLE &s) -{ - // remember start pos so we can come back and write the end pos - this->start_pos = s.Tell(); - - // placeholders for end pos and property section info - s.PutU4(0); // end pos - s.PutU4(0); // number of properties - s.PutU4(0); // total property section length - - // node name - s.PutU1(uint8_t(name.size())); // length of node name - s.PutString(name); // node name as raw bytes - - // property data comes after here - this->property_start = s.Tell(); -} - -void FBX::Node::DumpPropertiesBinary(Assimp::StreamWriterLE& s) -{ - for (auto &p : properties) { - p.DumpBinary(s); - } -} - -void FBX::Node::EndPropertiesBinary( - Assimp::StreamWriterLE &s, - size_t num_properties -) { - if (num_properties == 0) { return; } - size_t pos = s.Tell(); - ai_assert(pos > property_start); - size_t property_section_size = pos - property_start; - s.Seek(start_pos + 4); - s.PutU4(uint32_t(num_properties)); - s.PutU4(uint32_t(property_section_size)); - s.Seek(pos); -} - -void FBX::Node::DumpChildrenBinary(Assimp::StreamWriterLE& s) -{ - for (FBX::Node& child : children) { - child.DumpBinary(s); - } -} - -void FBX::Node::EndBinary( - Assimp::StreamWriterLE &s, - bool has_children -) { - // if there were children, add a null record - if (has_children) { s.PutString(Assimp::FBX::NULL_RECORD); } - - // now go back and write initial pos - this->end_pos = s.Tell(); - s.Seek(start_pos); - s.PutU4(uint32_t(end_pos)); - s.Seek(end_pos); -} - - -void FBX::Node::BeginAscii(std::ostream& s, int indent) -{ - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << name << ": "; -} - -void FBX::Node::DumpPropertiesAscii(std::ostream &s, int 
indent) -{ - for (size_t i = 0; i < properties.size(); ++i) { - if (i > 0) { s << ", "; } - properties[i].DumpAscii(s, indent); - } -} - -void FBX::Node::BeginChildrenAscii(std::ostream& s, int indent) -{ - // only call this if there are actually children - s << " {"; - (void)indent; -} - -void FBX::Node::DumpChildrenAscii(std::ostream& s, int indent) -{ - // children will need a lot of padding and corralling - if (children.size() || force_has_children) { - for (size_t i = 0; i < children.size(); ++i) { - // no compression in ascii files, so skip this node if it exists - if (children[i].name == "EncryptionType") { continue; } - // the child can dump itself - children[i].DumpAscii(s, indent); - } - } -} - -void FBX::Node::EndAscii(std::ostream& s, int indent, bool has_children) -{ - if (!has_children) { return; } // nothing to do - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << "}"; -} - -// private helpers for static member functions - -// ascii property node from vector of doubles -void FBX::Node::WritePropertyNodeAscii( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s, - int indent -){ - char buffer[32]; - FBX::Node node(name); - node.Begin(s, false, indent); - std::string vsize = to_string(v.size()); - // *<size> { - s.PutChar('*'); s.PutString(vsize); s.PutString(" {\n"); - // indent + 1 - for (int i = 0; i < indent + 1; ++i) { s.PutChar('\t'); } - // a: value,value,value,... - s.PutString("a: "); - int count = 0; - for (size_t i = 0; i < v.size(); ++i) { - if (i > 0) { s.PutChar(','); } - int len = ai_snprintf(buffer, sizeof(buffer), "%f", v[i]); - count += len; - if (count > 2048) { s.PutChar('\n'); count = 0; } - if (len < 0 || len > 31) { - // this should never happen - throw DeadlyExportError("failed to convert double to string"); - } - for (int j = 0; j < len; ++j) { s.PutChar(buffer[j]); } - } - // } - s.PutChar('\n'); - for (int i = 0; i < indent; ++i) { s.PutChar('\t'); } - s.PutChar('}'); s.PutChar(' '); - node.End(s, false, indent, false); -} - -// ascii property node from vector of int32_t -void FBX::Node::WritePropertyNodeAscii( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s, - int indent -){ - char buffer[32]; - FBX::Node node(name); - node.Begin(s, false, indent); - std::string vsize = to_string(v.size()); - // *<size> { - s.PutChar('*'); s.PutString(vsize); s.PutString(" {\n"); - // indent + 1 - for (int i = 0; i < indent + 1; ++i) { s.PutChar('\t'); } - // a: value,value,value,... - s.PutString("a: "); - int count = 0; - for (size_t i = 0; i < v.size(); ++i) { - if (i > 0) { s.PutChar(','); } - int len = ai_snprintf(buffer, sizeof(buffer), "%d", v[i]); - count += len; - if (count > 2048) { s.PutChar('\n'); count = 0; } - if (len < 0 || len > 31) { - // this should never happen - throw DeadlyExportError("failed to convert double to string"); - } - for (int j = 0; j < len; ++j) { s.PutChar(buffer[j]); } - } - // } - s.PutChar('\n'); - for (int i = 0; i < indent; ++i) { s.PutChar('\t'); } - s.PutChar('}'); s.PutChar(' '); - node.End(s, false, indent, false); -} - -// binary property node from vector of doubles -// TODO: optional zip compression! 
-void FBX::Node::WritePropertyNodeBinary( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s -){ - FBX::Node node(name); - node.BeginBinary(s); - s.PutU1('d'); - s.PutU4(uint32_t(v.size())); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - s.PutU4(uint32_t(v.size()) * 8); // data size - for (auto it = v.begin(); it != v.end(); ++it) { s.PutF8(*it); } - node.EndPropertiesBinary(s, 1); - node.EndBinary(s, false); -} - -// binary property node from vector of int32_t -// TODO: optional zip compression! -void FBX::Node::WritePropertyNodeBinary( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s -){ - FBX::Node node(name); - node.BeginBinary(s); - s.PutU1('i'); - s.PutU4(uint32_t(v.size())); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - s.PutU4(uint32_t(v.size()) * 4); // data size - for (auto it = v.begin(); it != v.end(); ++it) { s.PutI4(*it); } - node.EndPropertiesBinary(s, 1); - node.EndBinary(s, false); -} - -// public static member functions - -// convenience function to create and write a property node, -// holding a single property which is an array of values. -// does not copy the data, so is efficient for large arrays. -void FBX::Node::WritePropertyNode( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s, - bool binary, int indent -){ - if (binary) { - FBX::Node::WritePropertyNodeBinary(name, v, s); - } else { - FBX::Node::WritePropertyNodeAscii(name, v, s, indent); - } -} - -// convenience function to create and write a property node, -// holding a single property which is an array of values. -// does not copy the data, so is efficient for large arrays. -void FBX::Node::WritePropertyNode( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s, - bool binary, int indent -){ - if (binary) { - FBX::Node::WritePropertyNodeBinary(name, v, s); - } else { - FBX::Node::WritePropertyNodeAscii(name, v, s, indent); - } -} -} -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER -#endif // ASSIMP_BUILD_NO_EXPORT diff --git a/thirdparty/assimp/code/FBX/FBXExportNode.h b/thirdparty/assimp/code/FBX/FBXExportNode.h deleted file mode 100644 index ef3bc781a4..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExportNode.h +++ /dev/null @@ -1,271 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXExportNode.h -* Declares the FBX::Node helper class for fbx export. -*/ -#ifndef AI_FBXEXPORTNODE_H_INC -#define AI_FBXEXPORTNODE_H_INC - -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include "FBXExportProperty.h" - -#include <assimp/StreamWriter.h> // StreamWriterLE - -#include <string> -#include <vector> - -namespace Assimp { -namespace FBX { - class Node; -} - -class FBX::Node { -public: - // TODO: accessors - std::string name; // node name - std::vector<FBX::FBXExportProperty> properties; // node properties - std::vector<FBX::Node> children; // child nodes - - // some nodes always pretend they have children... - bool force_has_children = false; - -public: // constructors - /// The default class constructor. - Node() = default; - - /// The class constructor with the name. - Node(const std::string& n) - : name(n) - , properties() - , children() - , force_has_children( false ) { - // empty - } - - // convenience template to construct with properties directly - template <typename... More> - Node(const std::string& n, const More... more) - : name(n) - , properties() - , children() - , force_has_children(false) { - AddProperties(more...); - } - -public: // functions to add properties or children - // add a single property to the node - template <typename T> - void AddProperty(T value) { - properties.emplace_back(value); - } - - // convenience function to add multiple properties at once - template <typename T, typename... More> - void AddProperties(T value, More... more) { - properties.emplace_back(value); - AddProperties(more...); - } - void AddProperties() {} - - // add a child node directly - void AddChild(const Node& node) { children.push_back(node); } - - // convenience function to add a child node with a single property - template <typename... More> - void AddChild( - const std::string& name, - More... more - ) { - FBX::Node c(name); - c.AddProperties(more...); - children.push_back(c); - } - -public: // support specifically for dealing with Properties70 nodes - - // it really is simpler to make these all separate functions. - // the versions with 'A' suffixes are for animatable properties. - // those often follow a completely different format internally in FBX. 
- void AddP70int(const std::string& name, int32_t value); - void AddP70bool(const std::string& name, bool value); - void AddP70double(const std::string& name, double value); - void AddP70numberA(const std::string& name, double value); - void AddP70color(const std::string& name, double r, double g, double b); - void AddP70colorA(const std::string& name, double r, double g, double b); - void AddP70vector(const std::string& name, double x, double y, double z); - void AddP70vectorA(const std::string& name, double x, double y, double z); - void AddP70string(const std::string& name, const std::string& value); - void AddP70enum(const std::string& name, int32_t value); - void AddP70time(const std::string& name, int64_t value); - - // template for custom P70 nodes. - // anything that doesn't fit in the above can be created manually. - template <typename... More> - void AddP70( - const std::string& name, - const std::string& type, - const std::string& type2, - const std::string& flags, - More... more - ) { - Node n("P"); - n.AddProperties(name, type, type2, flags, more...); - AddChild(n); - } - -public: // member functions for writing data to a file or stream - - // write the full node to the given file or stream - void Dump( - std::shared_ptr<Assimp::IOStream> outfile, - bool binary, int indent - ); - void Dump(Assimp::StreamWriterLE &s, bool binary, int indent); - - // these other functions are for writing data piece by piece. - // they must be used carefully. - // for usage examples see FBXExporter.cpp. - void Begin(Assimp::StreamWriterLE &s, bool binary, int indent); - void DumpProperties(Assimp::StreamWriterLE& s, bool binary, int indent); - void EndProperties(Assimp::StreamWriterLE &s, bool binary, int indent); - void EndProperties( - Assimp::StreamWriterLE &s, bool binary, int indent, - size_t num_properties - ); - void BeginChildren(Assimp::StreamWriterLE &s, bool binary, int indent); - void DumpChildren(Assimp::StreamWriterLE& s, bool binary, int indent); - void End( - Assimp::StreamWriterLE &s, bool binary, int indent, - bool has_children - ); - -private: // internal functions used for writing - - void DumpBinary(Assimp::StreamWriterLE &s); - void DumpAscii(Assimp::StreamWriterLE &s, int indent); - void DumpAscii(std::ostream &s, int indent); - - void BeginBinary(Assimp::StreamWriterLE &s); - void DumpPropertiesBinary(Assimp::StreamWriterLE& s); - void EndPropertiesBinary(Assimp::StreamWriterLE &s); - void EndPropertiesBinary(Assimp::StreamWriterLE &s, size_t num_properties); - void DumpChildrenBinary(Assimp::StreamWriterLE& s); - void EndBinary(Assimp::StreamWriterLE &s, bool has_children); - - void BeginAscii(std::ostream &s, int indent); - void DumpPropertiesAscii(std::ostream &s, int indent); - void BeginChildrenAscii(std::ostream &s, int indent); - void DumpChildrenAscii(std::ostream &s, int indent); - void EndAscii(std::ostream &s, int indent, bool has_children); - -private: // data used for binary dumps - size_t start_pos; // starting position in stream - size_t end_pos; // ending position in stream - size_t property_start; // starting position of property section - -public: // static member functions - - // convenience function to create a node with a single property, - // and write it to the stream. 
- template <typename T> - static void WritePropertyNode( - const std::string& name, - const T value, - Assimp::StreamWriterLE& s, - bool binary, int indent - ) { - FBX::FBXExportProperty p(value); - FBX::Node node(name, p); - node.Dump(s, binary, indent); - } - - // convenience function to create and write a property node, - // holding a single property which is an array of values. - // does not copy the data, so is efficient for large arrays. - static void WritePropertyNode( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s, - bool binary, int indent - ); - - // convenience function to create and write a property node, - // holding a single property which is an array of values. - // does not copy the data, so is efficient for large arrays. - static void WritePropertyNode( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s, - bool binary, int indent - ); - -private: // static helper functions - static void WritePropertyNodeAscii( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s, - int indent - ); - static void WritePropertyNodeAscii( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s, - int indent - ); - static void WritePropertyNodeBinary( - const std::string& name, - const std::vector<double>& v, - Assimp::StreamWriterLE& s - ); - static void WritePropertyNodeBinary( - const std::string& name, - const std::vector<int32_t>& v, - Assimp::StreamWriterLE& s - ); - -}; -} - -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER - -#endif // AI_FBXEXPORTNODE_H_INC diff --git a/thirdparty/assimp/code/FBX/FBXExportProperty.cpp b/thirdparty/assimp/code/FBX/FBXExportProperty.cpp deleted file mode 100644 index f2a63b72b9..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExportProperty.cpp +++ /dev/null @@ -1,385 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ -#ifndef ASSIMP_BUILD_NO_EXPORT -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include "FBXExportProperty.h" - -#include <assimp/StreamWriter.h> // StreamWriterLE -#include <assimp/Exceptional.h> // DeadlyExportError - -#include <string> -#include <vector> -#include <ostream> -#include <locale> -#include <sstream> // ostringstream - -namespace Assimp { -namespace FBX { - -// constructors for single element properties - -FBXExportProperty::FBXExportProperty(bool v) -: type('C') -, data(1, uint8_t(v)) {} - -FBXExportProperty::FBXExportProperty(int16_t v) -: type('Y') -, data(2) { - uint8_t* d = data.data(); - (reinterpret_cast<int16_t*>(d))[0] = v; -} - -FBXExportProperty::FBXExportProperty(int32_t v) -: type('I') -, data(4) { - uint8_t* d = data.data(); - (reinterpret_cast<int32_t*>(d))[0] = v; -} - -FBXExportProperty::FBXExportProperty(float v) -: type('F') -, data(4) { - uint8_t* d = data.data(); - (reinterpret_cast<float*>(d))[0] = v; -} - -FBXExportProperty::FBXExportProperty(double v) -: type('D') -, data(8) { - uint8_t* d = data.data(); - (reinterpret_cast<double*>(d))[0] = v; -} - -FBXExportProperty::FBXExportProperty(int64_t v) -: type('L') -, data(8) { - uint8_t* d = data.data(); - (reinterpret_cast<int64_t*>(d))[0] = v; -} - -// constructors for array-type properties - -FBXExportProperty::FBXExportProperty(const char* c, bool raw) -: FBXExportProperty(std::string(c), raw) { - // empty -} - -// strings can either be saved as "raw" (R) data, or "string" (S) data -FBXExportProperty::FBXExportProperty(const std::string& s, bool raw) -: type(raw ? 'R' : 'S') -, data(s.size()) { - for (size_t i = 0; i < s.size(); ++i) { - data[i] = uint8_t(s[i]); - } -} - -FBXExportProperty::FBXExportProperty(const std::vector<uint8_t>& r) -: type('R') -, data(r) { - // empty -} - -FBXExportProperty::FBXExportProperty(const std::vector<int32_t>& va) -: type('i') -, data(4 * va.size() ) { - int32_t* d = reinterpret_cast<int32_t*>(data.data()); - for (size_t i = 0; i < va.size(); ++i) { - d[i] = va[i]; - } -} - -FBXExportProperty::FBXExportProperty(const std::vector<int64_t>& va) -: type('l') -, data(8 * va.size()) { - int64_t* d = reinterpret_cast<int64_t*>(data.data()); - for (size_t i = 0; i < va.size(); ++i) { - d[i] = va[i]; - } -} - -FBXExportProperty::FBXExportProperty(const std::vector<float>& va) -: type('f') -, data(4 * va.size()) { - float* d = reinterpret_cast<float*>(data.data()); - for (size_t i = 0; i < va.size(); ++i) { - d[i] = va[i]; - } -} - -FBXExportProperty::FBXExportProperty(const std::vector<double>& va) -: type('d') -, data(8 * va.size()) { - double* d = reinterpret_cast<double*>(data.data()); - for (size_t i = 0; i < va.size(); ++i) { - d[i] = va[i]; - } -} - -FBXExportProperty::FBXExportProperty(const aiMatrix4x4& vm) -: type('d') -, data(8 * 16) { - double* d = reinterpret_cast<double*>(data.data()); - for (unsigned int c = 0; c < 4; ++c) { - for (unsigned int r = 0; r < 4; ++r) { - d[4 * c + r] = vm[r][c]; - } - } -} - -// public member functions - -size_t FBXExportProperty::size() { - switch (type) { - case 'C': - case 'Y': - case 'I': - case 'F': - case 'D': - case 'L': - return data.size() + 1; - case 'S': - case 'R': - return data.size() + 5; - case 'i': - case 'd': - return data.size() + 13; - default: - throw DeadlyExportError("Requested size on property of unknown type"); - } -} - -void FBXExportProperty::DumpBinary(Assimp::StreamWriterLE& s) { - s.PutU1(type); - uint8_t* d = data.data(); - size_t 
N; - switch (type) { - case 'C': s.PutU1(*(reinterpret_cast<uint8_t*>(d))); return; - case 'Y': s.PutI2(*(reinterpret_cast<int16_t*>(d))); return; - case 'I': s.PutI4(*(reinterpret_cast<int32_t*>(d))); return; - case 'F': s.PutF4(*(reinterpret_cast<float*>(d))); return; - case 'D': s.PutF8(*(reinterpret_cast<double*>(d))); return; - case 'L': s.PutI8(*(reinterpret_cast<int64_t*>(d))); return; - case 'S': - case 'R': - s.PutU4(uint32_t(data.size())); - for (size_t i = 0; i < data.size(); ++i) { s.PutU1(data[i]); } - return; - case 'i': - N = data.size() / 4; - s.PutU4(uint32_t(N)); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - // TODO: compress if large? - s.PutU4(uint32_t(data.size())); // data size - for (size_t i = 0; i < N; ++i) { - s.PutI4((reinterpret_cast<int32_t*>(d))[i]); - } - return; - case 'l': - N = data.size() / 8; - s.PutU4(uint32_t(N)); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - // TODO: compress if large? - s.PutU4(uint32_t(data.size())); // data size - for (size_t i = 0; i < N; ++i) { - s.PutI8((reinterpret_cast<int64_t*>(d))[i]); - } - return; - case 'f': - N = data.size() / 4; - s.PutU4(uint32_t(N)); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - // TODO: compress if large? - s.PutU4(uint32_t(data.size())); // data size - for (size_t i = 0; i < N; ++i) { - s.PutF4((reinterpret_cast<float*>(d))[i]); - } - return; - case 'd': - N = data.size() / 8; - s.PutU4(uint32_t(N)); // number of elements - s.PutU4(0); // no encoding (1 would be zip-compressed) - // TODO: compress if large? - s.PutU4(uint32_t(data.size())); // data size - for (size_t i = 0; i < N; ++i) { - s.PutF8((reinterpret_cast<double*>(d))[i]); - } - return; - default: - std::ostringstream err; - err << "Tried to dump property with invalid type '"; - err << type << "'!"; - throw DeadlyExportError(err.str()); - } -} - -void FBXExportProperty::DumpAscii(Assimp::StreamWriterLE& outstream, int indent) { - std::ostringstream ss; - ss.imbue(std::locale::classic()); - ss.precision(15); // this seems to match official FBX SDK exports - DumpAscii(ss, indent); - outstream.PutString(ss.str()); -} - -void FBXExportProperty::DumpAscii(std::ostream& s, int indent) { - // no writing type... or anything. just shove it into the stream. - uint8_t* d = data.data(); - size_t N; - size_t swap = data.size(); - size_t count = 0; - switch (type) { - case 'C': - if (*(reinterpret_cast<uint8_t*>(d))) { s << 'T'; } - else { s << 'F'; } - return; - case 'Y': s << *(reinterpret_cast<int16_t*>(d)); return; - case 'I': s << *(reinterpret_cast<int32_t*>(d)); return; - case 'F': s << *(reinterpret_cast<float*>(d)); return; - case 'D': s << *(reinterpret_cast<double*>(d)); return; - case 'L': s << *(reinterpret_cast<int64_t*>(d)); return; - case 'S': - // first search to see if it has "\x00\x01" in it - - // which separates fields which are reversed in the ascii version. - // yeah. - // FBX, yeah. 
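// A hedged illustration, not from the original source: binary FBX stores
// object names as "<name>\x00\x01<class>", e.g. "Cube\x00\x01Model", while the
// ASCII form reverses the two halves around a "::" separator, giving
// "Model::Cube". The loop below locates that "\x00\x01" separator so the two
// halves can be swapped when writing ASCII; "Cube" is only an example name.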
- for (size_t i = 0; i < data.size(); ++i) { - if (data[i] == '\0') { - swap = i; - break; - } - } - case 'R': - s << '"'; - // we might as well check this now, - // probably it will never happen - for (size_t i = 0; i < data.size(); ++i) { - char c = data[i]; - if (c == '"') { - throw runtime_error("can't handle quotes in property string"); - } - } - // first write the SWAPPED member (if any) - for (size_t i = swap + 2; i < data.size(); ++i) { - char c = data[i]; - s << c; - } - // then a separator - if (swap != data.size()) { - s << "::"; - } - // then the initial member - for (size_t i = 0; i < swap; ++i) { - char c = data[i]; - s << c; - } - s << '"'; - return; - case 'i': - N = data.size() / 4; // number of elements - s << '*' << N << " {\n"; - for (int i = 0; i < indent + 1; ++i) { s << '\t'; } - s << "a: "; - for (size_t i = 0; i < N; ++i) { - if (i > 0) { s << ','; } - if (count++ > 120) { s << '\n'; count = 0; } - s << (reinterpret_cast<int32_t*>(d))[i]; - } - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << "} "; - return; - case 'l': - N = data.size() / 8; - s << '*' << N << " {\n"; - for (int i = 0; i < indent + 1; ++i) { s << '\t'; } - s << "a: "; - for (size_t i = 0; i < N; ++i) { - if (i > 0) { s << ','; } - if (count++ > 120) { s << '\n'; count = 0; } - s << (reinterpret_cast<int64_t*>(d))[i]; - } - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << "} "; - return; - case 'f': - N = data.size() / 4; - s << '*' << N << " {\n"; - for (int i = 0; i < indent + 1; ++i) { s << '\t'; } - s << "a: "; - for (size_t i = 0; i < N; ++i) { - if (i > 0) { s << ','; } - if (count++ > 120) { s << '\n'; count = 0; } - s << (reinterpret_cast<float*>(d))[i]; - } - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << "} "; - return; - case 'd': - N = data.size() / 8; - s << '*' << N << " {\n"; - for (int i = 0; i < indent + 1; ++i) { s << '\t'; } - s << "a: "; - // set precision to something that can handle doubles - s.precision(15); - for (size_t i = 0; i < N; ++i) { - if (i > 0) { s << ','; } - if (count++ > 120) { s << '\n'; count = 0; } - s << (reinterpret_cast<double*>(d))[i]; - } - s << '\n'; - for (int i = 0; i < indent; ++i) { s << '\t'; } - s << "} "; - return; - default: - std::ostringstream err; - err << "Tried to dump property with invalid type '"; - err << type << "'!"; - throw runtime_error(err.str()); - } -} - -} // Namespace FBX -} // Namespace Assimp - -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER -#endif // ASSIMP_BUILD_NO_EXPORT diff --git a/thirdparty/assimp/code/FBX/FBXExportProperty.h b/thirdparty/assimp/code/FBX/FBXExportProperty.h deleted file mode 100644 index d692fe6ee3..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExportProperty.h +++ /dev/null @@ -1,129 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXExportProperty.h -* Declares the FBX::Property helper class for fbx export. -*/ -#ifndef AI_FBXEXPORTPROPERTY_H_INC -#define AI_FBXEXPORTPROPERTY_H_INC - -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include <assimp/types.h> // aiMatrix4x4 -#include <assimp/StreamWriter.h> // StreamWriterLE - -#include <string> -#include <vector> -#include <ostream> -#include <type_traits> // is_void - -namespace Assimp { -namespace FBX { - -/** @brief FBX::Property - * - * Holds a value of any of FBX's recognized types, - * each represented by a particular one-character code. - * C : 1-byte uint8, usually 0x00 or 0x01 to represent boolean false and true - * Y : 2-byte int16 - * I : 4-byte int32 - * F : 4-byte float - * D : 8-byte double - * L : 8-byte int64 - * i : array of int32 - * f : array of float - * d : array of double - * l : array of int64 - * b : array of 1-byte booleans (0x00 or 0x01) - * S : string (array of 1-byte char) - * R : raw data (array of bytes) - */ -class FBXExportProperty { -public: - // constructors for basic types. - // all explicit to avoid accidental typecasting - explicit FBXExportProperty(bool v); - // TODO: determine if there is actually a byte type, - // or if this always means <bool>. 'C' seems to imply <char>, - // so possibly the above was intended to represent both. - explicit FBXExportProperty(int16_t v); - explicit FBXExportProperty(int32_t v); - explicit FBXExportProperty(float v); - explicit FBXExportProperty(double v); - explicit FBXExportProperty(int64_t v); - // strings can either be stored as 'R' (raw) or 'S' (string) type - explicit FBXExportProperty(const char* c, bool raw = false); - explicit FBXExportProperty(const std::string& s, bool raw = false); - explicit FBXExportProperty(const std::vector<uint8_t>& r); - explicit FBXExportProperty(const std::vector<int32_t>& va); - explicit FBXExportProperty(const std::vector<int64_t>& va); - explicit FBXExportProperty(const std::vector<double>& va); - explicit FBXExportProperty(const std::vector<float>& va); - explicit FBXExportProperty(const aiMatrix4x4& vm); - - // this will catch any type not defined above, - // so that we don't accidentally convert something we don't want. - // for example (const char*) --> (bool)... 
seriously wtf C++ - template <class T> - explicit FBXExportProperty(T v) : type('X') { - static_assert(std::is_void<T>::value, "TRIED TO CREATE FBX PROPERTY WITH UNSUPPORTED TYPE, CHECK YOUR PROPERTY INSTANTIATION"); - } // note: no line wrap so it appears verbatim on the compiler error - - // the size of this property node in a binary file, in bytes - size_t size(); - - // write this property node as binary data to the given stream - void DumpBinary(Assimp::StreamWriterLE& s); - void DumpAscii(Assimp::StreamWriterLE& s, int indent = 0); - void DumpAscii(std::ostream& s, int indent = 0); - // note: make sure the ostream is in classic "C" locale - -private: - char type; - std::vector<uint8_t> data; -}; - -} // Namespace FBX -} // Namespace Assimp - -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER - -#endif // AI_FBXEXPORTPROPERTY_H_INC diff --git a/thirdparty/assimp/code/FBX/FBXExporter.cpp b/thirdparty/assimp/code/FBX/FBXExporter.cpp deleted file mode 100644 index 9316dc4f02..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExporter.cpp +++ /dev/null @@ -1,2556 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#ifndef ASSIMP_BUILD_NO_EXPORT -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include "FBXExporter.h" -#include "FBXExportNode.h" -#include "FBXExportProperty.h" -#include "FBXCommon.h" -#include "FBXUtil.h" - -#include <assimp/version.h> // aiGetVersion -#include <assimp/IOSystem.hpp> -#include <assimp/Exporter.hpp> -#include <assimp/DefaultLogger.hpp> -#include <assimp/StreamWriter.h> // StreamWriterLE -#include <assimp/Exceptional.h> // DeadlyExportError -#include <assimp/material.h> // aiTextureType -#include <assimp/scene.h> -#include <assimp/mesh.h> - -// Header files, standard library. 
-#include <memory> // shared_ptr -#include <string> -#include <sstream> // stringstream -#include <ctime> // localtime, tm_* -#include <map> -#include <set> -#include <vector> -#include <array> -#include <unordered_set> -#include <numeric> - -// RESOURCES: -// https://code.blender.org/2013/08/fbx-binary-file-format-specification/ -// https://wiki.blender.org/index.php/User:Mont29/Foundation/FBX_File_Structure - -const ai_real DEG = ai_real( 57.29577951308232087679815481 ); // degrees per radian - -using namespace Assimp; -using namespace Assimp::FBX; - -// some constants that we'll use for writing metadata -namespace Assimp { -namespace FBX { - const std::string EXPORT_VERSION_STR = "7.4.0"; - const uint32_t EXPORT_VERSION_INT = 7400; // 7.4 == 2014/2015 - // FBX files have some hashed values that depend on the creation time field, - // but for now we don't actually know how to generate these. - // what we can do is set them to a known-working version. - // this is the data that Blender uses in their FBX export process. - const std::string GENERIC_CTIME = "1970-01-01 10:00:00:000"; - const std::string GENERIC_FILEID = - "\x28\xb3\x2a\xeb\xb6\x24\xcc\xc2\xbf\xc8\xb0\x2a\xa9\x2b\xfc\xf1"; - const std::string GENERIC_FOOTID = - "\xfa\xbc\xab\x09\xd0\xc8\xd4\x66\xb1\x76\xfb\x83\x1c\xf7\x26\x7e"; - const std::string FOOT_MAGIC = - "\xf8\x5a\x8c\x6a\xde\xf5\xd9\x7e\xec\xe9\x0c\xe3\x75\x8f\x29\x0b"; - const std::string COMMENT_UNDERLINE = - ";------------------------------------------------------------------"; -} - - // --------------------------------------------------------------------- - // Worker function for exporting a scene to binary FBX. - // Prototyped and registered in Exporter.cpp - void ExportSceneFBX ( - const char* pFile, - IOSystem* pIOSystem, - const aiScene* pScene, - const ExportProperties* pProperties - ){ - // initialize the exporter - FBXExporter exporter(pScene, pProperties); - - // perform binary export - exporter.ExportBinary(pFile, pIOSystem); - } - - // --------------------------------------------------------------------- - // Worker function for exporting a scene to ASCII FBX. - // Prototyped and registered in Exporter.cpp - void ExportSceneFBXA ( - const char* pFile, - IOSystem* pIOSystem, - const aiScene* pScene, - const ExportProperties* pProperties - - ){ - // initialize the exporter - FBXExporter exporter(pScene, pProperties); - - // perform ascii export - exporter.ExportAscii(pFile, pIOSystem); - } - -} // end of namespace Assimp - -FBXExporter::FBXExporter ( const aiScene* pScene, const ExportProperties* pProperties ) -: binary(false) -, mScene(pScene) -, mProperties(pProperties) -, outfile() -, connections() -, mesh_uids() -, material_uids() -, node_uids() { - // will probably need to determine UIDs, connections, etc here. - // basically anything that needs to be known - // before we start writing sections to the stream. -} - -void FBXExporter::ExportBinary ( - const char* pFile, - IOSystem* pIOSystem -){ - // remember that we're exporting in binary mode - binary = true; - - // we're not currently using these preferences, - // but clang will cry about it if we never touch it. 
- // TODO: some of these might be relevant to export - (void)mProperties; - - // open the indicated file for writing (in binary mode) - outfile.reset(pIOSystem->Open(pFile,"wb")); - if (!outfile) { - throw DeadlyExportError( - "could not open output .fbx file: " + std::string(pFile) - ); - } - - // first a binary-specific file header - WriteBinaryHeader(); - - // the rest of the file is in node entries. - // we have to serialize each entry before we write to the output, - // as the first thing we write is the byte offset of the _next_ entry. - // Either that or we can skip back to write the offset when we finish. - WriteAllNodes(); - - // finally we have a binary footer to the file - WriteBinaryFooter(); - - // explicitly release file pointer, - // so we don't have to rely on class destruction. - outfile.reset(); -} - -void FBXExporter::ExportAscii ( - const char* pFile, - IOSystem* pIOSystem -){ - // remember that we're exporting in ascii mode - binary = false; - - // open the indicated file for writing in text mode - outfile.reset(pIOSystem->Open(pFile,"wt")); - if (!outfile) { - throw DeadlyExportError( - "could not open output .fbx file: " + std::string(pFile) - ); - } - - // write the ascii header - WriteAsciiHeader(); - - // write all the sections - WriteAllNodes(); - - // make sure the file ends with a newline. - // note: if the file is opened in text mode, - // this should do the right cross-platform thing. - outfile->Write("\n", 1, 1); - - // explicitly release file pointer, - // so we don't have to rely on class destruction. - outfile.reset(); -} - -void FBXExporter::WriteAsciiHeader() -{ - // basically just a comment at the top of the file - std::stringstream head; - head << "; FBX " << EXPORT_VERSION_STR << " project file\n"; - head << "; Created by the Open Asset Import Library (Assimp)\n"; - head << "; http://assimp.org\n"; - head << "; -------------------------------------------------\n"; - const std::string ascii_header = head.str(); - outfile->Write(ascii_header.c_str(), ascii_header.size(), 1); -} - -void FBXExporter::WriteAsciiSectionHeader(const std::string& title) -{ - StreamWriterLE outstream(outfile); - std::stringstream s; - s << "\n\n; " << title << '\n'; - s << FBX::COMMENT_UNDERLINE << "\n"; - outstream.PutString(s.str()); -} - -void FBXExporter::WriteBinaryHeader() -{ - // first a specific sequence of 23 bytes, always the same - const char binary_header[24] = "Kaydara FBX Binary\x20\x20\x00\x1a\x00"; - outfile->Write(binary_header, 1, 23); - - // then FBX version number, "multiplied" by 1000, as little-endian uint32. - // so 7.3 becomes 7300 == 0x841C0000, 7.4 becomes 7400 == 0xE81C0000, etc - { - StreamWriterLE outstream(outfile); - outstream.PutU4(EXPORT_VERSION_INT); - } // StreamWriter destructor writes the data to the file - - // after this the node data starts immediately - // (probably with the FBXHEaderExtension node) -} - -void FBXExporter::WriteBinaryFooter() -{ - outfile->Write(NULL_RECORD.c_str(), NULL_RECORD.size(), 1); - - outfile->Write(GENERIC_FOOTID.c_str(), GENERIC_FOOTID.size(), 1); - - // here some padding is added for alignment to 16 bytes. - // if already aligned, the full 16 bytes is added. 
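// Worked example of the padding rule above (added for clarity, not in the
// original source): with pos % 16 == 9 the loop below writes 16 - 9 = 7 zero
// bytes, and with pos already on a 16-byte boundary (pos % 16 == 0) it writes
// a full 16 zero bytes, matching the comment "if already aligned, the full
// 16 bytes is added".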
- size_t pos = outfile->Tell(); - size_t pad = 16 - (pos % 16); - for (size_t i = 0; i < pad; ++i) { - outfile->Write("\x00", 1, 1); - } - - // not sure what this is, but it seems to always be 0 in modern files - for (size_t i = 0; i < 4; ++i) { - outfile->Write("\x00", 1, 1); - } - - // now the file version again - { - StreamWriterLE outstream(outfile); - outstream.PutU4(EXPORT_VERSION_INT); - } // StreamWriter destructor writes the data to the file - - // and finally some binary footer added to all files - for (size_t i = 0; i < 120; ++i) { - outfile->Write("\x00", 1, 1); - } - outfile->Write(FOOT_MAGIC.c_str(), FOOT_MAGIC.size(), 1); -} - -void FBXExporter::WriteAllNodes () -{ - // header - // (and fileid, creation time, creator, if binary) - WriteHeaderExtension(); - - // global settings - WriteGlobalSettings(); - - // documents - WriteDocuments(); - - // references - WriteReferences(); - - // definitions - WriteDefinitions(); - - // objects - WriteObjects(); - - // connections - WriteConnections(); - - // WriteTakes? (deprecated since at least 2015 (fbx 7.4)) -} - -//FBXHeaderExtension top-level node -void FBXExporter::WriteHeaderExtension () -{ - if (!binary) { - // no title, follows directly from the top comment - } - FBX::Node n("FBXHeaderExtension"); - StreamWriterLE outstream(outfile); - int indent = 0; - - // begin node - n.Begin(outstream, binary, indent); - - // write properties - // (none) - - // finish properties - n.EndProperties(outstream, binary, indent, 0); - - // begin children - n.BeginChildren(outstream, binary, indent); - - indent = 1; - - // write child nodes - FBX::Node::WritePropertyNode( - "FBXHeaderVersion", int32_t(1003), outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "FBXVersion", int32_t(EXPORT_VERSION_INT), outstream, binary, indent - ); - if (binary) { - FBX::Node::WritePropertyNode( - "EncryptionType", int32_t(0), outstream, binary, indent - ); - } - - FBX::Node CreationTimeStamp("CreationTimeStamp"); - time_t rawtime; - time(&rawtime); - struct tm * now = localtime(&rawtime); - CreationTimeStamp.AddChild("Version", int32_t(1000)); - CreationTimeStamp.AddChild("Year", int32_t(now->tm_year + 1900)); - CreationTimeStamp.AddChild("Month", int32_t(now->tm_mon + 1)); - CreationTimeStamp.AddChild("Day", int32_t(now->tm_mday)); - CreationTimeStamp.AddChild("Hour", int32_t(now->tm_hour)); - CreationTimeStamp.AddChild("Minute", int32_t(now->tm_min)); - CreationTimeStamp.AddChild("Second", int32_t(now->tm_sec)); - CreationTimeStamp.AddChild("Millisecond", int32_t(0)); - CreationTimeStamp.Dump(outstream, binary, indent); - - std::stringstream creator; - creator << "Open Asset Import Library (Assimp) " << aiGetVersionMajor() - << "." << aiGetVersionMinor() << "." << aiGetVersionRevision(); - FBX::Node::WritePropertyNode( - "Creator", creator.str(), outstream, binary, indent - ); - - //FBX::Node sceneinfo("SceneInfo"); - //sceneinfo.AddProperty("GlobalInfo" + FBX::SEPARATOR + "SceneInfo"); - // not sure if any of this is actually needed, - // so just write an empty node for now. - //sceneinfo.Dump(outstream, binary, indent); - - indent = 0; - - // finish node - n.End(outstream, binary, indent, true); - - // that's it for FBXHeaderExtension... 
- if (!binary) { return; } - - // but binary files also need top-level FileID, CreationTime, Creator: - std::vector<uint8_t> raw(GENERIC_FILEID.size()); - for (size_t i = 0; i < GENERIC_FILEID.size(); ++i) { - raw[i] = uint8_t(GENERIC_FILEID[i]); - } - FBX::Node::WritePropertyNode( - "FileId", raw, outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "CreationTime", GENERIC_CTIME, outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "Creator", creator.str(), outstream, binary, indent - ); -} - -void FBXExporter::WriteGlobalSettings () -{ - if (!binary) { - // no title, follows directly from the header extension - } - FBX::Node gs("GlobalSettings"); - gs.AddChild("Version", int32_t(1000)); - - FBX::Node p("Properties70"); - p.AddP70int("UpAxis", 1); - p.AddP70int("UpAxisSign", 1); - p.AddP70int("FrontAxis", 2); - p.AddP70int("FrontAxisSign", 1); - p.AddP70int("CoordAxis", 0); - p.AddP70int("CoordAxisSign", 1); - p.AddP70int("OriginalUpAxis", 1); - p.AddP70int("OriginalUpAxisSign", 1); - p.AddP70double("UnitScaleFactor", 1.0); - p.AddP70double("OriginalUnitScaleFactor", 1.0); - p.AddP70color("AmbientColor", 0.0, 0.0, 0.0); - p.AddP70string("DefaultCamera", "Producer Perspective"); - p.AddP70enum("TimeMode", 11); - p.AddP70enum("TimeProtocol", 2); - p.AddP70enum("SnapOnFrameMode", 0); - p.AddP70time("TimeSpanStart", 0); // TODO: animation support - p.AddP70time("TimeSpanStop", FBX::SECOND); // TODO: animation support - p.AddP70double("CustomFrameRate", -1.0); - p.AddP70("TimeMarker", "Compound", "", ""); // not sure what this is - p.AddP70int("CurrentTimeMarker", -1); - gs.AddChild(p); - - gs.Dump(outfile, binary, 0); -} - -void FBXExporter::WriteDocuments () -{ - if (!binary) { - WriteAsciiSectionHeader("Documents Description"); - } - - // not sure what the use of multiple documents would be, - // or whether any end-application supports it - FBX::Node docs("Documents"); - docs.AddChild("Count", int32_t(1)); - FBX::Node doc("Document"); - - // generate uid - int64_t uid = generate_uid(); - doc.AddProperties(uid, "", "Scene"); - FBX::Node p("Properties70"); - p.AddP70("SourceObject", "object", "", ""); // what is this even for? - p.AddP70string("ActiveAnimStackName", ""); // should do this properly? - doc.AddChild(p); - - // UID for root node in scene hierarchy. - // always set to 0 in the case of a single document. - // not sure what happens if more than one document exists, - // but that won't matter to us as we're exporting a single scene. - doc.AddChild("RootNode", int64_t(0)); - - docs.AddChild(doc); - docs.Dump(outfile, binary, 0); -} - -void FBXExporter::WriteReferences () -{ - if (!binary) { - WriteAsciiSectionHeader("Document References"); - } - // always empty for now. - // not really sure what this is for. 
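// A sketch of the expected output, assuming an ASCII export (not part of the
// original source): because force_has_children is set below, the otherwise
// empty node still renders with an opening and closing brace, roughly
//
//     References:  {
//     }
//
// In binary mode the same flag makes EndBinary append the NULL_RECORD sentinel.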
- FBX::Node n("References"); - n.force_has_children = true; - n.Dump(outfile, binary, 0); -} - - -// --------------------------------------------------------------- -// some internal helper functions used for writing the definitions -// (before any actual data is written) -// --------------------------------------------------------------- - -size_t count_nodes(const aiNode* n) { - size_t count = 1; - for (size_t i = 0; i < n->mNumChildren; ++i) { - count += count_nodes(n->mChildren[i]); - } - return count; -} - -bool has_phong_mat(const aiScene* scene) -{ - // just search for any material with a shininess exponent - for (size_t i = 0; i < scene->mNumMaterials; ++i) { - aiMaterial* mat = scene->mMaterials[i]; - float shininess = 0; - mat->Get(AI_MATKEY_SHININESS, shininess); - if (shininess > 0) { - return true; - } - } - return false; -} - -size_t count_images(const aiScene* scene) { - std::unordered_set<std::string> images; - aiString texpath; - for (size_t i = 0; i < scene->mNumMaterials; ++i) { - aiMaterial* mat = scene->mMaterials[i]; - for ( - size_t tt = aiTextureType_DIFFUSE; - tt < aiTextureType_UNKNOWN; - ++tt - ){ - const aiTextureType textype = static_cast<aiTextureType>(tt); - const size_t texcount = mat->GetTextureCount(textype); - for (unsigned int j = 0; j < texcount; ++j) { - mat->GetTexture(textype, j, &texpath); - images.insert(std::string(texpath.C_Str())); - } - } - } - return images.size(); -} - -size_t count_textures(const aiScene* scene) { - size_t count = 0; - for (size_t i = 0; i < scene->mNumMaterials; ++i) { - aiMaterial* mat = scene->mMaterials[i]; - for ( - size_t tt = aiTextureType_DIFFUSE; - tt < aiTextureType_UNKNOWN; - ++tt - ){ - // TODO: handle layered textures - if (mat->GetTextureCount(static_cast<aiTextureType>(tt)) > 0) { - count += 1; - } - } - } - return count; -} - -size_t count_deformers(const aiScene* scene) { - size_t count = 0; - for (size_t i = 0; i < scene->mNumMeshes; ++i) { - const size_t n = scene->mMeshes[i]->mNumBones; - if (n) { - // 1 main deformer, 1 subdeformer per bone - count += n + 1; - } - } - return count; -} - -void FBXExporter::WriteDefinitions () -{ - // basically this is just bookkeeping: - // determining how many of each type of object there are - // and specifying the base properties to use when otherwise unspecified. - - // ascii section header - if (!binary) { - WriteAsciiSectionHeader("Object definitions"); - } - - // we need to count the objects - int32_t count; - int32_t total_count = 0; - - // and store them - std::vector<FBX::Node> object_nodes; - FBX::Node n, pt, p; - - // GlobalSettings - // this seems to always be here in Maya exports - n = FBX::Node("ObjectType", "GlobalSettings"); - count = 1; - n.AddChild("Count", count); - object_nodes.push_back(n); - total_count += count; - - // AnimationStack / FbxAnimStack - // this seems to always be here in Maya exports, - // but no harm seems to come of leaving it out. 
- count = mScene->mNumAnimations; - if (count) { - n = FBX::Node("ObjectType", "AnimationStack"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxAnimStack"); - p = FBX::Node("Properties70"); - p.AddP70string("Description", ""); - p.AddP70time("LocalStart", 0); - p.AddP70time("LocalStop", 0); - p.AddP70time("ReferenceStart", 0); - p.AddP70time("ReferenceStop", 0); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // AnimationLayer / FbxAnimLayer - // this seems to always be here in Maya exports, - // but no harm seems to come of leaving it out. - // Assimp doesn't support animation layers, - // so there will be one per aiAnimation - count = mScene->mNumAnimations; - if (count) { - n = FBX::Node("ObjectType", "AnimationLayer"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FBXAnimLayer"); - p = FBX::Node("Properties70"); - p.AddP70("Weight", "Number", "", "A", double(100)); - p.AddP70bool("Mute", 0); - p.AddP70bool("Solo", 0); - p.AddP70bool("Lock", 0); - p.AddP70color("Color", 0.8, 0.8, 0.8); - p.AddP70("BlendMode", "enum", "", "", int32_t(0)); - p.AddP70("RotationAccumulationMode", "enum", "", "", int32_t(0)); - p.AddP70("ScaleAccumulationMode", "enum", "", "", int32_t(0)); - p.AddP70("BlendModeBypass", "ULongLong", "", "", int64_t(0)); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // NodeAttribute - // this is completely absurd. - // there can only be one "NodeAttribute" template, - // but FbxSkeleton, FbxCamera, FbxLight all are "NodeAttributes". - // so if only one exists we should set the template for that, - // otherwise... we just pick one :/. - // the others have to set all their properties every instance, - // because there's no template. - count = 1; // TODO: select properly - if (count) { - // FbxSkeleton - n = FBX::Node("ObjectType", "NodeAttribute"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxSkeleton"); - p = FBX::Node("Properties70"); - p.AddP70color("Color", 0.8, 0.8, 0.8); - p.AddP70double("Size", 33.333333333333); - p.AddP70("LimbLength", "double", "Number", "H", double(1)); - // note: not sure what the "H" flag is for - hidden? 
- pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // Model / FbxNode - // <~~ node hierarchy - count = int32_t(count_nodes(mScene->mRootNode)) - 1; // (not counting root node) - if (count) { - n = FBX::Node("ObjectType", "Model"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxNode"); - p = FBX::Node("Properties70"); - p.AddP70enum("QuaternionInterpolate", 0); - p.AddP70vector("RotationOffset", 0.0, 0.0, 0.0); - p.AddP70vector("RotationPivot", 0.0, 0.0, 0.0); - p.AddP70vector("ScalingOffset", 0.0, 0.0, 0.0); - p.AddP70vector("ScalingPivot", 0.0, 0.0, 0.0); - p.AddP70bool("TranslationActive", 0); - p.AddP70vector("TranslationMin", 0.0, 0.0, 0.0); - p.AddP70vector("TranslationMax", 0.0, 0.0, 0.0); - p.AddP70bool("TranslationMinX", 0); - p.AddP70bool("TranslationMinY", 0); - p.AddP70bool("TranslationMinZ", 0); - p.AddP70bool("TranslationMaxX", 0); - p.AddP70bool("TranslationMaxY", 0); - p.AddP70bool("TranslationMaxZ", 0); - p.AddP70enum("RotationOrder", 0); - p.AddP70bool("RotationSpaceForLimitOnly", 0); - p.AddP70double("RotationStiffnessX", 0.0); - p.AddP70double("RotationStiffnessY", 0.0); - p.AddP70double("RotationStiffnessZ", 0.0); - p.AddP70double("AxisLen", 10.0); - p.AddP70vector("PreRotation", 0.0, 0.0, 0.0); - p.AddP70vector("PostRotation", 0.0, 0.0, 0.0); - p.AddP70bool("RotationActive", 0); - p.AddP70vector("RotationMin", 0.0, 0.0, 0.0); - p.AddP70vector("RotationMax", 0.0, 0.0, 0.0); - p.AddP70bool("RotationMinX", 0); - p.AddP70bool("RotationMinY", 0); - p.AddP70bool("RotationMinZ", 0); - p.AddP70bool("RotationMaxX", 0); - p.AddP70bool("RotationMaxY", 0); - p.AddP70bool("RotationMaxZ", 0); - p.AddP70enum("InheritType", 0); - p.AddP70bool("ScalingActive", 0); - p.AddP70vector("ScalingMin", 0.0, 0.0, 0.0); - p.AddP70vector("ScalingMax", 1.0, 1.0, 1.0); - p.AddP70bool("ScalingMinX", 0); - p.AddP70bool("ScalingMinY", 0); - p.AddP70bool("ScalingMinZ", 0); - p.AddP70bool("ScalingMaxX", 0); - p.AddP70bool("ScalingMaxY", 0); - p.AddP70bool("ScalingMaxZ", 0); - p.AddP70vector("GeometricTranslation", 0.0, 0.0, 0.0); - p.AddP70vector("GeometricRotation", 0.0, 0.0, 0.0); - p.AddP70vector("GeometricScaling", 1.0, 1.0, 1.0); - p.AddP70double("MinDampRangeX", 0.0); - p.AddP70double("MinDampRangeY", 0.0); - p.AddP70double("MinDampRangeZ", 0.0); - p.AddP70double("MaxDampRangeX", 0.0); - p.AddP70double("MaxDampRangeY", 0.0); - p.AddP70double("MaxDampRangeZ", 0.0); - p.AddP70double("MinDampStrengthX", 0.0); - p.AddP70double("MinDampStrengthY", 0.0); - p.AddP70double("MinDampStrengthZ", 0.0); - p.AddP70double("MaxDampStrengthX", 0.0); - p.AddP70double("MaxDampStrengthY", 0.0); - p.AddP70double("MaxDampStrengthZ", 0.0); - p.AddP70double("PreferedAngleX", 0.0); - p.AddP70double("PreferedAngleY", 0.0); - p.AddP70double("PreferedAngleZ", 0.0); - p.AddP70("LookAtProperty", "object", "", ""); - p.AddP70("UpVectorProperty", "object", "", ""); - p.AddP70bool("Show", 1); - p.AddP70bool("NegativePercentShapeSupport", 1); - p.AddP70int("DefaultAttributeIndex", -1); - p.AddP70bool("Freeze", 0); - p.AddP70bool("LODBox", 0); - p.AddP70( - "Lcl Translation", "Lcl Translation", "", "A", - double(0), double(0), double(0) - ); - p.AddP70( - "Lcl Rotation", "Lcl Rotation", "", "A", - double(0), double(0), double(0) - ); - p.AddP70( - "Lcl Scaling", "Lcl Scaling", "", "A", - double(1), double(1), double(1) - ); - p.AddP70("Visibility", "Visibility", "", "A", double(1)); - p.AddP70( - "Visibility Inheritance", "Visibility Inheritance", "", "", - 
int32_t(1) - ); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // Geometry / FbxMesh - // <~~ aiMesh - count = mScene->mNumMeshes; - if (count) { - n = FBX::Node("ObjectType", "Geometry"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxMesh"); - p = FBX::Node("Properties70"); - p.AddP70color("Color", 0, 0, 0); - p.AddP70vector("BBoxMin", 0, 0, 0); - p.AddP70vector("BBoxMax", 0, 0, 0); - p.AddP70bool("Primary Visibility", 1); - p.AddP70bool("Casts Shadows", 1); - p.AddP70bool("Receive Shadows", 1); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // Material / FbxSurfacePhong, FbxSurfaceLambert, FbxSurfaceMaterial - // <~~ aiMaterial - // basically if there's any phong material this is defined as phong, - // and otherwise lambert. - // More complex materials cause a bare-bones FbxSurfaceMaterial definition - // and are treated specially, as they're not really supported by FBX. - // TODO: support Maya's Stingray PBS material - count = mScene->mNumMaterials; - if (count) { - bool has_phong = has_phong_mat(mScene); - n = FBX::Node("ObjectType", "Material"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate"); - if (has_phong) { - pt.AddProperty("FbxSurfacePhong"); - } else { - pt.AddProperty("FbxSurfaceLambert"); - } - p = FBX::Node("Properties70"); - if (has_phong) { - p.AddP70string("ShadingModel", "Phong"); - } else { - p.AddP70string("ShadingModel", "Lambert"); - } - p.AddP70bool("MultiLayer", 0); - p.AddP70colorA("EmissiveColor", 0.0, 0.0, 0.0); - p.AddP70numberA("EmissiveFactor", 1.0); - p.AddP70colorA("AmbientColor", 0.2, 0.2, 0.2); - p.AddP70numberA("AmbientFactor", 1.0); - p.AddP70colorA("DiffuseColor", 0.8, 0.8, 0.8); - p.AddP70numberA("DiffuseFactor", 1.0); - p.AddP70vector("Bump", 0.0, 0.0, 0.0); - p.AddP70vector("NormalMap", 0.0, 0.0, 0.0); - p.AddP70double("BumpFactor", 1.0); - p.AddP70colorA("TransparentColor", 0.0, 0.0, 0.0); - p.AddP70numberA("TransparencyFactor", 0.0); - p.AddP70color("DisplacementColor", 0.0, 0.0, 0.0); - p.AddP70double("DisplacementFactor", 1.0); - p.AddP70color("VectorDisplacementColor", 0.0, 0.0, 0.0); - p.AddP70double("VectorDisplacementFactor", 1.0); - if (has_phong) { - p.AddP70colorA("SpecularColor", 0.2, 0.2, 0.2); - p.AddP70numberA("SpecularFactor", 1.0); - p.AddP70numberA("ShininessExponent", 20.0); - p.AddP70colorA("ReflectionColor", 0.0, 0.0, 0.0); - p.AddP70numberA("ReflectionFactor", 1.0); - } - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // Video / FbxVideo - // one for each image file. 
- count = int32_t(count_images(mScene)); - if (count) { - n = FBX::Node("ObjectType", "Video"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxVideo"); - p = FBX::Node("Properties70"); - p.AddP70bool("ImageSequence", 0); - p.AddP70int("ImageSequenceOffset", 0); - p.AddP70double("FrameRate", 0.0); - p.AddP70int("LastFrame", 0); - p.AddP70int("Width", 0); - p.AddP70int("Height", 0); - p.AddP70("Path", "KString", "XRefUrl", "", ""); - p.AddP70int("StartFrame", 0); - p.AddP70int("StopFrame", 0); - p.AddP70double("PlaySpeed", 0.0); - p.AddP70time("Offset", 0); - p.AddP70enum("InterlaceMode", 0); - p.AddP70bool("FreeRunning", 0); - p.AddP70bool("Loop", 0); - p.AddP70enum("AccessMode", 0); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // Texture / FbxFileTexture - // <~~ aiTexture - count = int32_t(count_textures(mScene)); - if (count) { - n = FBX::Node("ObjectType", "Texture"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxFileTexture"); - p = FBX::Node("Properties70"); - p.AddP70enum("TextureTypeUse", 0); - p.AddP70numberA("Texture alpha", 1.0); - p.AddP70enum("CurrentMappingType", 0); - p.AddP70enum("WrapModeU", 0); - p.AddP70enum("WrapModeV", 0); - p.AddP70bool("UVSwap", 0); - p.AddP70bool("PremultiplyAlpha", 1); - p.AddP70vectorA("Translation", 0.0, 0.0, 0.0); - p.AddP70vectorA("Rotation", 0.0, 0.0, 0.0); - p.AddP70vectorA("Scaling", 1.0, 1.0, 1.0); - p.AddP70vector("TextureRotationPivot", 0.0, 0.0, 0.0); - p.AddP70vector("TextureScalingPivot", 0.0, 0.0, 0.0); - p.AddP70enum("CurrentTextureBlendMode", 1); - p.AddP70string("UVSet", "default"); - p.AddP70bool("UseMaterial", 0); - p.AddP70bool("UseMipMap", 0); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // AnimationCurveNode / FbxAnimCurveNode - count = mScene->mNumAnimations * 3; - if (count) { - n = FBX::Node("ObjectType", "AnimationCurveNode"); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", "FbxAnimCurveNode"); - p = FBX::Node("Properties70"); - p.AddP70("d", "Compound", "", ""); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // AnimationCurve / FbxAnimCurve - count = mScene->mNumAnimations * 9; - if (count) { - n = FBX::Node("ObjectType", "AnimationCurve"); - n.AddChild("Count", count); - object_nodes.push_back(n); - total_count += count; - } - - // Pose - count = 0; - for (size_t i = 0; i < mScene->mNumMeshes; ++i) { - aiMesh* mesh = mScene->mMeshes[i]; - if (mesh->HasBones()) { ++count; } - } - if (count) { - n = FBX::Node("ObjectType", "Pose"); - n.AddChild("Count", count); - object_nodes.push_back(n); - total_count += count; - } - - // Deformer - count = int32_t(count_deformers(mScene)); - if (count) { - n = FBX::Node("ObjectType", "Deformer"); - n.AddChild("Count", count); - object_nodes.push_back(n); - total_count += count; - } - - // (template) - count = 0; - if (count) { - n = FBX::Node("ObjectType", ""); - n.AddChild("Count", count); - pt = FBX::Node("PropertyTemplate", ""); - p = FBX::Node("Properties70"); - pt.AddChild(p); - n.AddChild(pt); - object_nodes.push_back(n); - total_count += count; - } - - // now write it all - FBX::Node defs("Definitions"); - defs.AddChild("Version", int32_t(100)); - defs.AddChild("Count", int32_t(total_count)); - for (auto &n : object_nodes) { defs.AddChild(n); } - defs.Dump(outfile, binary, 0); -} - - -// ------------------------------------------------------------------- -// some 
internal helper functions used for writing the objects section -// (which holds the actual data) -// ------------------------------------------------------------------- - -aiNode* get_node_for_mesh(unsigned int meshIndex, aiNode* node) -{ - for (size_t i = 0; i < node->mNumMeshes; ++i) { - if (node->mMeshes[i] == meshIndex) { - return node; - } - } - for (size_t i = 0; i < node->mNumChildren; ++i) { - aiNode* ret = get_node_for_mesh(meshIndex, node->mChildren[i]); - if (ret) { return ret; } - } - return nullptr; -} - -aiMatrix4x4 get_world_transform(const aiNode* node, const aiScene* scene) -{ - std::vector<const aiNode*> node_chain; - while (node != scene->mRootNode) { - node_chain.push_back(node); - node = node->mParent; - } - aiMatrix4x4 transform; - for (auto n = node_chain.rbegin(); n != node_chain.rend(); ++n) { - transform *= (*n)->mTransformation; - } - return transform; -} - -int64_t to_ktime(double ticks, const aiAnimation* anim) { - if (anim->mTicksPerSecond <= 0) { - return static_cast<int64_t>(ticks) * FBX::SECOND; - } - return (static_cast<int64_t>(ticks) / static_cast<int64_t>(anim->mTicksPerSecond)) * FBX::SECOND; -} - -int64_t to_ktime(double time) { - return (static_cast<int64_t>(time * FBX::SECOND)); -} - -void FBXExporter::WriteObjects () -{ - if (!binary) { - WriteAsciiSectionHeader("Object properties"); - } - // numbers should match those given in definitions! make sure to check - StreamWriterLE outstream(outfile); - FBX::Node object_node("Objects"); - int indent = 0; - object_node.Begin(outstream, binary, indent); - object_node.EndProperties(outstream, binary, indent); - object_node.BeginChildren(outstream, binary, indent); - - bool bJoinIdenticalVertices = mProperties->GetPropertyBool("bJoinIdenticalVertices", true); - std::vector<std::vector<int32_t>> vVertexIndice;//save vertex_indices as it is needed later - - // geometry (aiMesh) - mesh_uids.clear(); - indent = 1; - for (size_t mi = 0; mi < mScene->mNumMeshes; ++mi) { - // it's all about this mesh - aiMesh* m = mScene->mMeshes[mi]; - - // start the node record - FBX::Node n("Geometry"); - int64_t uid = generate_uid(); - mesh_uids.push_back(uid); - n.AddProperty(uid); - n.AddProperty(FBX::SEPARATOR + "Geometry"); - n.AddProperty("Mesh"); - n.Begin(outstream, binary, indent); - n.DumpProperties(outstream, binary, indent); - n.EndProperties(outstream, binary, indent); - n.BeginChildren(outstream, binary, indent); - indent = 2; - - // output vertex data - each vertex should be unique (probably) - std::vector<double> flattened_vertices; - // index of original vertex in vertex data vector - std::vector<int32_t> vertex_indices; - // map of vertex value to its index in the data vector - std::map<aiVector3D,size_t> index_by_vertex_value; - if(bJoinIdenticalVertices){ - int32_t index = 0; - for (size_t vi = 0; vi < m->mNumVertices; ++vi) { - aiVector3D vtx = m->mVertices[vi]; - auto elem = index_by_vertex_value.find(vtx); - if (elem == index_by_vertex_value.end()) { - vertex_indices.push_back(index); - index_by_vertex_value[vtx] = index; - flattened_vertices.push_back(vtx[0]); - flattened_vertices.push_back(vtx[1]); - flattened_vertices.push_back(vtx[2]); - ++index; - } else { - vertex_indices.push_back(int32_t(elem->second)); - } - } - } - else { // do not join vertex, respect the export flag - vertex_indices.resize(m->mNumVertices); - std::iota(vertex_indices.begin(), vertex_indices.end(), 0); - for(unsigned int v = 0; v < m->mNumVertices; ++ v) { - aiVector3D vtx = m->mVertices[v]; - 
flattened_vertices.push_back(vtx.x); - flattened_vertices.push_back(vtx.y); - flattened_vertices.push_back(vtx.z); - } - } - vVertexIndice.push_back(vertex_indices); - - FBX::Node::WritePropertyNode( - "Vertices", flattened_vertices, outstream, binary, indent - ); - - // output polygon data as a flattened array of vertex indices. - // the last vertex index of each polygon is negated and - 1 - std::vector<int32_t> polygon_data; - for (size_t fi = 0; fi < m->mNumFaces; ++fi) { - const aiFace &f = m->mFaces[fi]; - for (size_t pvi = 0; pvi < f.mNumIndices - 1; ++pvi) { - polygon_data.push_back(vertex_indices[f.mIndices[pvi]]); - } - polygon_data.push_back( - -1 - vertex_indices[f.mIndices[f.mNumIndices-1]] - ); - } - FBX::Node::WritePropertyNode( - "PolygonVertexIndex", polygon_data, outstream, binary, indent - ); - - // here could be edges but they're insane. - // it's optional anyway, so let's ignore it. - - FBX::Node::WritePropertyNode( - "GeometryVersion", int32_t(124), outstream, binary, indent - ); - - // normals, if any - if (m->HasNormals()) { - FBX::Node normals("LayerElementNormal", int32_t(0)); - normals.Begin(outstream, binary, indent); - normals.DumpProperties(outstream, binary, indent); - normals.EndProperties(outstream, binary, indent); - normals.BeginChildren(outstream, binary, indent); - indent = 3; - FBX::Node::WritePropertyNode( - "Version", int32_t(101), outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "Name", "", outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "MappingInformationType", "ByPolygonVertex", - outstream, binary, indent - ); - // TODO: vertex-normals or indexed normals when appropriate - FBX::Node::WritePropertyNode( - "ReferenceInformationType", "Direct", - outstream, binary, indent - ); - std::vector<double> normal_data; - normal_data.reserve(3 * polygon_data.size()); - for (size_t fi = 0; fi < m->mNumFaces; ++fi) { - const aiFace &f = m->mFaces[fi]; - for (size_t pvi = 0; pvi < f.mNumIndices; ++pvi) { - const aiVector3D &n = m->mNormals[f.mIndices[pvi]]; - normal_data.push_back(n.x); - normal_data.push_back(n.y); - normal_data.push_back(n.z); - } - } - FBX::Node::WritePropertyNode( - "Normals", normal_data, outstream, binary, indent - ); - // note: version 102 has a NormalsW also... not sure what it is, - // so we can stick with version 101 for now. 
- indent = 2; - normals.End(outstream, binary, indent, true); - } - - // colors, if any - // TODO only one color channel currently - const int32_t colorChannelIndex = 0; - if (m->HasVertexColors(colorChannelIndex)) { - FBX::Node vertexcolors("LayerElementColor", int32_t(colorChannelIndex)); - vertexcolors.Begin(outstream, binary, indent); - vertexcolors.DumpProperties(outstream, binary, indent); - vertexcolors.EndProperties(outstream, binary, indent); - vertexcolors.BeginChildren(outstream, binary, indent); - indent = 3; - FBX::Node::WritePropertyNode( - "Version", int32_t(101), outstream, binary, indent - ); - char layerName[8]; - sprintf(layerName, "COLOR_%d", colorChannelIndex); - FBX::Node::WritePropertyNode( - "Name", (const char*)layerName, outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "MappingInformationType", "ByPolygonVertex", - outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "ReferenceInformationType", "Direct", - outstream, binary, indent - ); - std::vector<double> color_data; - color_data.reserve(4 * polygon_data.size()); - for (size_t fi = 0; fi < m->mNumFaces; ++fi) { - const aiFace &f = m->mFaces[fi]; - for (size_t pvi = 0; pvi < f.mNumIndices; ++pvi) { - const aiColor4D &c = m->mColors[colorChannelIndex][f.mIndices[pvi]]; - color_data.push_back(c.r); - color_data.push_back(c.g); - color_data.push_back(c.b); - color_data.push_back(c.a); - } - } - FBX::Node::WritePropertyNode( - "Colors", color_data, outstream, binary, indent - ); - indent = 2; - vertexcolors.End(outstream, binary, indent, true); - } - - // uvs, if any - for (size_t uvi = 0; uvi < m->GetNumUVChannels(); ++uvi) { - if (m->mNumUVComponents[uvi] > 2) { - // FBX only supports 2-channel UV maps... - // or at least i'm not sure how to indicate a different number - std::stringstream err; - err << "Only 2-channel UV maps supported by FBX,"; - err << " but mesh " << mi; - if (m->mName.length) { - err << " (" << m->mName.C_Str() << ")"; - } - err << " UV map " << uvi; - err << " has " << m->mNumUVComponents[uvi]; - err << " components! Data will be preserved,"; - err << " but may be incorrectly interpreted on load."; - ASSIMP_LOG_WARN(err.str()); - } - FBX::Node uv("LayerElementUV", int32_t(uvi)); - uv.Begin(outstream, binary, indent); - uv.DumpProperties(outstream, binary, indent); - uv.EndProperties(outstream, binary, indent); - uv.BeginChildren(outstream, binary, indent); - indent = 3; - FBX::Node::WritePropertyNode( - "Version", int32_t(101), outstream, binary, indent - ); - // it doesn't seem like assimp keeps the uv map name, - // so just leave it blank. 
- FBX::Node::WritePropertyNode( - "Name", "", outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "MappingInformationType", "ByPolygonVertex", - outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "ReferenceInformationType", "IndexToDirect", - outstream, binary, indent - ); - - std::vector<double> uv_data; - std::vector<int32_t> uv_indices; - std::map<aiVector3D,int32_t> index_by_uv; - int32_t index = 0; - for (size_t fi = 0; fi < m->mNumFaces; ++fi) { - const aiFace &f = m->mFaces[fi]; - for (size_t pvi = 0; pvi < f.mNumIndices; ++pvi) { - const aiVector3D &uv = - m->mTextureCoords[uvi][f.mIndices[pvi]]; - auto elem = index_by_uv.find(uv); - if (elem == index_by_uv.end()) { - index_by_uv[uv] = index; - uv_indices.push_back(index); - for (unsigned int x = 0; x < m->mNumUVComponents[uvi]; ++x) { - uv_data.push_back(uv[x]); - } - ++index; - } else { - uv_indices.push_back(elem->second); - } - } - } - FBX::Node::WritePropertyNode( - "UV", uv_data, outstream, binary, indent - ); - FBX::Node::WritePropertyNode( - "UVIndex", uv_indices, outstream, binary, indent - ); - indent = 2; - uv.End(outstream, binary, indent, true); - } - - // i'm not really sure why this material section exists, - // as the material is linked via "Connections". - // it seems to always have the same "0" value. - FBX::Node mat("LayerElementMaterial", int32_t(0)); - mat.AddChild("Version", int32_t(101)); - mat.AddChild("Name", ""); - mat.AddChild("MappingInformationType", "AllSame"); - mat.AddChild("ReferenceInformationType", "IndexToDirect"); - std::vector<int32_t> mat_indices = {0}; - mat.AddChild("Materials", mat_indices); - mat.Dump(outstream, binary, indent); - - // finally we have the layer specifications, - // which select the normals / UV set / etc to use. - // TODO: handle multiple uv sets correctly? 
- FBX::Node layer("Layer", int32_t(0)); - layer.AddChild("Version", int32_t(100)); - FBX::Node le("LayerElement"); - le.AddChild("Type", "LayerElementNormal"); - le.AddChild("TypedIndex", int32_t(0)); - layer.AddChild(le); - // TODO only 1 color channel currently - le = FBX::Node("LayerElement"); - le.AddChild("Type", "LayerElementColor"); - le.AddChild("TypedIndex", int32_t(0)); - layer.AddChild(le); - le = FBX::Node("LayerElement"); - le.AddChild("Type", "LayerElementMaterial"); - le.AddChild("TypedIndex", int32_t(0)); - layer.AddChild(le); - le = FBX::Node("LayerElement"); - le.AddChild("Type", "LayerElementUV"); - le.AddChild("TypedIndex", int32_t(0)); - layer.AddChild(le); - layer.Dump(outstream, binary, indent); - - for(unsigned int lr = 1; lr < m->GetNumUVChannels(); ++ lr) - { - FBX::Node layerExtra("Layer", int32_t(lr)); - layerExtra.AddChild("Version", int32_t(100)); - FBX::Node leExtra("LayerElement"); - leExtra.AddChild("Type", "LayerElementUV"); - leExtra.AddChild("TypedIndex", int32_t(lr)); - layerExtra.AddChild(leExtra); - layerExtra.Dump(outstream, binary, indent); - } - // finish the node record - indent = 1; - n.End(outstream, binary, indent, true); - } - - // aiMaterial - material_uids.clear(); - for (size_t i = 0; i < mScene->mNumMaterials; ++i) { - // it's all about this material - aiMaterial* m = mScene->mMaterials[i]; - - // these are used to receive material data - float f; aiColor3D c; - - // start the node record - FBX::Node n("Material"); - - int64_t uid = generate_uid(); - material_uids.push_back(uid); - n.AddProperty(uid); - - aiString name; - m->Get(AI_MATKEY_NAME, name); - n.AddProperty(name.C_Str() + FBX::SEPARATOR + "Material"); - - n.AddProperty(""); - - n.AddChild("Version", int32_t(102)); - f = 0; - m->Get(AI_MATKEY_SHININESS, f); - bool phong = (f > 0); - if (phong) { - n.AddChild("ShadingModel", "phong"); - } else { - n.AddChild("ShadingModel", "lambert"); - } - n.AddChild("MultiLayer", int32_t(0)); - - FBX::Node p("Properties70"); - - // materials exported using the FBX SDK have two sets of fields. - // there are the properties specified in the PropertyTemplate, - // which are those supported by the modernFBX SDK, - // and an extra set of properties with simpler names. - // The extra properties are a legacy material system from pre-2009. - // - // In the modern system, each property has "color" and "factor". - // Generally the interpretation of these seems to be - // that the colour is multiplied by the factor before use, - // but this is not always clear-cut. - // - // Usually assimp only stores the colour, - // so we can just leave the factors at the default "1.0". - - // first we can export the "standard" properties - if (m->Get(AI_MATKEY_COLOR_AMBIENT, c) == aiReturn_SUCCESS) { - p.AddP70colorA("AmbientColor", c.r, c.g, c.b); - //p.AddP70numberA("AmbientFactor", 1.0); - } - if (m->Get(AI_MATKEY_COLOR_DIFFUSE, c) == aiReturn_SUCCESS) { - p.AddP70colorA("DiffuseColor", c.r, c.g, c.b); - //p.AddP70numberA("DiffuseFactor", 1.0); - } - if (m->Get(AI_MATKEY_COLOR_TRANSPARENT, c) == aiReturn_SUCCESS) { - // "TransparentColor" / "TransparencyFactor"... - // thanks FBX, for your insightful interpretation of consistency - p.AddP70colorA("TransparentColor", c.r, c.g, c.b); - // TransparencyFactor defaults to 0.0, so set it to 1.0. - // note: Maya always sets this to 1.0, - // so we can't use it sensibly as "Opacity". - // In stead we rely on the legacy "Opacity" value, below. 
- // Blender also relies on "Opacity" not "TransparencyFactor", - // probably for a similar reason. - p.AddP70numberA("TransparencyFactor", 1.0); - } - if (m->Get(AI_MATKEY_COLOR_REFLECTIVE, c) == aiReturn_SUCCESS) { - p.AddP70colorA("ReflectionColor", c.r, c.g, c.b); - } - if (m->Get(AI_MATKEY_REFLECTIVITY, f) == aiReturn_SUCCESS) { - p.AddP70numberA("ReflectionFactor", f); - } - if (phong) { - if (m->Get(AI_MATKEY_COLOR_SPECULAR, c) == aiReturn_SUCCESS) { - p.AddP70colorA("SpecularColor", c.r, c.g, c.b); - } - if (m->Get(AI_MATKEY_SHININESS_STRENGTH, f) == aiReturn_SUCCESS) { - p.AddP70numberA("ShininessFactor", f); - } - if (m->Get(AI_MATKEY_SHININESS, f) == aiReturn_SUCCESS) { - p.AddP70numberA("ShininessExponent", f); - } - if (m->Get(AI_MATKEY_REFLECTIVITY, f) == aiReturn_SUCCESS) { - p.AddP70numberA("ReflectionFactor", f); - } - } - - // Now the legacy system. - // For safety let's include it. - // thrse values don't exist in the property template, - // and usually are completely ignored when loading. - // One notable exception is the "Opacity" property, - // which Blender uses as (1.0 - alpha). - c.r = 0.0f; c.g = 0.0f; c.b = 0.0f; - m->Get(AI_MATKEY_COLOR_EMISSIVE, c); - p.AddP70vector("Emissive", c.r, c.g, c.b); - c.r = 0.2f; c.g = 0.2f; c.b = 0.2f; - m->Get(AI_MATKEY_COLOR_AMBIENT, c); - p.AddP70vector("Ambient", c.r, c.g, c.b); - c.r = 0.8f; c.g = 0.8f; c.b = 0.8f; - m->Get(AI_MATKEY_COLOR_DIFFUSE, c); - p.AddP70vector("Diffuse", c.r, c.g, c.b); - // The FBX SDK determines "Opacity" from transparency colour (RGB) - // and factor (F) as: O = (1.0 - F * ((R + G + B) / 3)). - // However we actually have an opacity value, - // so we should take it from AI_MATKEY_OPACITY if possible. - // It might make more sense to use TransparencyFactor, - // but Blender actually loads "Opacity" correctly, so let's use it. - f = 1.0f; - if (m->Get(AI_MATKEY_COLOR_TRANSPARENT, c) == aiReturn_SUCCESS) { - f = 1.0f - ((c.r + c.g + c.b) / 3.0f); - } - m->Get(AI_MATKEY_OPACITY, f); - p.AddP70double("Opacity", f); - if (phong) { - // specular color is multiplied by shininess_strength - c.r = 0.2f; c.g = 0.2f; c.b = 0.2f; - m->Get(AI_MATKEY_COLOR_SPECULAR, c); - f = 1.0f; - m->Get(AI_MATKEY_SHININESS_STRENGTH, f); - p.AddP70vector("Specular", f*c.r, f*c.g, f*c.b); - f = 20.0f; - m->Get(AI_MATKEY_SHININESS, f); - p.AddP70double("Shininess", f); - // Legacy "Reflectivity" is F*F*((R+G+B)/3), - // where F is the proportion of light reflected (AKA reflectivity), - // and RGB is the reflective colour of the material. - // No idea why, but we might as well set it the same way. - f = 0.0f; - m->Get(AI_MATKEY_REFLECTIVITY, f); - c.r = 1.0f, c.g = 1.0f, c.b = 1.0f; - m->Get(AI_MATKEY_COLOR_REFLECTIVE, c); - p.AddP70double("Reflectivity", f*f*((c.r+c.g+c.b)/3.0)); - } - - n.AddChild(p); - - n.Dump(outstream, binary, indent); - } - - // we need to look up all the images we're using, - // so we can generate uids, and eliminate duplicates. 
- std::map<std::string, int64_t> uid_by_image; - for (size_t i = 0; i < mScene->mNumMaterials; ++i) { - aiString texpath; - aiMaterial* mat = mScene->mMaterials[i]; - for ( - size_t tt = aiTextureType_DIFFUSE; - tt < aiTextureType_UNKNOWN; - ++tt - ){ - const aiTextureType textype = static_cast<aiTextureType>(tt); - const size_t texcount = mat->GetTextureCount(textype); - for (size_t j = 0; j < texcount; ++j) { - mat->GetTexture(textype, (unsigned int)j, &texpath); - const std::string texstring = texpath.C_Str(); - auto elem = uid_by_image.find(texstring); - if (elem == uid_by_image.end()) { - uid_by_image[texstring] = generate_uid(); - } - } - } - } - - // FbxVideo - stores images used by textures. - for (const auto &it : uid_by_image) { - FBX::Node n("Video"); - const int64_t& uid = it.second; - const std::string name = ""; // TODO: ... name??? - n.AddProperties(uid, name + FBX::SEPARATOR + "Video", "Clip"); - n.AddChild("Type", "Clip"); - FBX::Node p("Properties70"); - // TODO: get full path... relative path... etc... ugh... - // for now just use the same path for everything, - // and hopefully one of them will work out. - std::string path = it.first; - // try get embedded texture - const aiTexture* embedded_texture = mScene->GetEmbeddedTexture(it.first.c_str()); - if (embedded_texture != nullptr) { - // change the path (use original filename, if available. If name is empty, concatenate texture index with file extension) - std::stringstream newPath; - if (embedded_texture->mFilename.length > 0) { - newPath << embedded_texture->mFilename.C_Str(); - } else if (embedded_texture->achFormatHint[0]) { - int texture_index = std::stoi(path.substr(1, path.size() - 1)); - newPath << texture_index << "." << embedded_texture->achFormatHint; - } - path = newPath.str(); - // embed the texture - size_t texture_size = static_cast<size_t>(embedded_texture->mWidth * std::max(embedded_texture->mHeight, 1u)); - if (binary) { - // embed texture as binary data - std::vector<uint8_t> tex_data; - tex_data.resize(texture_size); - memcpy(&tex_data[0], (char*)embedded_texture->pcData, texture_size); - n.AddChild("Content", tex_data); - } else { - // embed texture in base64 encoding - std::string encoded_texture = FBX::Util::EncodeBase64((char*)embedded_texture->pcData, texture_size); - n.AddChild("Content", encoded_texture); - } - } - p.AddP70("Path", "KString", "XRefUrl", "", path); - n.AddChild(p); - n.AddChild("UseMipMap", int32_t(0)); - n.AddChild("Filename", path); - n.AddChild("RelativeFilename", path); - n.Dump(outstream, binary, indent); - } - - // Textures - // referenced by material_index/texture_type pairs. 
- std::map<std::pair<size_t,size_t>,int64_t> texture_uids; - const std::map<aiTextureType,std::string> prop_name_by_tt = { - {aiTextureType_DIFFUSE, "DiffuseColor"}, - {aiTextureType_SPECULAR, "SpecularColor"}, - {aiTextureType_AMBIENT, "AmbientColor"}, - {aiTextureType_EMISSIVE, "EmissiveColor"}, - {aiTextureType_HEIGHT, "Bump"}, - {aiTextureType_NORMALS, "NormalMap"}, - {aiTextureType_SHININESS, "ShininessExponent"}, - {aiTextureType_OPACITY, "TransparentColor"}, - {aiTextureType_DISPLACEMENT, "DisplacementColor"}, - //{aiTextureType_LIGHTMAP, "???"}, - {aiTextureType_REFLECTION, "ReflectionColor"} - //{aiTextureType_UNKNOWN, ""} - }; - for (size_t i = 0; i < mScene->mNumMaterials; ++i) { - // textures are attached to materials - aiMaterial* mat = mScene->mMaterials[i]; - int64_t material_uid = material_uids[i]; - - for ( - size_t j = aiTextureType_DIFFUSE; - j < aiTextureType_UNKNOWN; - ++j - ) { - const aiTextureType tt = static_cast<aiTextureType>(j); - size_t n = mat->GetTextureCount(tt); - - if (n < 1) { // no texture of this type - continue; - } - - if (n > 1) { - // TODO: multilayer textures - std::stringstream err; - err << "Multilayer textures not supported (for now),"; - err << " skipping texture type " << j; - err << " of material " << i; - ASSIMP_LOG_WARN(err.str()); - } - - // get image path for this (single-image) texture - aiString tpath; - if (mat->GetTexture(tt, 0, &tpath) != aiReturn_SUCCESS) { - std::stringstream err; - err << "Failed to get texture 0 for texture of type " << tt; - err << " on material " << i; - err << ", however GetTextureCount returned 1."; - throw DeadlyExportError(err.str()); - } - const std::string texture_path(tpath.C_Str()); - - // get connected image uid - auto elem = uid_by_image.find(texture_path); - if (elem == uid_by_image.end()) { - // this should never happen - std::stringstream err; - err << "Failed to find video element for texture with path"; - err << " \"" << texture_path << "\""; - err << ", type " << j << ", material " << i; - throw DeadlyExportError(err.str()); - } - const int64_t image_uid = elem->second; - - // get the name of the material property to connect to - auto elem2 = prop_name_by_tt.find(tt); - if (elem2 == prop_name_by_tt.end()) { - // don't know how to handle this type of texture, - // so skip it. - std::stringstream err; - err << "Not sure how to handle texture of type " << j; - err << " on material " << i; - err << ", skipping..."; - ASSIMP_LOG_WARN(err.str()); - continue; - } - const std::string& prop_name = elem2->second; - - // generate a uid for this texture - const int64_t texture_uid = generate_uid(); - - // link the texture to the material - connections.emplace_back( - "C", "OP", texture_uid, material_uid, prop_name - ); - - // link the image data to the texture - connections.emplace_back("C", "OO", image_uid, texture_uid); - - // now write the actual texture node - FBX::Node tnode("Texture"); - // TODO: some way to determine texture name? - const std::string texture_name = "" + FBX::SEPARATOR + "Texture"; - tnode.AddProperties(texture_uid, texture_name, ""); - // there really doesn't seem to be a better type than this: - tnode.AddChild("Type", "TextureVideoClip"); - tnode.AddChild("Version", int32_t(202)); - tnode.AddChild("TextureName", texture_name); - FBX::Node p("Properties70"); - p.AddP70enum("CurrentTextureBlendMode", 0); // TODO: verify - //p.AddP70string("UVSet", ""); // TODO: how should this work? 
- p.AddP70bool("UseMaterial", 1); - tnode.AddChild(p); - // can't easily detrmine which texture path will be correct, - // so just store what we have in every field. - // these being incorrect is a common problem with FBX anyway. - tnode.AddChild("FileName", texture_path); - tnode.AddChild("RelativeFilename", texture_path); - tnode.AddChild("ModelUVTranslation", double(0.0), double(0.0)); - tnode.AddChild("ModelUVScaling", double(1.0), double(1.0)); - tnode.AddChild("Texture_Alpha_Source", "None"); - tnode.AddChild( - "Cropping", int32_t(0), int32_t(0), int32_t(0), int32_t(0) - ); - tnode.Dump(outstream, binary, indent); - } - } - - // bones. - // - // output structure: - // subset of node hierarchy that are "skeleton", - // i.e. do not have meshes but only bones. - // but.. i'm not sure how anyone could guarantee that... - // - // input... - // well, for each mesh it has "bones", - // and the bone names correspond to nodes. - // of course we also need the parent nodes, - // as they give some of the transform........ - // - // well. we can assume a sane input, i suppose. - // - // so input is the bone node hierarchy, - // with an extra thing for the transformation of the MESH in BONE space. - // - // output is a set of bone nodes, - // a "bindpose" which indicates the default local transform of all bones, - // and a set of "deformers". - // each deformer is parented to a mesh geometry, - // and has one or more "subdeformer"s as children. - // each subdeformer has one bone node as a child, - // and represents the influence of that bone on the grandparent mesh. - // the subdeformer has a list of indices, and weights, - // with indices specifying vertex indices, - // and weights specifying the corresponding influence of this bone. - // it also has Transform and TransformLink elements, - // specifying the transform of the MESH in BONE space, - // and the transformation of the BONE in WORLD space, - // likely in the bindpose. - // - // the input bone structure is different but similar, - // storing the number of weights for this bone, - // and an array of (vertex index, weight) pairs. - // - // one sticky point is that the number of vertices may not match, - // because assimp splits vertices by normal, uv, etc. - - // functor for aiNode sorting - struct SortNodeByName - { - bool operator()(const aiNode *lhs, const aiNode *rhs) const - { - return strcmp(lhs->mName.C_Str(), rhs->mName.C_Str()) < 0; - } - }; - - // first we should mark the skeleton for each mesh. - // the skeleton must include not only the aiBones, - // but also all their parent nodes. - // anything that affects the position of any bone node must be included. - // Use SorNodeByName to make sure the exported result will be the same across all systems - // Otherwise the aiNodes of the skeleton would be sorted based on the pointer address, which isn't consistent - std::vector<std::set<const aiNode*, SortNodeByName>> skeleton_by_mesh(mScene->mNumMeshes); - // at the same time we can build a list of all the skeleton nodes, - // which will be used later to mark them as type "limbNode". 
- std::unordered_set<const aiNode*> limbnodes; - - //actual bone nodes in fbx, without parenting-up - std::unordered_set<std::string> setAllBoneNamesInScene; - for(unsigned int m = 0; m < mScene->mNumMeshes; ++ m) - { - aiMesh* pMesh = mScene->mMeshes[m]; - for(unsigned int b = 0; b < pMesh->mNumBones; ++ b) - setAllBoneNamesInScene.insert(pMesh->mBones[b]->mName.data); - } - aiMatrix4x4 mxTransIdentity; - - // and a map of nodes by bone name, as finding them is annoying. - std::map<std::string,aiNode*> node_by_bone; - for (size_t mi = 0; mi < mScene->mNumMeshes; ++mi) { - const aiMesh* m = mScene->mMeshes[mi]; - std::set<const aiNode*, SortNodeByName> skeleton; - for (size_t bi =0; bi < m->mNumBones; ++bi) { - const aiBone* b = m->mBones[bi]; - const std::string name(b->mName.C_Str()); - auto elem = node_by_bone.find(name); - aiNode* n; - if (elem != node_by_bone.end()) { - n = elem->second; - } else { - n = mScene->mRootNode->FindNode(b->mName); - if (!n) { - // this should never happen - std::stringstream err; - err << "Failed to find node for bone: \"" << name << "\""; - throw DeadlyExportError(err.str()); - } - node_by_bone[name] = n; - limbnodes.insert(n); - } - skeleton.insert(n); - // mark all parent nodes as skeleton as well, - // up until we find the root node, - // or else the node containing the mesh, - // or else the parent of a node containig the mesh. - for ( - const aiNode* parent = n->mParent; - parent && parent != mScene->mRootNode; - parent = parent->mParent - ) { - // if we've already done this node we can skip it all - if (skeleton.count(parent)) { - break; - } - // ignore fbx transform nodes as these will be collapsed later - // TODO: cache this by aiNode* - const std::string node_name(parent->mName.C_Str()); - if (node_name.find(MAGIC_NODE_TAG) != std::string::npos) { - continue; - } - //not a bone in scene && no effect in transform - if(setAllBoneNamesInScene.find(node_name)==setAllBoneNamesInScene.end() - && parent->mTransformation == mxTransIdentity) { - continue; - } - // otherwise check if this is the root of the skeleton - bool end = false; - // is the mesh part of this node? - for (size_t i = 0; i < parent->mNumMeshes; ++i) { - if (parent->mMeshes[i] == mi) { - end = true; - break; - } - } - // is the mesh in one of the children of this node? - for (size_t j = 0; j < parent->mNumChildren; ++j) { - aiNode* child = parent->mChildren[j]; - for (size_t i = 0; i < child->mNumMeshes; ++i) { - if (child->mMeshes[i] == mi) { - end = true; - break; - } - } - if (end) { break; } - } - - // if it was the skeleton root we can finish here - if (end) { break; } - } - } - skeleton_by_mesh[mi] = skeleton; - } - - // we'll need the uids for the bone nodes, so generate them now - for (size_t i = 0; i < mScene->mNumMeshes; ++i) { - auto &s = skeleton_by_mesh[i]; - for (const aiNode* n : s) { - auto elem = node_uids.find(n); - if (elem == node_uids.end()) { - node_uids[n] = generate_uid(); - } - } - } - - // now, for each aiMesh, we need to export a deformer, - // and for each aiBone a subdeformer, - // which should have all the skinning info. - // these will need to be connected properly to the mesh, - // and we can do that all now. 
- for (size_t mi = 0; mi < mScene->mNumMeshes; ++mi) { - const aiMesh* m = mScene->mMeshes[mi]; - if (!m->HasBones()) { - continue; - } - // make a deformer for this mesh - int64_t deformer_uid = generate_uid(); - FBX::Node dnode("Deformer"); - dnode.AddProperties(deformer_uid, FBX::SEPARATOR + "Deformer", "Skin"); - dnode.AddChild("Version", int32_t(101)); - // "acuracy"... this is not a typo.... - dnode.AddChild("Link_DeformAcuracy", double(50)); - dnode.AddChild("SkinningType", "Linear"); // TODO: other modes? - dnode.Dump(outstream, binary, indent); - - // connect it - connections.emplace_back("C", "OO", deformer_uid, mesh_uids[mi]); - - //computed before - std::vector<int32_t>& vertex_indices = vVertexIndice[mi]; - - // TODO, FIXME: this won't work if anything is not in the bind pose. - // for now if such a situation is detected, we throw an exception. - std::set<const aiBone*> not_in_bind_pose; - std::set<const aiNode*> no_offset_matrix; - - // first get this mesh's position in world space, - // as we'll need it for each subdeformer. - // - // ...of course taking the position of the MESH doesn't make sense, - // as it can be instanced to many nodes. - // All we can do is assume no instancing, - // and take the first node we find that contains the mesh. - aiNode* mesh_node = get_node_for_mesh((unsigned int)mi, mScene->mRootNode); - aiMatrix4x4 mesh_xform = get_world_transform(mesh_node, mScene); - - // now make a subdeformer for each bone in the skeleton - const std::set<const aiNode*, SortNodeByName> skeleton= skeleton_by_mesh[mi]; - for (const aiNode* bone_node : skeleton) { - // if there's a bone for this node, find it - const aiBone* b = nullptr; - for (size_t bi = 0; bi < m->mNumBones; ++bi) { - // TODO: this probably should index by something else - const std::string name(m->mBones[bi]->mName.C_Str()); - if (node_by_bone[name] == bone_node) { - b = m->mBones[bi]; - break; - } - } - if (!b) { - no_offset_matrix.insert(bone_node); - } - - // start the subdeformer node - const int64_t subdeformer_uid = generate_uid(); - FBX::Node sdnode("Deformer"); - sdnode.AddProperties( - subdeformer_uid, FBX::SEPARATOR + "SubDeformer", "Cluster" - ); - sdnode.AddChild("Version", int32_t(100)); - sdnode.AddChild("UserData", "", ""); - - // add indices and weights, if any - if (b) { - std::vector<int32_t> subdef_indices; - std::vector<double> subdef_weights; - int32_t last_index = -1; - for (size_t wi = 0; wi < b->mNumWeights; ++wi) { - int32_t vi = vertex_indices[b->mWeights[wi].mVertexId]; - if (vi == last_index) { - // only for vertices we exported to fbx - // TODO, FIXME: this assumes identically-located vertices - // will always deform in the same way. - // as assimp doesn't store a separate list of "positions", - // there's not much that can be done about this - // other than assuming that identical position means - // identical vertex. - continue; - } - subdef_indices.push_back(vi); - subdef_weights.push_back(b->mWeights[wi].mWeight); - last_index = vi; - } - // yes, "indexes" - sdnode.AddChild("Indexes", subdef_indices); - sdnode.AddChild("Weights", subdef_weights); - } - - // transform is the transform of the mesh, but in bone space. - // if the skeleton is in the bind pose, - // we can take the inverse of the world-space bone transform - // and multiply by the world-space transform of the mesh. 
- aiMatrix4x4 bone_xform = get_world_transform(bone_node, mScene); - aiMatrix4x4 inverse_bone_xform = bone_xform; - inverse_bone_xform.Inverse(); - aiMatrix4x4 tr = inverse_bone_xform * mesh_xform; - - sdnode.AddChild("Transform", tr); - - - sdnode.AddChild("TransformLink", bone_xform); - // note: this means we ALWAYS rely on the mesh node transform - // being unchanged from the time the skeleton was bound. - // there's not really any way around this at the moment. - - // done - sdnode.Dump(outstream, binary, indent); - - // lastly, connect to the parent deformer - connections.emplace_back( - "C", "OO", subdeformer_uid, deformer_uid - ); - - // we also need to connect the limb node to the subdeformer. - connections.emplace_back( - "C", "OO", node_uids[bone_node], subdeformer_uid - ); - } - - // if we cannot create a valid FBX file, simply die. - // this will both prevent unnecessary bug reports, - // and tell the user what they can do to fix the situation - // (i.e. export their model in the bind pose). - if (no_offset_matrix.size() && not_in_bind_pose.size()) { - std::stringstream err; - err << "Not enough information to construct bind pose"; - err << " for mesh " << mi << "!"; - err << " Transform matrix for bone \""; - err << (*not_in_bind_pose.begin())->mName.C_Str() << "\""; - if (not_in_bind_pose.size() > 1) { - err << " (and " << not_in_bind_pose.size() - 1 << " more)"; - } - err << " does not match mOffsetMatrix,"; - err << " and node \""; - err << (*no_offset_matrix.begin())->mName.C_Str() << "\""; - if (no_offset_matrix.size() > 1) { - err << " (and " << no_offset_matrix.size() - 1 << " more)"; - } - err << " has no offset matrix to rely on."; - err << " Please ensure bones are in the bind pose to export."; - throw DeadlyExportError(err.str()); - } - - } - - // BindPose - // - // This is a legacy system, which should be unnecessary. - // - // Somehow including it slows file loading by the official FBX SDK, - // and as it can reconstruct it from the deformers anyway, - // this is not currently included. - // - // The code is kept here in case it's useful in the future, - // but it's pretty much a hack anyway, - // as assimp doesn't store bindpose information for full skeletons. - // - /*for (size_t mi = 0; mi < mScene->mNumMeshes; ++mi) { - aiMesh* mesh = mScene->mMeshes[mi]; - if (! mesh->HasBones()) { continue; } - int64_t bindpose_uid = generate_uid(); - FBX::Node bpnode("Pose"); - bpnode.AddProperty(bindpose_uid); - // note: this uid is never linked or connected to anything. - bpnode.AddProperty(FBX::SEPARATOR + "Pose"); // blank name - bpnode.AddProperty("BindPose"); - - bpnode.AddChild("Type", "BindPose"); - bpnode.AddChild("Version", int32_t(100)); - - aiNode* mesh_node = get_node_for_mesh(mi, mScene->mRootNode); - - // next get the whole skeleton for this mesh. - // we need it all to define the bindpose section. - // the FBX SDK will complain if it's missing, - // and also if parents of used bones don't have a subdeformer. - // order shouldn't matter. - std::set<aiNode*> skeleton; - for (size_t bi = 0; bi < mesh->mNumBones; ++bi) { - // bone node should have already been indexed - const aiBone* b = mesh->mBones[bi]; - const std::string bone_name(b->mName.C_Str()); - aiNode* parent = node_by_bone[bone_name]; - // insert all nodes down to the root or mesh node - while ( - parent - && parent != mScene->mRootNode - && parent != mesh_node - ) { - skeleton.insert(parent); - parent = parent->mParent; - } - } - - // number of pose nodes. includes one for the mesh itself. 
- bpnode.AddChild("NbPoseNodes", int32_t(1 + skeleton.size())); - - // the first pose node is always the mesh itself - FBX::Node pose("PoseNode"); - pose.AddChild("Node", mesh_uids[mi]); - aiMatrix4x4 mesh_node_xform = get_world_transform(mesh_node, mScene); - pose.AddChild("Matrix", mesh_node_xform); - bpnode.AddChild(pose); - - for (aiNode* bonenode : skeleton) { - // does this node have a uid yet? - int64_t node_uid; - auto node_uid_iter = node_uids.find(bonenode); - if (node_uid_iter != node_uids.end()) { - node_uid = node_uid_iter->second; - } else { - node_uid = generate_uid(); - node_uids[bonenode] = node_uid; - } - - // make a pose thingy - pose = FBX::Node("PoseNode"); - pose.AddChild("Node", node_uid); - aiMatrix4x4 node_xform = get_world_transform(bonenode, mScene); - pose.AddChild("Matrix", node_xform); - bpnode.AddChild(pose); - } - - // now write it - bpnode.Dump(outstream, binary, indent); - }*/ - - // TODO: cameras, lights - - // write nodes (i.e. model hierarchy) - // start at root node - WriteModelNodes( - outstream, mScene->mRootNode, 0, limbnodes - ); - - // animations - // - // in FBX there are: - // * AnimationStack - corresponds to an aiAnimation - // * AnimationLayer - a combinable animation component - // * AnimationCurveNode - links the property to be animated - // * AnimationCurve - defines animation data for a single property value - // - // the CurveNode also provides the default value for a property, - // such as the X, Y, Z coordinates for animatable translation. - // - // the Curve only specifies values for one component of the property, - // so there will be a separate AnimationCurve for X, Y, and Z. - // - // Assimp has: - // * aiAnimation - basically corresponds to an AnimationStack - // * aiNodeAnim - defines all animation for one aiNode - // * aiVectorKey/aiQuatKey - define the keyframe data for T/R/S - // - // assimp has no equivalent for AnimationLayer, - // and these are flattened on FBX import. - // we can assume there will be one per AnimationStack. - // - // the aiNodeAnim contains all animation data for a single aiNode, - // which will correspond to three AnimationCurveNode's: - // one each for translation, rotation and scale. - // The data for each of these will be put in 9 AnimationCurve's, - // T.X, T.Y, T.Z, R.X, R.Y, R.Z, etc. - - // AnimationStack / aiAnimation - std::vector<int64_t> animation_stack_uids(mScene->mNumAnimations); - for (size_t ai = 0; ai < mScene->mNumAnimations; ++ai) { - int64_t animstack_uid = generate_uid(); - animation_stack_uids[ai] = animstack_uid; - const aiAnimation* anim = mScene->mAnimations[ai]; - - FBX::Node asnode("AnimationStack"); - std::string name = anim->mName.C_Str() + FBX::SEPARATOR + "AnimStack"; - asnode.AddProperties(animstack_uid, name, ""); - FBX::Node p("Properties70"); - p.AddP70time("LocalStart", 0); // assimp doesn't store this - p.AddP70time("LocalStop", to_ktime(anim->mDuration, anim)); - p.AddP70time("ReferenceStart", 0); - p.AddP70time("ReferenceStop", to_ktime(anim->mDuration, anim)); - asnode.AddChild(p); - - // this node absurdly always pretends it has children - // (in this case it does, but just in case...) 
- asnode.force_has_children = true; - asnode.Dump(outstream, binary, indent); - - // note: animation stacks are not connected to anything - } - - // AnimationLayer - one per aiAnimation - std::vector<int64_t> animation_layer_uids(mScene->mNumAnimations); - for (size_t ai = 0; ai < mScene->mNumAnimations; ++ai) { - int64_t animlayer_uid = generate_uid(); - animation_layer_uids[ai] = animlayer_uid; - FBX::Node alnode("AnimationLayer"); - alnode.AddProperties(animlayer_uid, FBX::SEPARATOR + "AnimLayer", ""); - - // this node absurdly always pretends it has children - alnode.force_has_children = true; - alnode.Dump(outstream, binary, indent); - - // connect to the relevant animstack - connections.emplace_back( - "C", "OO", animlayer_uid, animation_stack_uids[ai] - ); - } - - // AnimCurveNode - three per aiNodeAnim - std::vector<std::vector<std::array<int64_t,3>>> curve_node_uids; - for (size_t ai = 0; ai < mScene->mNumAnimations; ++ai) { - const aiAnimation* anim = mScene->mAnimations[ai]; - const int64_t layer_uid = animation_layer_uids[ai]; - std::vector<std::array<int64_t,3>> nodeanim_uids; - for (size_t nai = 0; nai < anim->mNumChannels; ++nai) { - const aiNodeAnim* na = anim->mChannels[nai]; - // get the corresponding aiNode - const aiNode* node = mScene->mRootNode->FindNode(na->mNodeName); - // and its transform - const aiMatrix4x4 node_xfm = get_world_transform(node, mScene); - aiVector3D T, R, S; - node_xfm.Decompose(S, R, T); - - // AnimationCurveNode uids - std::array<int64_t,3> ids; - ids[0] = generate_uid(); // T - ids[1] = generate_uid(); // R - ids[2] = generate_uid(); // S - - // translation - WriteAnimationCurveNode(outstream, - ids[0], "T", T, "Lcl Translation", - layer_uid, node_uids[node] - ); - - // rotation - WriteAnimationCurveNode(outstream, - ids[1], "R", R, "Lcl Rotation", - layer_uid, node_uids[node] - ); - - // scale - WriteAnimationCurveNode(outstream, - ids[2], "S", S, "Lcl Scale", - layer_uid, node_uids[node] - ); - - // store the uids for later use - nodeanim_uids.push_back(ids); - } - curve_node_uids.push_back(nodeanim_uids); - } - - // AnimCurve - defines actual keyframe data. 
- // there's a separate curve for every component of every vector, - // for example a transform curvenode will have separate X/Y/Z AnimCurve's - for (size_t ai = 0; ai < mScene->mNumAnimations; ++ai) { - const aiAnimation* anim = mScene->mAnimations[ai]; - for (size_t nai = 0; nai < anim->mNumChannels; ++nai) { - const aiNodeAnim* na = anim->mChannels[nai]; - // get the corresponding aiNode - const aiNode* node = mScene->mRootNode->FindNode(na->mNodeName); - // and its transform - const aiMatrix4x4 node_xfm = get_world_transform(node, mScene); - aiVector3D T, R, S; - node_xfm.Decompose(S, R, T); - const std::array<int64_t,3>& ids = curve_node_uids[ai][nai]; - - std::vector<int64_t> times; - std::vector<float> xval, yval, zval; - - // position/translation - for (size_t ki = 0; ki < na->mNumPositionKeys; ++ki) { - const aiVectorKey& k = na->mPositionKeys[ki]; - times.push_back(to_ktime(k.mTime)); - xval.push_back(k.mValue.x); - yval.push_back(k.mValue.y); - zval.push_back(k.mValue.z); - } - // one curve each for X, Y, Z - WriteAnimationCurve(outstream, T.x, times, xval, ids[0], "d|X"); - WriteAnimationCurve(outstream, T.y, times, yval, ids[0], "d|Y"); - WriteAnimationCurve(outstream, T.z, times, zval, ids[0], "d|Z"); - - // rotation - times.clear(); xval.clear(); yval.clear(); zval.clear(); - for (size_t ki = 0; ki < na->mNumRotationKeys; ++ki) { - const aiQuatKey& k = na->mRotationKeys[ki]; - times.push_back(to_ktime(k.mTime)); - // TODO: aiQuaternion method to convert to Euler... - aiMatrix4x4 m(k.mValue.GetMatrix()); - aiVector3D qs, qr, qt; - m.Decompose(qs, qr, qt); - qr *= DEG; - xval.push_back(qr.x); - yval.push_back(qr.y); - zval.push_back(qr.z); - } - WriteAnimationCurve(outstream, R.x, times, xval, ids[1], "d|X"); - WriteAnimationCurve(outstream, R.y, times, yval, ids[1], "d|Y"); - WriteAnimationCurve(outstream, R.z, times, zval, ids[1], "d|Z"); - - // scaling/scale - times.clear(); xval.clear(); yval.clear(); zval.clear(); - for (size_t ki = 0; ki < na->mNumScalingKeys; ++ki) { - const aiVectorKey& k = na->mScalingKeys[ki]; - times.push_back(to_ktime(k.mTime)); - xval.push_back(k.mValue.x); - yval.push_back(k.mValue.y); - zval.push_back(k.mValue.z); - } - WriteAnimationCurve(outstream, S.x, times, xval, ids[2], "d|X"); - WriteAnimationCurve(outstream, S.y, times, yval, ids[2], "d|Y"); - WriteAnimationCurve(outstream, S.z, times, zval, ids[2], "d|Z"); - } - } - - indent = 0; - object_node.End(outstream, binary, indent, true); -} - -// convenience map of magic node name strings to FBX properties, -// including the expected type of transform. 
-const std::map<std::string,std::pair<std::string,char>> transform_types = { - {"Translation", {"Lcl Translation", 't'}}, - {"RotationOffset", {"RotationOffset", 't'}}, - {"RotationPivot", {"RotationPivot", 't'}}, - {"PreRotation", {"PreRotation", 'r'}}, - {"Rotation", {"Lcl Rotation", 'r'}}, - {"PostRotation", {"PostRotation", 'r'}}, - {"RotationPivotInverse", {"RotationPivotInverse", 'i'}}, - {"ScalingOffset", {"ScalingOffset", 't'}}, - {"ScalingPivot", {"ScalingPivot", 't'}}, - {"Scaling", {"Lcl Scaling", 's'}}, - {"ScalingPivotInverse", {"ScalingPivotInverse", 'i'}}, - {"GeometricScaling", {"GeometricScaling", 's'}}, - {"GeometricRotation", {"GeometricRotation", 'r'}}, - {"GeometricTranslation", {"GeometricTranslation", 't'}}, - {"GeometricTranslationInverse", {"GeometricTranslationInverse", 'i'}}, - {"GeometricRotationInverse", {"GeometricRotationInverse", 'i'}}, - {"GeometricScalingInverse", {"GeometricScalingInverse", 'i'}} -}; - -// write a single model node to the stream -void FBXExporter::WriteModelNode( - StreamWriterLE& outstream, - bool binary, - const aiNode* node, - int64_t node_uid, - const std::string& type, - const std::vector<std::pair<std::string,aiVector3D>>& transform_chain, - TransformInheritance inherit_type -){ - const aiVector3D zero = {0, 0, 0}; - const aiVector3D one = {1, 1, 1}; - FBX::Node m("Model"); - std::string name = node->mName.C_Str() + FBX::SEPARATOR + "Model"; - m.AddProperties(node_uid, name, type); - m.AddChild("Version", int32_t(232)); - FBX::Node p("Properties70"); - p.AddP70bool("RotationActive", 1); - p.AddP70int("DefaultAttributeIndex", 0); - p.AddP70enum("InheritType", inherit_type); - if (transform_chain.empty()) { - // decompose 4x4 transform matrix into TRS - aiVector3D t, r, s; - node->mTransformation.Decompose(s, r, t); - if (t != zero) { - p.AddP70( - "Lcl Translation", "Lcl Translation", "", "A", - double(t.x), double(t.y), double(t.z) - ); - } - if (r != zero) { - p.AddP70( - "Lcl Rotation", "Lcl Rotation", "", "A", - double(DEG*r.x), double(DEG*r.y), double(DEG*r.z) - ); - } - if (s != one) { - p.AddP70( - "Lcl Scaling", "Lcl Scaling", "", "A", - double(s.x), double(s.y), double(s.z) - ); - } - } else { - // apply the transformation chain. - // these transformation elements are created when importing FBX, - // which has a complex transformation hierarchy for each node. - // as such we can bake the hierarchy back into the node on export. 
- for (auto &item : transform_chain) { - auto elem = transform_types.find(item.first); - if (elem == transform_types.end()) { - // then this is a bug - std::stringstream err; - err << "unrecognized FBX transformation type: "; - err << item.first; - throw DeadlyExportError(err.str()); - } - const std::string &name = elem->second.first; - const aiVector3D &v = item.second; - if (name.compare(0, 4, "Lcl ") == 0) { - // special handling for animatable properties - p.AddP70( - name, name, "", "A", - double(v.x), double(v.y), double(v.z) - ); - } else { - p.AddP70vector(name, v.x, v.y, v.z); - } - } - } - m.AddChild(p); - - // not sure what these are for, - // but they seem to be omnipresent - m.AddChild("Shading", FBXExportProperty(true)); - m.AddChild("Culling", FBXExportProperty("CullingOff")); - - m.Dump(outstream, binary, 1); -} - -// wrapper for WriteModelNodes to create and pass a blank transform chain -void FBXExporter::WriteModelNodes( - StreamWriterLE& s, - const aiNode* node, - int64_t parent_uid, - const std::unordered_set<const aiNode*>& limbnodes -) { - std::vector<std::pair<std::string,aiVector3D>> chain; - WriteModelNodes(s, node, parent_uid, limbnodes, chain); -} - -void FBXExporter::WriteModelNodes( - StreamWriterLE& outstream, - const aiNode* node, - int64_t parent_uid, - const std::unordered_set<const aiNode*>& limbnodes, - std::vector<std::pair<std::string,aiVector3D>>& transform_chain -) { - // first collapse any expanded transformation chains created by FBX import. - std::string node_name(node->mName.C_Str()); - if (node_name.find(MAGIC_NODE_TAG) != std::string::npos) { - auto pos = node_name.find(MAGIC_NODE_TAG) + MAGIC_NODE_TAG.size() + 1; - std::string type_name = node_name.substr(pos); - auto elem = transform_types.find(type_name); - if (elem == transform_types.end()) { - // then this is a bug and should be fixed - std::stringstream err; - err << "unrecognized FBX transformation node"; - err << " of type " << type_name << " in node " << node_name; - throw DeadlyExportError(err.str()); - } - aiVector3D t, r, s; - node->mTransformation.Decompose(s, r, t); - switch (elem->second.second) { - case 'i': // inverse - // we don't need to worry about the inverse matrices - break; - case 't': // translation - transform_chain.emplace_back(elem->first, t); - break; - case 'r': // rotation - r *= float(DEG); - transform_chain.emplace_back(elem->first, r); - break; - case 's': // scale - transform_chain.emplace_back(elem->first, s); - break; - default: - // this should never happen - std::stringstream err; - err << "unrecognized FBX transformation type code: "; - err << elem->second.second; - throw DeadlyExportError(err.str()); - } - // now continue on to any child nodes - for (unsigned i = 0; i < node->mNumChildren; ++i) { - WriteModelNodes( - outstream, - node->mChildren[i], - parent_uid, - limbnodes, - transform_chain - ); - } - return; - } - - int64_t node_uid = 0; - // generate uid and connect to parent, if not the root node, - if (node != mScene->mRootNode) { - auto elem = node_uids.find(node); - if (elem != node_uids.end()) { - node_uid = elem->second; - } else { - node_uid = generate_uid(); - node_uids[node] = node_uid; - } - connections.emplace_back("C", "OO", node_uid, parent_uid); - } - - // what type of node is this? 
- if (node == mScene->mRootNode) { - // handled later - } else if (node->mNumMeshes == 1) { - // connect to child mesh, which should have been written previously - connections.emplace_back( - "C", "OO", mesh_uids[node->mMeshes[0]], node_uid - ); - // also connect to the material for the child mesh - connections.emplace_back( - "C", "OO", - material_uids[mScene->mMeshes[node->mMeshes[0]]->mMaterialIndex], - node_uid - ); - // write model node - WriteModelNode( - outstream, binary, node, node_uid, "Mesh", transform_chain - ); - } else if (limbnodes.count(node)) { - WriteModelNode( - outstream, binary, node, node_uid, "LimbNode", transform_chain - ); - // we also need to write a nodeattribute to mark it as a skeleton - int64_t node_attribute_uid = generate_uid(); - FBX::Node na("NodeAttribute"); - na.AddProperties( - node_attribute_uid, FBX::SEPARATOR + "NodeAttribute", "LimbNode" - ); - na.AddChild("TypeFlags", FBXExportProperty("Skeleton")); - na.Dump(outstream, binary, 1); - // and connect them - connections.emplace_back("C", "OO", node_attribute_uid, node_uid); - } else { - // generate a null node so we can add children to it - WriteModelNode( - outstream, binary, node, node_uid, "Null", transform_chain - ); - } - - // if more than one child mesh, make nodes for each mesh - if (node->mNumMeshes > 1 || node == mScene->mRootNode) { - for (size_t i = 0; i < node->mNumMeshes; ++i) { - // make a new model node - int64_t new_node_uid = generate_uid(); - // connect to parent node - connections.emplace_back("C", "OO", new_node_uid, node_uid); - // connect to child mesh, which should have been written previously - connections.emplace_back( - "C", "OO", mesh_uids[node->mMeshes[i]], new_node_uid - ); - // also connect to the material for the child mesh - connections.emplace_back( - "C", "OO", - material_uids[ - mScene->mMeshes[node->mMeshes[i]]->mMaterialIndex - ], - new_node_uid - ); - // write model node - FBX::Node m("Model"); - // take name from mesh name, if it exists - std::string name = mScene->mMeshes[node->mMeshes[i]]->mName.C_Str(); - name += FBX::SEPARATOR + "Model"; - m.AddProperties(new_node_uid, name, "Mesh"); - m.AddChild("Version", int32_t(232)); - FBX::Node p("Properties70"); - p.AddP70enum("InheritType", 1); - m.AddChild(p); - m.Dump(outstream, binary, 1); - } - } - - // now recurse into children - for (size_t i = 0; i < node->mNumChildren; ++i) { - WriteModelNodes( - outstream, node->mChildren[i], node_uid, limbnodes - ); - } -} - - -void FBXExporter::WriteAnimationCurveNode( - StreamWriterLE& outstream, - int64_t uid, - const std::string& name, // "T", "R", or "S" - aiVector3D default_value, - std::string property_name, // "Lcl Translation" etc - int64_t layer_uid, - int64_t node_uid -) { - FBX::Node n("AnimationCurveNode"); - n.AddProperties(uid, name + FBX::SEPARATOR + "AnimCurveNode", ""); - FBX::Node p("Properties70"); - p.AddP70numberA("d|X", default_value.x); - p.AddP70numberA("d|Y", default_value.y); - p.AddP70numberA("d|Z", default_value.z); - n.AddChild(p); - n.Dump(outstream, binary, 1); - // connect to layer - this->connections.emplace_back("C", "OO", uid, layer_uid); - // connect to bone - this->connections.emplace_back("C", "OP", uid, node_uid, property_name); -} - - -void FBXExporter::WriteAnimationCurve( - StreamWriterLE& outstream, - double default_value, - const std::vector<int64_t>& times, - const std::vector<float>& values, - int64_t curvenode_uid, - const std::string& property_link // "d|X", "d|Y", etc -) { - FBX::Node n("AnimationCurve"); - int64_t curve_uid 
= generate_uid(); - n.AddProperties(curve_uid, FBX::SEPARATOR + "AnimCurve", ""); - n.AddChild("Default", default_value); - n.AddChild("KeyVer", int32_t(4009)); - n.AddChild("KeyTime", times); - n.AddChild("KeyValueFloat", values); - // TODO: keyattr flags and data (STUB for now) - n.AddChild("KeyAttrFlags", std::vector<int32_t>{0}); - n.AddChild("KeyAttrDataFloat", std::vector<float>{0,0,0,0}); - n.AddChild( - "KeyAttrRefCount", - std::vector<int32_t>{static_cast<int32_t>(times.size())} - ); - n.Dump(outstream, binary, 1); - this->connections.emplace_back( - "C", "OP", curve_uid, curvenode_uid, property_link - ); -} - - -void FBXExporter::WriteConnections () -{ - // we should have completed the connection graph already, - // so basically just dump it here - if (!binary) { - WriteAsciiSectionHeader("Object connections"); - } - // TODO: comments with names in the ascii version - FBX::Node conn("Connections"); - StreamWriterLE outstream(outfile); - conn.Begin(outstream, binary, 0); - conn.BeginChildren(outstream, binary, 0); - for (auto &n : connections) { - n.Dump(outstream, binary, 1); - } - conn.End(outstream, binary, 0, !connections.empty()); - connections.clear(); -} - -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER -#endif // ASSIMP_BUILD_NO_EXPORT diff --git a/thirdparty/assimp/code/FBX/FBXExporter.h b/thirdparty/assimp/code/FBX/FBXExporter.h deleted file mode 100644 index 1ae727eda9..0000000000 --- a/thirdparty/assimp/code/FBX/FBXExporter.h +++ /dev/null @@ -1,178 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXExporter.h -* Declares the exporter class to write a scene to an fbx file -*/ -#ifndef AI_FBXEXPORTER_H_INC -#define AI_FBXEXPORTER_H_INC - -#ifndef ASSIMP_BUILD_NO_FBX_EXPORTER - -#include "FBXExportNode.h" // FBX::Node -#include "FBXCommon.h" // FBX::TransformInheritance - -#include <assimp/types.h> -//#include <assimp/material.h> -#include <assimp/StreamWriter.h> // StreamWriterLE -#include <assimp/Exceptional.h> // DeadlyExportError - -#include <vector> -#include <map> -#include <unordered_set> -#include <memory> // shared_ptr -#include <sstream> // stringstream - -struct aiScene; -struct aiNode; -//struct aiMaterial; - -namespace Assimp -{ - class IOSystem; - class IOStream; - class ExportProperties; - - // --------------------------------------------------------------------- - /** Helper class to export a given scene to an FBX file. */ - // --------------------------------------------------------------------- - class FBXExporter - { - public: - /// Constructor for a specific scene to export - FBXExporter(const aiScene* pScene, const ExportProperties* pProperties); - - // call one of these methods to export - void ExportBinary(const char* pFile, IOSystem* pIOSystem); - void ExportAscii(const char* pFile, IOSystem* pIOSystem); - - private: - bool binary; // whether current export is in binary or ascii format - const aiScene* mScene; // the scene to export - const ExportProperties* mProperties; // currently unused - std::shared_ptr<IOStream> outfile; // file to write to - - std::vector<FBX::Node> connections; // connection storage - - std::vector<int64_t> mesh_uids; - std::vector<int64_t> material_uids; - std::map<const aiNode*,int64_t> node_uids; - - // this crude unique-ID system is actually fine - int64_t last_uid = 999999; - int64_t generate_uid() { return ++last_uid; } - - // binary files have a specific header and footer, - // in addition to the actual data - void WriteBinaryHeader(); - void WriteBinaryFooter(); - - // ascii files have a comment at the top - void WriteAsciiHeader(); - - // WriteAllNodes does the actual export. - // It just calls all the Write<Section> methods below in order. - void WriteAllNodes(); - - // Methods to write individual sections. - // The order here matches the order inside an FBX file. - // Each method corresponds to a top-level FBX section, - // except WriteHeader which also includes some binary-only sections - // and WriteFooter which is binary data only. 
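The exporter code above threads every object relationship through flat "C" connection records: "OO" links a child object UID to a parent object UID, while "OP" links an object UID to a named property on its destination (an AnimationCurve to "d|X" on its curve node, the curve node to "Lcl Translation" on the animated model). A minimal standalone sketch of that bookkeeping, using plain structs rather than the removed FBX::Node API (the Connection type and main() driver below are illustrative only):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // One FBX "C" (connection) record: object-to-object ("OO") or
    // object-to-property ("OP", with the destination property name).
    struct Connection {
        std::string kind;      // "OO" or "OP"
        int64_t src;           // UID of the child / source object
        int64_t dst;           // UID of the parent / destination object
        std::string property;  // only used for "OP" connections
    };

    // The same crude-but-sufficient UID scheme as the removed exporter:
    // a monotonically increasing 64-bit counter.
    static int64_t last_uid = 999999;
    static int64_t generate_uid() { return ++last_uid; }

    int main() {
        std::vector<Connection> connections;

        const int64_t layer_uid      = generate_uid();  // AnimationLayer
        const int64_t curve_node_uid = generate_uid();  // AnimationCurveNode "T"
        const int64_t model_uid      = generate_uid();  // the animated Model
        const int64_t curve_x_uid    = generate_uid();  // AnimationCurve for X

        // curve node -> layer (plain object-to-object link)
        connections.push_back({"OO", curve_node_uid, layer_uid, ""});
        // curve node -> the model's "Lcl Translation" property
        connections.push_back({"OP", curve_node_uid, model_uid, "Lcl Translation"});
        // curve -> the curve node's "d|X" channel
        connections.push_back({"OP", curve_x_uid, curve_node_uid, "d|X"});

        for (const Connection& c : connections) {
            std::cout << "C " << c.kind << ' ' << c.src << " -> " << c.dst;
            if (!c.property.empty()) {
                std::cout << " (" << c.property << ')';
            }
            std::cout << '\n';
        }
        return 0;
    }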
- void WriteHeaderExtension(); - // WriteFileId(); // binary-only, included in WriteHeader - // WriteCreationTime(); // binary-only, included in WriteHeader - // WriteCreator(); // binary-only, included in WriteHeader - void WriteGlobalSettings(); - void WriteDocuments(); - void WriteReferences(); - void WriteDefinitions(); - void WriteObjects(); - void WriteConnections(); - // WriteTakes(); // deprecated since at least 2015 (fbx 7.4) - - // helpers - void WriteAsciiSectionHeader(const std::string& title); - void WriteModelNodes( - Assimp::StreamWriterLE& s, - const aiNode* node, - int64_t parent_uid, - const std::unordered_set<const aiNode*>& limbnodes - ); - void WriteModelNodes( // usually don't call this directly - StreamWriterLE& s, - const aiNode* node, - int64_t parent_uid, - const std::unordered_set<const aiNode*>& limbnodes, - std::vector<std::pair<std::string,aiVector3D>>& transform_chain - ); - void WriteModelNode( // nor this - StreamWriterLE& s, - bool binary, - const aiNode* node, - int64_t node_uid, - const std::string& type, - const std::vector<std::pair<std::string,aiVector3D>>& xfm_chain, - FBX::TransformInheritance ti_type=FBX::TransformInheritance_RSrs - ); - void WriteAnimationCurveNode( - StreamWriterLE& outstream, - int64_t uid, - const std::string& name, // "T", "R", or "S" - aiVector3D default_value, - std::string property_name, // "Lcl Translation" etc - int64_t animation_layer_uid, - int64_t node_uid - ); - void WriteAnimationCurve( - StreamWriterLE& outstream, - double default_value, - const std::vector<int64_t>& times, - const std::vector<float>& values, - int64_t curvenode_id, - const std::string& property_link // "d|X", "d|Y", etc - ); - }; -} - -#endif // ASSIMP_BUILD_NO_FBX_EXPORTER - -#endif // AI_FBXEXPORTER_H_INC diff --git a/thirdparty/assimp/code/FBX/FBXImportSettings.h b/thirdparty/assimp/code/FBX/FBXImportSettings.h deleted file mode 100644 index 1a4c80f8b2..0000000000 --- a/thirdparty/assimp/code/FBX/FBXImportSettings.h +++ /dev/null @@ -1,164 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXImportSettings.h - * @brief FBX importer runtime configuration - */ -#ifndef INCLUDED_AI_FBX_IMPORTSETTINGS_H -#define INCLUDED_AI_FBX_IMPORTSETTINGS_H - -namespace Assimp { -namespace FBX { - -/** FBX import settings, parts of which are publicly accessible via their corresponding AI_CONFIG constants */ -struct ImportSettings -{ - ImportSettings() - : strictMode(true) - , readAllLayers(true) - , readAllMaterials(false) - , readMaterials(true) - , readTextures(true) - , readCameras(true) - , readLights(true) - , readAnimations(true) - , readWeights(true) - , preservePivots(true) - , optimizeEmptyAnimationCurves(true) - , useLegacyEmbeddedTextureNaming(false) - , removeEmptyBones( true ) - , convertToMeters( false ) { - // empty - } - - - /** enable strict mode: - * - only accept fbx 2012, 2013 files - * - on the slightest error, give up. - * - * Basically, strict mode means that the fbx file will actually - * be validated. Strict mode is off by default. */ - bool strictMode; - - /** specifies whether all geometry layers are read and scanned for - * usable data channels. The FBX spec indicates that many readers - * will only read the first channel and that this is in some way - * the recommended way- in reality, however, it happens a lot that - * vertex data is spread among multiple layers. The default - * value for this option is true.*/ - bool readAllLayers; - - /** specifies whether all materials are read, or only those that - * are referenced by at least one mesh. Reading all materials - * may make FBX reading a lot slower since all objects - * need to be processed . - * This bit is ignored unless readMaterials=true*/ - bool readAllMaterials; - - - /** import materials (true) or skip them and assign a default - * material. The default value is true.*/ - bool readMaterials; - - /** import embedded textures? Default value is true.*/ - bool readTextures; - - /** import cameras? Default value is true.*/ - bool readCameras; - - /** import light sources? Default value is true.*/ - bool readLights; - - /** import animations (i.e. animation curves, the node - * skeleton is always imported). Default value is true. */ - bool readAnimations; - - /** read bones (vertex weights and deform info). - * Default value is true. */ - bool readWeights; - - /** preserve transformation pivots and offsets. Since these can - * not directly be represented in assimp, additional dummy - * nodes will be generated. Note that settings this to false - * can make animation import a lot slower. The default value - * is true. 
- * - * The naming scheme for the generated nodes is: - * <OriginalName>_$AssimpFbx$_<TransformName> - * - * where <TransformName> is one of - * RotationPivot - * RotationOffset - * PreRotation - * PostRotation - * ScalingPivot - * ScalingOffset - * Translation - * Scaling - * Rotation - **/ - bool preservePivots; - - /** do not import animation curves that specify a constant - * values matching the corresponding node transformation. - * The default value is true. */ - bool optimizeEmptyAnimationCurves; - - /** use legacy naming for embedded textures eg: (*0, *1, *2) - */ - bool useLegacyEmbeddedTextureNaming; - - /** Empty bones shall be removed - */ - bool removeEmptyBones; - - /** Set to true to perform a conversion from cm to meter after the import - */ - bool convertToMeters; -}; - - -} // !FBX -} // !Assimp - -#endif - diff --git a/thirdparty/assimp/code/FBX/FBXImporter.cpp b/thirdparty/assimp/code/FBX/FBXImporter.cpp deleted file mode 100644 index afcc1ddc78..0000000000 --- a/thirdparty/assimp/code/FBX/FBXImporter.cpp +++ /dev/null @@ -1,198 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. -r -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXImporter.cpp - * @brief Implementation of the FBX importer. 
- */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXImporter.h" - -#include "FBXConverter.h" -#include "FBXDocument.h" -#include "FBXParser.h" -#include "FBXTokenizer.h" -#include "FBXUtil.h" - -#include <assimp/MemoryIOWrapper.h> -#include <assimp/StreamReader.h> -#include <assimp/importerdesc.h> -#include <assimp/Importer.hpp> - -namespace Assimp { - -template <> -const char *LogFunctions<FBXImporter>::Prefix() { - static auto prefix = "FBX: "; - return prefix; -} - -} // namespace Assimp - -using namespace Assimp; -using namespace Assimp::Formatter; -using namespace Assimp::FBX; - -namespace { - -static const aiImporterDesc desc = { - "Autodesk FBX Importer", - "", - "", - "", - aiImporterFlags_SupportTextFlavour, - 0, - 0, - 0, - 0, - "fbx" -}; -} - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by #Importer -FBXImporter::FBXImporter() { -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FBXImporter::~FBXImporter() { -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the class can handle the format of the given file. -bool FBXImporter::CanRead(const std::string &pFile, IOSystem *pIOHandler, bool checkSig) const { - const std::string &extension = GetExtension(pFile); - if (extension == std::string(desc.mFileExtensions)) { - return true; - } - - else if ((!extension.length() || checkSig) && pIOHandler) { - // at least ASCII-FBX files usually have a 'FBX' somewhere in their head - const char *tokens[] = { "fbx" }; - return SearchFileHeaderForToken(pIOHandler, pFile, tokens, 1); - } - return false; -} - -// ------------------------------------------------------------------------------------------------ -// List all extensions handled by this loader -const aiImporterDesc *FBXImporter::GetInfo() const { - return &desc; -} - -// ------------------------------------------------------------------------------------------------ -// Setup configuration properties for the loader -void FBXImporter::SetupProperties(const Importer *pImp) { - settings.readAllLayers = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS, true); - settings.readAllMaterials = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_ALL_MATERIALS, false); - settings.readMaterials = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_MATERIALS, true); - settings.readTextures = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_TEXTURES, true); - settings.readCameras = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_CAMERAS, true); - settings.readLights = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_LIGHTS, true); - settings.readAnimations = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_ANIMATIONS, true); - settings.strictMode = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_STRICT_MODE, false); - settings.preservePivots = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS, true); - settings.optimizeEmptyAnimationCurves = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_OPTIMIZE_EMPTY_ANIMATION_CURVES, true); - settings.useLegacyEmbeddedTextureNaming = pImp->GetPropertyBool(AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING, false); - settings.removeEmptyBones = pImp->GetPropertyBool(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, true); - settings.convertToMeters = pImp->GetPropertyBool(AI_CONFIG_FBX_CONVERT_TO_M, false); -} - -// 
------------------------------------------------------------------------------------------------ -// Imports the given file into the given scene structure. -void FBXImporter::InternReadFile(const std::string &pFile, aiScene *pScene, IOSystem *pIOHandler) { - std::unique_ptr<IOStream> stream(pIOHandler->Open(pFile, "rb")); - if (!stream) { - ThrowException("Could not open file for reading"); - } - - // read entire file into memory - no streaming for this, fbx - // files can grow large, but the assimp output data structure - // then becomes very large, too. Assimp doesn't support - // streaming for its output data structures so the net win with - // streaming input data would be very low. - std::vector<char> contents; - contents.resize(stream->FileSize() + 1); - stream->Read(&*contents.begin(), 1, contents.size() - 1); - contents[contents.size() - 1] = 0; - const char *const begin = &*contents.begin(); - - // broadphase tokenizing pass in which we identify the core - // syntax elements of FBX (brackets, commas, key:value mappings) - TokenList tokens; - try { - - bool is_binary = false; - if (!strncmp(begin, "Kaydara FBX Binary", 18)) { - is_binary = true; - TokenizeBinary(tokens, begin, contents.size()); - } else { - Tokenize(tokens, begin); - } - - // use this information to construct a very rudimentary - // parse-tree representing the FBX scope structure - Parser parser(tokens, is_binary); - - // take the raw parse-tree and convert it to a FBX DOM - Document doc(parser, settings); - - // convert the FBX DOM to aiScene - ConvertToAssimpScene(pScene, doc, settings.removeEmptyBones); - - // size relative to cm - float size_relative_to_cm = doc.GlobalSettings().UnitScaleFactor(); - - // Set FBX file scale is relative to CM must be converted to M for - // assimp universal format (M) - SetFileScale(size_relative_to_cm * 0.01f); - - std::for_each(tokens.begin(), tokens.end(), Util::delete_fun<Token>()); - } catch (std::exception &) { - std::for_each(tokens.begin(), tokens.end(), Util::delete_fun<Token>()); - throw; - } -} - -#endif // !ASSIMP_BUILD_NO_FBX_IMPORTER diff --git a/thirdparty/assimp/code/FBX/FBXImporter.h b/thirdparty/assimp/code/FBX/FBXImporter.h deleted file mode 100644 index c365b2cddf..0000000000 --- a/thirdparty/assimp/code/FBX/FBXImporter.h +++ /dev/null @@ -1,100 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
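SetupProperties() above copies AI_CONFIG_* importer properties into the FBX ImportSettings struct, so callers tuned this (now removed) importer through Assimp::Importer::SetPropertyBool() before calling ReadFile(). A short usage sketch against the public assimp API; "model.fbx" is a placeholder path:

    #include <assimp/Importer.hpp>
    #include <assimp/postprocess.h>
    #include <assimp/scene.h>
    #include <assimp/config.h>
    #include <iostream>

    int main() {
        Assimp::Importer importer;

        // These properties are read back by FBXImporter::SetupProperties()
        // and copied into the ImportSettings struct shown above.
        importer.SetPropertyBool(AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS, false);
        importer.SetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS, true);
        importer.SetPropertyBool(AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES, true);
        importer.SetPropertyBool(AI_CONFIG_FBX_CONVERT_TO_M, false);

        // "model.fbx" is a placeholder path.
        const aiScene* scene = importer.ReadFile("model.fbx", aiProcess_Triangulate);
        if (!scene) {
            std::cerr << "import failed: " << importer.GetErrorString() << '\n';
            return 1;
        }
        std::cout << "meshes: " << scene->mNumMeshes << '\n';
        return 0;
    }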
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXImporter.h - * @brief Declaration of the FBX main importer class - */ -#ifndef INCLUDED_AI_FBX_IMPORTER_H -#define INCLUDED_AI_FBX_IMPORTER_H - -#include <assimp/BaseImporter.h> -#include <assimp/LogAux.h> - -#include "FBXImportSettings.h" - -namespace Assimp { - -// TinyFormatter.h -namespace Formatter { - template <typename T,typename TR, typename A> class basic_formatter; - typedef class basic_formatter< char, std::char_traits<char>, std::allocator<char> > format; -} - -// ------------------------------------------------------------------------------------------- -/** Load the Autodesk FBX file format. - - See http://en.wikipedia.org/wiki/FBX -*/ -// ------------------------------------------------------------------------------------------- -class FBXImporter : public BaseImporter, public LogFunctions<FBXImporter> -{ -public: - FBXImporter(); - virtual ~FBXImporter(); - - // -------------------- - bool CanRead( const std::string& pFile, - IOSystem* pIOHandler, - bool checkSig - ) const; - -protected: - - // -------------------- - const aiImporterDesc* GetInfo () const; - - // -------------------- - void SetupProperties(const Importer* pImp); - - // -------------------- - void InternReadFile( const std::string& pFile, - aiScene* pScene, - IOSystem* pIOHandler - ); - -private: - FBX::ImportSettings settings; -}; // !class FBXImporter - -} // end of namespace Assimp -#endif // !INCLUDED_AI_FBX_IMPORTER_H - diff --git a/thirdparty/assimp/code/FBX/FBXMaterial.cpp b/thirdparty/assimp/code/FBX/FBXMaterial.cpp deleted file mode 100644 index f43a8b84b0..0000000000 --- a/thirdparty/assimp/code/FBX/FBXMaterial.cpp +++ /dev/null @@ -1,405 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXMaterial.cpp - * @brief Assimp::FBX::Material and Assimp::FBX::Texture implementation - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXImporter.h" -#include "FBXImportSettings.h" -#include "FBXDocumentUtil.h" -#include "FBXProperties.h" -#include <assimp/ByteSwapper.h> - -#include <algorithm> // std::transform -#include "FBXUtil.h" - -namespace Assimp { -namespace FBX { - - using namespace Util; - -// ------------------------------------------------------------------------------------------------ -Material::Material(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -{ - const Scope& sc = GetRequiredScope(element); - - const Element* const ShadingModel = sc["ShadingModel"]; - const Element* const MultiLayer = sc["MultiLayer"]; - - if(MultiLayer) { - multilayer = !!ParseTokenAsInt(GetRequiredToken(*MultiLayer,0)); - } - - if(ShadingModel) { - shading = ParseTokenAsString(GetRequiredToken(*ShadingModel,0)); - } - else { - DOMWarning("shading mode not specified, assuming phong",&element); - shading = "phong"; - } - - std::string templateName; - - // lower-case shading because Blender (for example) writes "Phong" - std::transform(shading.begin(), shading.end(), shading.begin(), ::tolower); - if(shading == "phong") { - templateName = "Material.FbxSurfacePhong"; - } - else if(shading == "lambert") { - templateName = "Material.FbxSurfaceLambert"; - } - else { - DOMWarning("shading mode not recognized: " + shading,&element); - } - - props = GetPropertyTable(doc,templateName,element,sc); - - // resolve texture links - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID()); - for(const Connection* con : conns) { - - // texture link to properties, not objects - if (!con->PropertyName().length()) { - continue; - } - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for texture link, ignoring",&element); - continue; - } - - const Texture* const tex = dynamic_cast<const Texture*>(ob); - if(!tex) { - const LayeredTexture* const layeredTexture = dynamic_cast<const LayeredTexture*>(ob); - if(!layeredTexture) { - DOMWarning("source object for texture link is not a texture or layered texture, ignoring",&element); - continue; - } - const std::string& prop = con->PropertyName(); - if (layeredTextures.find(prop) != layeredTextures.end()) { - DOMWarning("duplicate layered texture link: " + prop,&element); - } - - layeredTextures[prop] = layeredTexture; - ((LayeredTexture*)layeredTexture)->fillTexture(doc); - } - else - { - const std::string& prop = con->PropertyName(); - if (textures.find(prop) != textures.end()) { - DOMWarning("duplicate texture link: " + prop,&element); - } - - textures[prop] = tex; - } - - } -} - - -// 
------------------------------------------------------------------------------------------------ -Material::~Material() -{ -} - - -// ------------------------------------------------------------------------------------------------ -Texture::Texture(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -, uvScaling(1.0f,1.0f) -, media(0) -{ - const Scope& sc = GetRequiredScope(element); - - const Element* const Type = sc["Type"]; - const Element* const FileName = sc["FileName"]; - const Element* const RelativeFilename = sc["RelativeFilename"]; - const Element* const ModelUVTranslation = sc["ModelUVTranslation"]; - const Element* const ModelUVScaling = sc["ModelUVScaling"]; - const Element* const Texture_Alpha_Source = sc["Texture_Alpha_Source"]; - const Element* const Cropping = sc["Cropping"]; - - if(Type) { - type = ParseTokenAsString(GetRequiredToken(*Type,0)); - } - - if(FileName) { - fileName = ParseTokenAsString(GetRequiredToken(*FileName,0)); - } - - if(RelativeFilename) { - relativeFileName = ParseTokenAsString(GetRequiredToken(*RelativeFilename,0)); - } - - if(ModelUVTranslation) { - uvTrans = aiVector2D(ParseTokenAsFloat(GetRequiredToken(*ModelUVTranslation,0)), - ParseTokenAsFloat(GetRequiredToken(*ModelUVTranslation,1)) - ); - } - - if(ModelUVScaling) { - uvScaling = aiVector2D(ParseTokenAsFloat(GetRequiredToken(*ModelUVScaling,0)), - ParseTokenAsFloat(GetRequiredToken(*ModelUVScaling,1)) - ); - } - - if(Cropping) { - crop[0] = ParseTokenAsInt(GetRequiredToken(*Cropping,0)); - crop[1] = ParseTokenAsInt(GetRequiredToken(*Cropping,1)); - crop[2] = ParseTokenAsInt(GetRequiredToken(*Cropping,2)); - crop[3] = ParseTokenAsInt(GetRequiredToken(*Cropping,3)); - } - else { - // vc8 doesn't support the crop() syntax in initialization lists - // (and vc9 WARNS about the new (i.e. compliant) behaviour). - crop[0] = crop[1] = crop[2] = crop[3] = 0; - } - - if(Texture_Alpha_Source) { - alphaSource = ParseTokenAsString(GetRequiredToken(*Texture_Alpha_Source,0)); - } - - props = GetPropertyTable(doc,"Texture.FbxFileTexture",element,sc); - - // 3DS Max and FBX SDK use "Scaling" and "Translation" instead of "ModelUVScaling" and "ModelUVTranslation". Use these properties if available. 
- bool ok; - const aiVector3D& scaling = PropertyGet<aiVector3D>(*props, "Scaling", ok); - if (ok) { - uvScaling.x = scaling.x; - uvScaling.y = scaling.y; - } - - const aiVector3D& trans = PropertyGet<aiVector3D>(*props, "Translation", ok); - if (ok) { - uvTrans.x = trans.x; - uvTrans.y = trans.y; - } - - // resolve video links - if(doc.Settings().readTextures) { - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID()); - for(const Connection* con : conns) { - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for texture link, ignoring",&element); - continue; - } - - const Video* const video = dynamic_cast<const Video*>(ob); - if(video) { - media = video; - } - } - } -} - - -Texture::~Texture() -{ - -} - -LayeredTexture::LayeredTexture(uint64_t id, const Element& element, const Document& /*doc*/, const std::string& name) -: Object(id,element,name) -,blendMode(BlendMode_Modulate) -,alpha(1) -{ - const Scope& sc = GetRequiredScope(element); - - const Element* const BlendModes = sc["BlendModes"]; - const Element* const Alphas = sc["Alphas"]; - - - if(BlendModes!=0) - { - blendMode = (BlendMode)ParseTokenAsInt(GetRequiredToken(*BlendModes,0)); - } - if(Alphas!=0) - { - alpha = ParseTokenAsFloat(GetRequiredToken(*Alphas,0)); - } -} - -LayeredTexture::~LayeredTexture() -{ - -} - -void LayeredTexture::fillTexture(const Document& doc) -{ - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID()); - for(size_t i = 0; i < conns.size();++i) - { - const Connection* con = conns.at(i); - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for texture link, ignoring",&element); - continue; - } - - const Texture* const tex = dynamic_cast<const Texture*>(ob); - - textures.push_back(tex); - } -} - - -// ------------------------------------------------------------------------------------------------ -Video::Video(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -, contentLength(0) -, content(0) -{ - const Scope& sc = GetRequiredScope(element); - - const Element* const Type = sc["Type"]; - const Element* const FileName = sc.FindElementCaseInsensitive("FileName"); //some files retain the information as "Filename", others "FileName", who knows - const Element* const RelativeFilename = sc["RelativeFilename"]; - const Element* const Content = sc["Content"]; - - if(Type) { - type = ParseTokenAsString(GetRequiredToken(*Type,0)); - } - - if(FileName) { - fileName = ParseTokenAsString(GetRequiredToken(*FileName,0)); - } - - if(RelativeFilename) { - relativeFileName = ParseTokenAsString(GetRequiredToken(*RelativeFilename,0)); - } - - if(Content && !Content->Tokens().empty()) { - //this field is omitted when the embedded texture is already loaded, let's ignore if it's not found - try { - const Token& token = GetRequiredToken(*Content, 0); - const char* data = token.begin(); - if (!token.IsBinary()) { - if (*data != '"') { - DOMError("embedded content is not surrounded by quotation marks", &element); - } - else { - size_t targetLength = 0; - auto numTokens = Content->Tokens().size(); - // First time compute size (it could be large like 64Gb and it is good to allocate it once) - for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) - { - const Token& dataToken = GetRequiredToken(*Content, tokenIdx); - size_t tokenLength = dataToken.end() - dataToken.begin() - 2; // ignore double 
quotes - const char* base64data = dataToken.begin() + 1; - const size_t outLength = Util::ComputeDecodedSizeBase64(base64data, tokenLength); - if (outLength == 0) - { - DOMError("Corrupted embedded content found", &element); - } - targetLength += outLength; - } - if (targetLength == 0) - { - DOMError("Corrupted embedded content found", &element); - } - content = new uint8_t[targetLength]; - contentLength = static_cast<uint64_t>(targetLength); - size_t dst_offset = 0; - for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) - { - const Token& dataToken = GetRequiredToken(*Content, tokenIdx); - size_t tokenLength = dataToken.end() - dataToken.begin() - 2; // ignore double quotes - const char* base64data = dataToken.begin() + 1; - dst_offset += Util::DecodeBase64(base64data, tokenLength, content + dst_offset, targetLength - dst_offset); - } - if (targetLength != dst_offset) - { - delete[] content; - contentLength = 0; - DOMError("Corrupted embedded content found", &element); - } - } - } - else if (static_cast<size_t>(token.end() - data) < 5) { - DOMError("binary data array is too short, need five (5) bytes for type signature and element count", &element); - } - else if (*data != 'R') { - DOMWarning("video content is not raw binary data, ignoring", &element); - } - else { - // read number of elements - uint32_t len = 0; - ::memcpy(&len, data + 1, sizeof(len)); - AI_SWAP4(len); - - contentLength = len; - - content = new uint8_t[len]; - ::memcpy(content, data + 5, len); - } - } catch (const runtime_error& runtimeError) - { - //we don't need the content data for contents that has already been loaded - ASSIMP_LOG_DEBUG_F("Caught exception in FBXMaterial (likely because content was already loaded): ", - runtimeError.what()); - } - } - - props = GetPropertyTable(doc,"Video.FbxVideo",element,sc); -} - - -Video::~Video() -{ - if(content) { - delete[] content; - } -} - -} //!FBX -} //!Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXMeshGeometry.cpp b/thirdparty/assimp/code/FBX/FBXMeshGeometry.cpp deleted file mode 100644 index 1386e2383c..0000000000 --- a/thirdparty/assimp/code/FBX/FBXMeshGeometry.cpp +++ /dev/null @@ -1,709 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
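The Video constructor earlier in this diff decodes embedded "Content" in two passes: it first sums the decoded size of every base64 token, allocates one buffer, then decodes token by token into it. A self-contained sketch of that pattern with a plain base64 decoder (the helpers below are illustrative, not the removed FBXUtil routines):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // Map a base64 character to its 6-bit value; -1 for padding/invalid.
    static int b64_value(char c) {
        if (c >= 'A' && c <= 'Z') return c - 'A';
        if (c >= 'a' && c <= 'z') return c - 'a' + 26;
        if (c >= '0' && c <= '9') return c - '0' + 52;
        if (c == '+') return 62;
        if (c == '/') return 63;
        return -1;
    }

    // First pass: how many bytes a token decodes to.
    static size_t decoded_size(const std::string& s) {
        size_t n = 0;
        for (char c : s) if (b64_value(c) >= 0) ++n;
        return (n * 6) / 8;
    }

    // Second pass: decode one token into a pre-sized buffer.
    static void decode_into(const std::string& s, uint8_t* out) {
        uint32_t buf = 0; int bits = 0;
        for (char c : s) {
            int v = b64_value(c);
            if (v < 0) continue;              // skip '=' padding
            buf = (buf << 6) | uint32_t(v);
            bits += 6;
            if (bits >= 8) {
                bits -= 8;
                *out++ = uint8_t((buf >> bits) & 0xFF);
            }
        }
    }

    int main() {
        // Embedded FBX "Content" can be split across several quoted tokens;
        // size them all first, allocate once, then decode chunk by chunk.
        std::vector<std::string> tokens = {"SGVsbG8s", "IEZCWCE="}; // "Hello, FBX!"
        size_t total = 0;
        for (const auto& t : tokens) total += decoded_size(t);

        std::vector<uint8_t> content(total);
        size_t offset = 0;
        for (const auto& t : tokens) {
            decode_into(t, content.data() + offset);
            offset += decoded_size(t);
        }
        std::cout << std::string(content.begin(), content.end()) << '\n';
        return 0;
    }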
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXMeshGeometry.cpp - * @brief Assimp::FBX::MeshGeometry implementation - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include <functional> - -#include "FBXMeshGeometry.h" -#include "FBXDocument.h" -#include "FBXImporter.h" -#include "FBXImportSettings.h" -#include "FBXDocumentUtil.h" - - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -Geometry::Geometry(uint64_t id, const Element& element, const std::string& name, const Document& doc) - : Object(id, element, name) - , skin() -{ - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),"Deformer"); - for(const Connection* con : conns) { - const Skin* const sk = ProcessSimpleConnection<Skin>(*con, false, "Skin -> Geometry", element); - if(sk) { - skin = sk; - } - const BlendShape* const bsp = ProcessSimpleConnection<BlendShape>(*con, false, "BlendShape -> Geometry", element); - if (bsp) { - blendShapes.push_back(bsp); - } - } -} - -// ------------------------------------------------------------------------------------------------ -Geometry::~Geometry() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<const BlendShape*>& Geometry::GetBlendShapes() const { - return blendShapes; -} - -// ------------------------------------------------------------------------------------------------ -const Skin* Geometry::DeformerSkin() const { - return skin; -} - -// ------------------------------------------------------------------------------------------------ -MeshGeometry::MeshGeometry(uint64_t id, const Element& element, const std::string& name, const Document& doc) -: Geometry(id, element,name, doc) -{ - const Scope* sc = element.Compound(); - if (!sc) { - DOMError("failed to read Geometry object (class: Mesh), no data scope found"); - } - - // must have Mesh elements: - const Element& Vertices = GetRequiredElement(*sc,"Vertices",&element); - const Element& PolygonVertexIndex = GetRequiredElement(*sc,"PolygonVertexIndex",&element); - - // optional Mesh elements: - const ElementCollection& Layer = sc->GetCollection("Layer"); - - std::vector<aiVector3D> tempVerts; - ParseVectorDataArray(tempVerts,Vertices); - - if(tempVerts.empty()) { - FBXImporter::LogWarn("encountered mesh with no vertices"); - } - - std::vector<int> tempFaces; - ParseVectorDataArray(tempFaces,PolygonVertexIndex); - - if(tempFaces.empty()) { - FBXImporter::LogWarn("encountered mesh with no faces"); - } - - m_vertices.reserve(tempFaces.size()); - m_faces.reserve(tempFaces.size() / 3); - - m_mapping_offsets.resize(tempVerts.size()); - m_mapping_counts.resize(tempVerts.size(),0); - m_mappings.resize(tempFaces.size()); - - const size_t vertex_count = tempVerts.size(); - - // generate output vertices, computing an adjacency table to - // preserve the 
mapping from fbx indices to *this* indexing. - unsigned int count = 0; - for(int index : tempFaces) { - const int absi = index < 0 ? (-index - 1) : index; - if(static_cast<size_t>(absi) >= vertex_count) { - DOMError("polygon vertex index out of range",&PolygonVertexIndex); - } - - m_vertices.push_back(tempVerts[absi]); - ++count; - - ++m_mapping_counts[absi]; - - if (index < 0) { - m_faces.push_back(count); - count = 0; - } - } - - unsigned int cursor = 0; - for (size_t i = 0, e = tempVerts.size(); i < e; ++i) { - m_mapping_offsets[i] = cursor; - cursor += m_mapping_counts[i]; - - m_mapping_counts[i] = 0; - } - - cursor = 0; - for(int index : tempFaces) { - const int absi = index < 0 ? (-index - 1) : index; - m_mappings[m_mapping_offsets[absi] + m_mapping_counts[absi]++] = cursor++; - } - - // if settings.readAllLayers is true: - // * read all layers, try to load as many vertex channels as possible - // if settings.readAllLayers is false: - // * read only the layer with index 0, but warn about any further layers - for (ElementMap::const_iterator it = Layer.first; it != Layer.second; ++it) { - const TokenList& tokens = (*it).second->Tokens(); - - const char* err; - const int index = ParseTokenAsInt(*tokens[0], err); - if(err) { - DOMError(err,&element); - } - - if(doc.Settings().readAllLayers || index == 0) { - const Scope& layer = GetRequiredScope(*(*it).second); - ReadLayer(layer); - } - else { - FBXImporter::LogWarn("ignoring additional geometry layers"); - } - } -} - -// ------------------------------------------------------------------------------------------------ -MeshGeometry::~MeshGeometry() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& MeshGeometry::GetVertices() const { - return m_vertices; -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& MeshGeometry::GetNormals() const { - return m_normals; -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& MeshGeometry::GetTangents() const { - return m_tangents; -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& MeshGeometry::GetBinormals() const { - return m_binormals; -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<unsigned int>& MeshGeometry::GetFaceIndexCounts() const { - return m_faces; -} - -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector2D>& MeshGeometry::GetTextureCoords( unsigned int index ) const { - static const std::vector<aiVector2D> empty; - return index >= AI_MAX_NUMBER_OF_TEXTURECOORDS ? empty : m_uvs[ index ]; -} - -std::string MeshGeometry::GetTextureCoordChannelName( unsigned int index ) const { - return index >= AI_MAX_NUMBER_OF_TEXTURECOORDS ? "" : m_uvNames[ index ]; -} - -const std::vector<aiColor4D>& MeshGeometry::GetVertexColors( unsigned int index ) const { - static const std::vector<aiColor4D> empty; - return index >= AI_MAX_NUMBER_OF_COLOR_SETS ? 
empty : m_colors[ index ]; -} - -const MatIndexArray& MeshGeometry::GetMaterialIndices() const { - return m_materials; -} -// ------------------------------------------------------------------------------------------------ -const unsigned int* MeshGeometry::ToOutputVertexIndex( unsigned int in_index, unsigned int& count ) const { - if ( in_index >= m_mapping_counts.size() ) { - return NULL; - } - - ai_assert( m_mapping_counts.size() == m_mapping_offsets.size() ); - count = m_mapping_counts[ in_index ]; - - ai_assert( m_mapping_offsets[ in_index ] + count <= m_mappings.size() ); - - return &m_mappings[ m_mapping_offsets[ in_index ] ]; -} - -// ------------------------------------------------------------------------------------------------ -unsigned int MeshGeometry::FaceForVertexIndex( unsigned int in_index ) const { - ai_assert( in_index < m_vertices.size() ); - - // in the current conversion pattern this will only be needed if - // weights are present, so no need to always pre-compute this table - if ( m_facesVertexStartIndices.empty() ) { - m_facesVertexStartIndices.resize( m_faces.size() + 1, 0 ); - - std::partial_sum( m_faces.begin(), m_faces.end(), m_facesVertexStartIndices.begin() + 1 ); - m_facesVertexStartIndices.pop_back(); - } - - ai_assert( m_facesVertexStartIndices.size() == m_faces.size() ); - const std::vector<unsigned int>::iterator it = std::upper_bound( - m_facesVertexStartIndices.begin(), - m_facesVertexStartIndices.end(), - in_index - ); - - return static_cast< unsigned int >( std::distance( m_facesVertexStartIndices.begin(), it - 1 ) ); -} - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadLayer(const Scope& layer) -{ - const ElementCollection& LayerElement = layer.GetCollection("LayerElement"); - for (ElementMap::const_iterator eit = LayerElement.first; eit != LayerElement.second; ++eit) { - const Scope& elayer = GetRequiredScope(*(*eit).second); - - ReadLayerElement(elayer); - } -} - - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadLayerElement(const Scope& layerElement) -{ - const Element& Type = GetRequiredElement(layerElement,"Type"); - const Element& TypedIndex = GetRequiredElement(layerElement,"TypedIndex"); - - const std::string& type = ParseTokenAsString(GetRequiredToken(Type,0)); - const int typedIndex = ParseTokenAsInt(GetRequiredToken(TypedIndex,0)); - - const Scope& top = GetRequiredScope(element); - const ElementCollection candidates = top.GetCollection(type); - - for (ElementMap::const_iterator it = candidates.first; it != candidates.second; ++it) { - const int index = ParseTokenAsInt(GetRequiredToken(*(*it).second,0)); - if(index == typedIndex) { - ReadVertexData(type,typedIndex,GetRequiredScope(*(*it).second)); - return; - } - } - - FBXImporter::LogError(Formatter::format("failed to resolve vertex layer element: ") - << type << ", index: " << typedIndex); -} - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadVertexData(const std::string& type, int index, const Scope& source) -{ - const std::string& MappingInformationType = ParseTokenAsString(GetRequiredToken( - GetRequiredElement(source,"MappingInformationType"),0) - ); - - const std::string& ReferenceInformationType = ParseTokenAsString(GetRequiredToken( - GetRequiredElement(source,"ReferenceInformationType"),0) - ); - - if (type == "LayerElementUV") { - if(index >= 
AI_MAX_NUMBER_OF_TEXTURECOORDS) { - FBXImporter::LogError(Formatter::format("ignoring UV layer, maximum number of UV channels exceeded: ") - << index << " (limit is " << AI_MAX_NUMBER_OF_TEXTURECOORDS << ")" ); - return; - } - - const Element* Name = source["Name"]; - m_uvNames[index] = ""; - if(Name) { - m_uvNames[index] = ParseTokenAsString(GetRequiredToken(*Name,0)); - } - - ReadVertexDataUV(m_uvs[index],source, - MappingInformationType, - ReferenceInformationType - ); - } - else if (type == "LayerElementMaterial") { - if (m_materials.size() > 0) { - FBXImporter::LogError("ignoring additional material layer"); - return; - } - - std::vector<int> temp_materials; - - ReadVertexDataMaterials(temp_materials,source, - MappingInformationType, - ReferenceInformationType - ); - - // sometimes, there will be only negative entries. Drop the material - // layer in such a case (I guess it means a default material should - // be used). This is what the converter would do anyway, and it - // avoids losing the material if there are more material layers - // coming of which at least one contains actual data (did observe - // that with one test file). - const size_t count_neg = std::count_if(temp_materials.begin(),temp_materials.end(),[](int n) { return n < 0; }); - if(count_neg == temp_materials.size()) { - FBXImporter::LogWarn("ignoring dummy material layer (all entries -1)"); - return; - } - - std::swap(temp_materials, m_materials); - } - else if (type == "LayerElementNormal") { - if (m_normals.size() > 0) { - FBXImporter::LogError("ignoring additional normal layer"); - return; - } - - ReadVertexDataNormals(m_normals,source, - MappingInformationType, - ReferenceInformationType - ); - } - else if (type == "LayerElementTangent") { - if (m_tangents.size() > 0) { - FBXImporter::LogError("ignoring additional tangent layer"); - return; - } - - ReadVertexDataTangents(m_tangents,source, - MappingInformationType, - ReferenceInformationType - ); - } - else if (type == "LayerElementBinormal") { - if (m_binormals.size() > 0) { - FBXImporter::LogError("ignoring additional binormal layer"); - return; - } - - ReadVertexDataBinormals(m_binormals,source, - MappingInformationType, - ReferenceInformationType - ); - } - else if (type == "LayerElementColor") { - if(index >= AI_MAX_NUMBER_OF_COLOR_SETS) { - FBXImporter::LogError(Formatter::format("ignoring vertex color layer, maximum number of color sets exceeded: ") - << index << " (limit is " << AI_MAX_NUMBER_OF_COLOR_SETS << ")" ); - return; - } - - ReadVertexDataColors(m_colors[index],source, - MappingInformationType, - ReferenceInformationType - ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Lengthy utility function to read and resolve a FBX vertex data array - that is, the -// output is in polygon vertex order. This logic is used for reading normals, UVs, colors, -// tangents .. 
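The MeshGeometry constructor earlier in this file decodes PolygonVertexIndex with the FBX convention that a negative entry closes the current polygon and encodes the real control-point index as -(index + 1). A standalone sketch of that decoding, independent of the assimp types:

    #include <iostream>
    #include <vector>

    int main() {
        // Two polygons sharing control points: a triangle (0,1,2) and a
        // quad (2,1,3,4). The last corner of each polygon is stored as
        // -(index + 1) to mark the polygon boundary.
        const std::vector<int> polygon_vertex_index = {0, 1, -3, 2, 1, 3, -5};

        std::vector<int> corners;          // flat list of control-point indices
        std::vector<unsigned> face_sizes;  // corners per polygon
        unsigned count = 0;

        for (int idx : polygon_vertex_index) {
            const int abs_idx = idx < 0 ? (-idx - 1) : idx;  // undo the encoding
            corners.push_back(abs_idx);
            ++count;
            if (idx < 0) {               // negative index closes the polygon
                face_sizes.push_back(count);
                count = 0;
            }
        }

        std::cout << "faces: " << face_sizes.size() << '\n';
        size_t cursor = 0;
        for (unsigned n : face_sizes) {
            for (unsigned i = 0; i < n; ++i) std::cout << corners[cursor++] << ' ';
            std::cout << '\n';
        }
        return 0;
    }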
-template <typename T> -void ResolveVertexDataArray(std::vector<T>& data_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType, - const char* dataElementName, - const char* indexDataElementName, - size_t vertex_count, - const std::vector<unsigned int>& mapping_counts, - const std::vector<unsigned int>& mapping_offsets, - const std::vector<unsigned int>& mappings) -{ - bool isDirect = ReferenceInformationType == "Direct"; - bool isIndexToDirect = ReferenceInformationType == "IndexToDirect"; - - // fall-back to direct data if there is no index data element - if ( isIndexToDirect && !HasElement( source, indexDataElementName ) ) { - isDirect = true; - isIndexToDirect = false; - } - - // handle permutations of Mapping and Reference type - it would be nice to - // deal with this more elegantly and with less redundancy, but right - // now it seems unavoidable. - if (MappingInformationType == "ByVertice" && isDirect) { - if (!HasElement(source, dataElementName)) { - return; - } - std::vector<T> tempData; - ParseVectorDataArray(tempData, GetRequiredElement(source, dataElementName)); - - data_out.resize(vertex_count); - for (size_t i = 0, e = tempData.size(); i < e; ++i) { - - const unsigned int istart = mapping_offsets[i], iend = istart + mapping_counts[i]; - for (unsigned int j = istart; j < iend; ++j) { - data_out[mappings[j]] = tempData[i]; - } - } - } - else if (MappingInformationType == "ByVertice" && isIndexToDirect) { - std::vector<T> tempData; - ParseVectorDataArray(tempData, GetRequiredElement(source, dataElementName)); - - data_out.resize(vertex_count); - - std::vector<int> uvIndices; - ParseVectorDataArray(uvIndices,GetRequiredElement(source,indexDataElementName)); - for (size_t i = 0, e = uvIndices.size(); i < e; ++i) { - - const unsigned int istart = mapping_offsets[i], iend = istart + mapping_counts[i]; - for (unsigned int j = istart; j < iend; ++j) { - if (static_cast<size_t>(uvIndices[i]) >= tempData.size()) { - DOMError("index out of range",&GetRequiredElement(source,indexDataElementName)); - } - data_out[mappings[j]] = tempData[uvIndices[i]]; - } - } - } - else if (MappingInformationType == "ByPolygonVertex" && isDirect) { - std::vector<T> tempData; - ParseVectorDataArray(tempData, GetRequiredElement(source, dataElementName)); - - if (tempData.size() != vertex_count) { - FBXImporter::LogError(Formatter::format("length of input data unexpected for ByPolygon mapping: ") - << tempData.size() << ", expected " << vertex_count - ); - return; - } - - data_out.swap(tempData); - } - else if (MappingInformationType == "ByPolygonVertex" && isIndexToDirect) { - std::vector<T> tempData; - ParseVectorDataArray(tempData, GetRequiredElement(source, dataElementName)); - - data_out.resize(vertex_count); - - std::vector<int> uvIndices; - ParseVectorDataArray(uvIndices,GetRequiredElement(source,indexDataElementName)); - - if (uvIndices.size() != vertex_count) { - FBXImporter::LogError("length of input data unexpected for ByPolygonVertex mapping"); - return; - } - - const T empty; - unsigned int next = 0; - for(int i : uvIndices) { - if ( -1 == i ) { - data_out[ next++ ] = empty; - continue; - } - if (static_cast<size_t>(i) >= tempData.size()) { - DOMError("index out of range",&GetRequiredElement(source,indexDataElementName)); - } - - data_out[next++] = tempData[i]; - } - } - else { - FBXImporter::LogError(Formatter::format("ignoring vertex data channel, access type not implemented: ") - << MappingInformationType << "," << 
ReferenceInformationType); - } -} - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadVertexDataNormals(std::vector<aiVector3D>& normals_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - ResolveVertexDataArray(normals_out,source,MappingInformationType,ReferenceInformationType, - "Normals", - "NormalsIndex", - m_vertices.size(), - m_mapping_counts, - m_mapping_offsets, - m_mappings); -} - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadVertexDataUV(std::vector<aiVector2D>& uv_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - ResolveVertexDataArray(uv_out,source,MappingInformationType,ReferenceInformationType, - "UV", - "UVIndex", - m_vertices.size(), - m_mapping_counts, - m_mapping_offsets, - m_mappings); -} - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadVertexDataColors(std::vector<aiColor4D>& colors_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - ResolveVertexDataArray(colors_out,source,MappingInformationType,ReferenceInformationType, - "Colors", - "ColorIndex", - m_vertices.size(), - m_mapping_counts, - m_mapping_offsets, - m_mappings); -} - -// ------------------------------------------------------------------------------------------------ -static const char *TangentIndexToken = "TangentIndex"; -static const char *TangentsIndexToken = "TangentsIndex"; - -void MeshGeometry::ReadVertexDataTangents(std::vector<aiVector3D>& tangents_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - const char * str = source.Elements().count( "Tangents" ) > 0 ? "Tangents" : "Tangent"; - const char * strIdx = source.Elements().count( "Tangents" ) > 0 ? TangentsIndexToken : TangentIndexToken; - ResolveVertexDataArray(tangents_out,source,MappingInformationType,ReferenceInformationType, - str, - strIdx, - m_vertices.size(), - m_mapping_counts, - m_mapping_offsets, - m_mappings); -} - -// ------------------------------------------------------------------------------------------------ -static const std::string BinormalIndexToken = "BinormalIndex"; -static const std::string BinormalsIndexToken = "BinormalsIndex"; - -void MeshGeometry::ReadVertexDataBinormals(std::vector<aiVector3D>& binormals_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - const char * str = source.Elements().count( "Binormals" ) > 0 ? "Binormals" : "Binormal"; - const char * strIdx = source.Elements().count( "Binormals" ) > 0 ? 
BinormalsIndexToken.c_str() : BinormalIndexToken.c_str(); - ResolveVertexDataArray(binormals_out,source,MappingInformationType,ReferenceInformationType, - str, - strIdx, - m_vertices.size(), - m_mapping_counts, - m_mapping_offsets, - m_mappings); -} - - -// ------------------------------------------------------------------------------------------------ -void MeshGeometry::ReadVertexDataMaterials(std::vector<int>& materials_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType) -{ - const size_t face_count = m_faces.size(); - if( 0 == face_count ) - { - return; - } - - // materials are handled separately. First of all, they are assigned per-face - // and not per polyvert. Secondly, ReferenceInformationType=IndexToDirect - // has a slightly different meaning for materials. - ParseVectorDataArray(materials_out,GetRequiredElement(source,"Materials")); - - if (MappingInformationType == "AllSame") { - // easy - same material for all faces - if (materials_out.empty()) { - FBXImporter::LogError(Formatter::format("expected material index, ignoring")); - return; - } else if (materials_out.size() > 1) { - FBXImporter::LogWarn(Formatter::format("expected only a single material index, ignoring all except the first one")); - materials_out.clear(); - } - - materials_out.resize(m_vertices.size()); - std::fill(materials_out.begin(), materials_out.end(), materials_out.at(0)); - } else if (MappingInformationType == "ByPolygon" && ReferenceInformationType == "IndexToDirect") { - materials_out.resize(face_count); - - if(materials_out.size() != face_count) { - FBXImporter::LogError(Formatter::format("length of input data unexpected for ByPolygon mapping: ") - << materials_out.size() << ", expected " << face_count - ); - return; - } - } else { - FBXImporter::LogError(Formatter::format("ignoring material assignments, access type not implemented: ") - << MappingInformationType << "," << ReferenceInformationType); - } -} -// ------------------------------------------------------------------------------------------------ -ShapeGeometry::ShapeGeometry(uint64_t id, const Element& element, const std::string& name, const Document& doc) -: Geometry(id, element, name, doc) { - const Scope *sc = element.Compound(); - if (nullptr == sc) { - DOMError("failed to read Geometry object (class: Shape), no data scope found"); - } - const Element& Indexes = GetRequiredElement(*sc, "Indexes", &element); - const Element& Normals = GetRequiredElement(*sc, "Normals", &element); - const Element& Vertices = GetRequiredElement(*sc, "Vertices", &element); - ParseVectorDataArray(m_indices, Indexes); - ParseVectorDataArray(m_vertices, Vertices); - ParseVectorDataArray(m_normals, Normals); -} - -// ------------------------------------------------------------------------------------------------ -ShapeGeometry::~ShapeGeometry() { - // empty -} -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& ShapeGeometry::GetVertices() const { - return m_vertices; -} -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& ShapeGeometry::GetNormals() const { - return m_normals; -} -// ------------------------------------------------------------------------------------------------ -const std::vector<unsigned int>& ShapeGeometry::GetIndices() const { - return m_indices; -} -// 
------------------------------------------------------------------------------------------------ -LineGeometry::LineGeometry(uint64_t id, const Element& element, const std::string& name, const Document& doc) - : Geometry(id, element, name, doc) -{ - const Scope* sc = element.Compound(); - if (!sc) { - DOMError("failed to read Geometry object (class: Line), no data scope found"); - } - const Element& Points = GetRequiredElement(*sc, "Points", &element); - const Element& PointsIndex = GetRequiredElement(*sc, "PointsIndex", &element); - ParseVectorDataArray(m_vertices, Points); - ParseVectorDataArray(m_indices, PointsIndex); -} - -// ------------------------------------------------------------------------------------------------ -LineGeometry::~LineGeometry() { - // empty -} -// ------------------------------------------------------------------------------------------------ -const std::vector<aiVector3D>& LineGeometry::GetVertices() const { - return m_vertices; -} -// ------------------------------------------------------------------------------------------------ -const std::vector<int>& LineGeometry::GetIndices() const { - return m_indices; -} -} // !FBX -} // !Assimp -#endif - diff --git a/thirdparty/assimp/code/FBX/FBXMeshGeometry.h b/thirdparty/assimp/code/FBX/FBXMeshGeometry.h deleted file mode 100644 index d6d4512177..0000000000 --- a/thirdparty/assimp/code/FBX/FBXMeshGeometry.h +++ /dev/null @@ -1,235 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXImporter.h -* @brief Declaration of the FBX main importer class -*/ -#ifndef INCLUDED_AI_FBX_MESHGEOMETRY_H -#define INCLUDED_AI_FBX_MESHGEOMETRY_H - -#include "FBXParser.h" -#include "FBXDocument.h" - -namespace Assimp { -namespace FBX { - -/** - * DOM base class for all kinds of FBX geometry - */ -class Geometry : public Object -{ -public: - Geometry( uint64_t id, const Element& element, const std::string& name, const Document& doc ); - virtual ~Geometry(); - - /** Get the Skin attached to this geometry or NULL */ - const Skin* DeformerSkin() const; - - /** Get the BlendShape attached to this geometry or NULL */ - const std::vector<const BlendShape*>& GetBlendShapes() const; - -private: - const Skin* skin; - std::vector<const BlendShape*> blendShapes; - -}; - -typedef std::vector<int> MatIndexArray; - - -/** - * DOM class for FBX geometry of type "Mesh" - */ -class MeshGeometry : public Geometry -{ -public: - /** The class constructor */ - MeshGeometry( uint64_t id, const Element& element, const std::string& name, const Document& doc ); - - /** The class destructor */ - virtual ~MeshGeometry(); - - /** Get a list of all vertex points, non-unique*/ - const std::vector<aiVector3D>& GetVertices() const; - - /** Get a list of all vertex normals or an empty array if - * no normals are specified. */ - const std::vector<aiVector3D>& GetNormals() const; - - /** Get a list of all vertex tangents or an empty array - * if no tangents are specified */ - const std::vector<aiVector3D>& GetTangents() const; - - /** Get a list of all vertex bi-normals or an empty array - * if no bi-normals are specified */ - const std::vector<aiVector3D>& GetBinormals() const; - - /** Return list of faces - each entry denotes a face and specifies - * how many vertices it has. Vertices are taken from the - * vertex data arrays in sequential order. */ - const std::vector<unsigned int>& GetFaceIndexCounts() const; - - /** Get a UV coordinate slot, returns an empty array if - * the requested slot does not exist. */ - const std::vector<aiVector2D>& GetTextureCoords( unsigned int index ) const; - - /** Get a UV coordinate slot, returns an empty array if - * the requested slot does not exist. */ - std::string GetTextureCoordChannelName( unsigned int index ) const; - - /** Get a vertex color coordinate slot, returns an empty array if - * the requested slot does not exist. */ - const std::vector<aiColor4D>& GetVertexColors( unsigned int index ) const; - - /** Get per-face-vertex material assignments */ - const MatIndexArray& GetMaterialIndices() const; - - /** Convert from a fbx file vertex index (for example from a #Cluster weight) or NULL - * if the vertex index is not valid. */ - const unsigned int* ToOutputVertexIndex( unsigned int in_index, unsigned int& count ) const; - - /** Determine the face to which a particular output vertex index belongs. - * This mapping is always unique. 
*/ - unsigned int FaceForVertexIndex( unsigned int in_index ) const; -private: - void ReadLayer( const Scope& layer ); - void ReadLayerElement( const Scope& layerElement ); - void ReadVertexData( const std::string& type, int index, const Scope& source ); - - void ReadVertexDataUV( std::vector<aiVector2D>& uv_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - - void ReadVertexDataNormals( std::vector<aiVector3D>& normals_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - - void ReadVertexDataColors( std::vector<aiColor4D>& colors_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - - void ReadVertexDataTangents( std::vector<aiVector3D>& tangents_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - - void ReadVertexDataBinormals( std::vector<aiVector3D>& binormals_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - - void ReadVertexDataMaterials( MatIndexArray& materials_out, const Scope& source, - const std::string& MappingInformationType, - const std::string& ReferenceInformationType ); - -private: - // cached data arrays - MatIndexArray m_materials; - std::vector<aiVector3D> m_vertices; - std::vector<unsigned int> m_faces; - mutable std::vector<unsigned int> m_facesVertexStartIndices; - std::vector<aiVector3D> m_tangents; - std::vector<aiVector3D> m_binormals; - std::vector<aiVector3D> m_normals; - - std::string m_uvNames[ AI_MAX_NUMBER_OF_TEXTURECOORDS ]; - std::vector<aiVector2D> m_uvs[ AI_MAX_NUMBER_OF_TEXTURECOORDS ]; - std::vector<aiColor4D> m_colors[ AI_MAX_NUMBER_OF_COLOR_SETS ]; - - std::vector<unsigned int> m_mapping_counts; - std::vector<unsigned int> m_mapping_offsets; - std::vector<unsigned int> m_mappings; -}; - -/** -* DOM class for FBX geometry of type "Shape" -*/ -class ShapeGeometry : public Geometry -{ -public: - /** The class constructor */ - ShapeGeometry(uint64_t id, const Element& element, const std::string& name, const Document& doc); - - /** The class destructor */ - virtual ~ShapeGeometry(); - - /** Get a list of all vertex points, non-unique*/ - const std::vector<aiVector3D>& GetVertices() const; - - /** Get a list of all vertex normals or an empty array if - * no normals are specified. */ - const std::vector<aiVector3D>& GetNormals() const; - - /** Return list of vertex indices. */ - const std::vector<unsigned int>& GetIndices() const; - -private: - std::vector<aiVector3D> m_vertices; - std::vector<aiVector3D> m_normals; - std::vector<unsigned int> m_indices; -}; -/** -* DOM class for FBX geometry of type "Line" -*/ -class LineGeometry : public Geometry -{ -public: - /** The class constructor */ - LineGeometry(uint64_t id, const Element& element, const std::string& name, const Document& doc); - - /** The class destructor */ - virtual ~LineGeometry(); - - /** Get a list of all vertex points, non-unique*/ - const std::vector<aiVector3D>& GetVertices() const; - - /** Return list of vertex indices. 
*/ - const std::vector<int>& GetIndices() const; - -private: - std::vector<aiVector3D> m_vertices; - std::vector<int> m_indices; -}; - -} -} - -#endif // INCLUDED_AI_FBX_MESHGEOMETRY_H - diff --git a/thirdparty/assimp/code/FBX/FBXModel.cpp b/thirdparty/assimp/code/FBX/FBXModel.cpp deleted file mode 100644 index 589af36ac7..0000000000 --- a/thirdparty/assimp/code/FBX/FBXModel.cpp +++ /dev/null @@ -1,153 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file FBXModel.cpp - * @brief Assimp::FBX::Model implementation - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXMeshGeometry.h" -#include "FBXDocument.h" -#include "FBXImporter.h" -#include "FBXDocumentUtil.h" - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -Model::Model(uint64_t id, const Element& element, const Document& doc, const std::string& name) - : Object(id,element,name) - , shading("Y") -{ - const Scope& sc = GetRequiredScope(element); - const Element* const Shading = sc["Shading"]; - const Element* const Culling = sc["Culling"]; - - if(Shading) { - shading = GetRequiredToken(*Shading,0).StringContents(); - } - - if (Culling) { - culling = ParseTokenAsString(GetRequiredToken(*Culling,0)); - } - - props = GetPropertyTable(doc,"Model.FbxNode",element,sc); - ResolveLinks(element,doc); -} - -// ------------------------------------------------------------------------------------------------ -Model::~Model() -{ - -} - -// ------------------------------------------------------------------------------------------------ -void Model::ResolveLinks(const Element& element, const Document& doc) -{ - const char* const arr[] = {"Geometry","Material","NodeAttribute"}; - - // resolve material - const std::vector<const Connection*>& conns = doc.GetConnectionsByDestinationSequenced(ID(),arr, 3); - - materials.reserve(conns.size()); - geometry.reserve(conns.size()); - attributes.reserve(conns.size()); - for(const Connection* con : conns) { - - // material and geometry links should be Object-Object connections - if (con->PropertyName().length()) { - continue; - } - - const Object* const ob = con->SourceObject(); - if(!ob) { - DOMWarning("failed to read source object for incoming Model link, ignoring",&element); - continue; - } - - const Material* const mat = dynamic_cast<const Material*>(ob); - if(mat) { - materials.push_back(mat); - continue; - } - - const Geometry* const geo = dynamic_cast<const Geometry*>(ob); - if(geo) { - geometry.push_back(geo); - continue; - } - - const NodeAttribute* const att = dynamic_cast<const NodeAttribute*>(ob); - if(att) { - attributes.push_back(att); - continue; - } - - DOMWarning("source object for model link is neither Material, NodeAttribute nor Geometry, ignoring",&element); - continue; - } -} - -// ------------------------------------------------------------------------------------------------ -bool Model::IsNull() const -{ - const std::vector<const NodeAttribute*>& attrs = GetAttributes(); - for(const NodeAttribute* att : attrs) { - - const Null* null_tag = dynamic_cast<const Null*>(att); - if(null_tag) { - return true; - } - } - - return false; -} - - -} //!FBX -} //!Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXNodeAttribute.cpp b/thirdparty/assimp/code/FBX/FBXNodeAttribute.cpp deleted file mode 100644 index b72e5637ee..0000000000 --- a/thirdparty/assimp/code/FBX/FBXNodeAttribute.cpp +++ /dev/null @@ -1,170 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXNoteAttribute.cpp - * @brief Assimp::FBX::NodeAttribute (and subclasses) implementation - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXImporter.h" -#include "FBXDocumentUtil.h" - -namespace Assimp { -namespace FBX { - -using namespace Util; - -// ------------------------------------------------------------------------------------------------ -NodeAttribute::NodeAttribute(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -, props() -{ - const Scope& sc = GetRequiredScope(element); - - const std::string& classname = ParseTokenAsString(GetRequiredToken(element,2)); - - // hack on the deriving type but Null/LimbNode attributes are the only case in which - // the property table is by design absent and no warning should be generated - // for it. 
- const bool is_null_or_limb = !strcmp(classname.c_str(), "Null") || !strcmp(classname.c_str(), "LimbNode"); - props = GetPropertyTable(doc,"NodeAttribute.Fbx" + classname,element,sc, is_null_or_limb); -} - - -// ------------------------------------------------------------------------------------------------ -NodeAttribute::~NodeAttribute() -{ - // empty -} - - -// ------------------------------------------------------------------------------------------------ -CameraSwitcher::CameraSwitcher(uint64_t id, const Element& element, const Document& doc, const std::string& name) - : NodeAttribute(id,element,doc,name) -{ - const Scope& sc = GetRequiredScope(element); - const Element* const CameraId = sc["CameraId"]; - const Element* const CameraName = sc["CameraName"]; - const Element* const CameraIndexName = sc["CameraIndexName"]; - - if(CameraId) { - cameraId = ParseTokenAsInt(GetRequiredToken(*CameraId,0)); - } - - if(CameraName) { - cameraName = GetRequiredToken(*CameraName,0).StringContents(); - } - - if(CameraIndexName && CameraIndexName->Tokens().size()) { - cameraIndexName = GetRequiredToken(*CameraIndexName,0).StringContents(); - } -} - -// ------------------------------------------------------------------------------------------------ -CameraSwitcher::~CameraSwitcher() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -Camera::Camera(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: NodeAttribute(id,element,doc,name) -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -Camera::~Camera() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -Light::Light(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: NodeAttribute(id,element,doc,name) -{ - // empty -} - - -// ------------------------------------------------------------------------------------------------ -Light::~Light() -{ -} - - -// ------------------------------------------------------------------------------------------------ -Null::Null(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: NodeAttribute(id,element,doc,name) -{ - -} - - -// ------------------------------------------------------------------------------------------------ -Null::~Null() -{ - -} - - -// ------------------------------------------------------------------------------------------------ -LimbNode::LimbNode(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: NodeAttribute(id,element,doc,name) -{ - -} - - -// ------------------------------------------------------------------------------------------------ -LimbNode::~LimbNode() -{ - -} - -} -} - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXParser.cpp b/thirdparty/assimp/code/FBX/FBXParser.cpp deleted file mode 100644 index 4a9346040d..0000000000 --- a/thirdparty/assimp/code/FBX/FBXParser.cpp +++ /dev/null @@ -1,1309 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXParser.cpp - * @brief Implementation of the FBX parser and the rudimentary DOM that we use - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#ifdef ASSIMP_BUILD_NO_OWN_ZLIB -# include <zlib.h> -#else -# include "../contrib/zlib/zlib.h" -#endif - -#include "FBXTokenizer.h" -#include "FBXParser.h" -#include "FBXUtil.h" - -#include <assimp/ParsingUtils.h> -#include <assimp/fast_atof.h> -#include <assimp/ByteSwapper.h> - -#include <iostream> - -using namespace Assimp; -using namespace Assimp::FBX; - -namespace { - - // ------------------------------------------------------------------------------------------------ - // signal parse error, this is always unrecoverable. Throws DeadlyImportError. - AI_WONT_RETURN void ParseError(const std::string& message, const Token& token) AI_WONT_RETURN_SUFFIX; - AI_WONT_RETURN void ParseError(const std::string& message, const Token& token) - { - throw DeadlyImportError(Util::AddTokenText("FBX-Parser",message,&token)); - } - - // ------------------------------------------------------------------------------------------------ - AI_WONT_RETURN void ParseError(const std::string& message, const Element* element = NULL) AI_WONT_RETURN_SUFFIX; - AI_WONT_RETURN void ParseError(const std::string& message, const Element* element) - { - if(element) { - ParseError(message,element->KeyToken()); - } - throw DeadlyImportError("FBX-Parser " + message); - } - - - // ------------------------------------------------------------------------------------------------ - void ParseError(const std::string& message, TokenPtr token) - { - if(token) { - ParseError(message, *token); - } - ParseError(message); - } - - // Initially, we did reinterpret_cast, breaking strict aliasing rules. - // This actually caused trouble on Android, so let's be safe this time. 
- // https://github.com/assimp/assimp/issues/24 - template <typename T> - T SafeParse(const char* data, const char* end) { - // Actual size validation happens during Tokenization so - // this is valid as an assertion. - (void)(end); - ai_assert(static_cast<size_t>(end - data) >= sizeof(T)); - T result = static_cast<T>(0); - ::memcpy(&result, data, sizeof(T)); - return result; - } -} - -namespace Assimp { -namespace FBX { - -// ------------------------------------------------------------------------------------------------ -Element::Element(const Token& key_token, Parser& parser) -: key_token(key_token) -{ - TokenPtr n = nullptr; - do { - n = parser.AdvanceToNextToken(); - if(!n) { - ParseError("unexpected end of file, expected closing bracket",parser.LastToken()); - } - - if (n->Type() == TokenType_DATA) { - tokens.push_back(n); - TokenPtr prev = n; - n = parser.AdvanceToNextToken(); - if(!n) { - ParseError("unexpected end of file, expected bracket, comma or key",parser.LastToken()); - } - - const TokenType ty = n->Type(); - - // some exporters are missing a comma on the next line - if (ty == TokenType_DATA && prev->Type() == TokenType_DATA && (n->Line() == prev->Line() + 1)) { - tokens.push_back(n); - continue; - } - - if (ty != TokenType_OPEN_BRACKET && ty != TokenType_CLOSE_BRACKET && ty != TokenType_COMMA && ty != TokenType_KEY) { - ParseError("unexpected token; expected bracket, comma or key",n); - } - } - - if (n->Type() == TokenType_OPEN_BRACKET) { - compound.reset(new Scope(parser)); - - // current token should be a TOK_CLOSE_BRACKET - n = parser.CurrentToken(); - ai_assert(n); - - if (n->Type() != TokenType_CLOSE_BRACKET) { - ParseError("expected closing bracket",n); - } - - parser.AdvanceToNextToken(); - return; - } - } - while(n->Type() != TokenType_KEY && n->Type() != TokenType_CLOSE_BRACKET); -} - -// ------------------------------------------------------------------------------------------------ -Element::~Element() -{ - // no need to delete tokens, they are owned by the parser -} - -// ------------------------------------------------------------------------------------------------ -Scope::Scope(Parser& parser,bool topLevel) -{ - if(!topLevel) { - TokenPtr t = parser.CurrentToken(); - if (t->Type() != TokenType_OPEN_BRACKET) { - ParseError("expected open bracket",t); - } - } - - TokenPtr n = parser.AdvanceToNextToken(); - if(n == NULL) { - ParseError("unexpected end of file"); - } - - // note: empty scopes are allowed - while(n->Type() != TokenType_CLOSE_BRACKET) { - if (n->Type() != TokenType_KEY) { - ParseError("unexpected token, expected TOK_KEY",n); - } - - const std::string& str = n->StringContents(); - elements.insert(ElementMap::value_type(str,new_Element(*n,parser))); - - // Element() should stop at the next Key token (or right after a Close token) - n = parser.CurrentToken(); - if(n == NULL) { - if (topLevel) { - return; - } - ParseError("unexpected end of file",parser.LastToken()); - } - } -} - -// ------------------------------------------------------------------------------------------------ -Scope::~Scope() -{ - for(ElementMap::value_type& v : elements) { - delete v.second; - } -} - -// ------------------------------------------------------------------------------------------------ -Parser::Parser (const TokenList& tokens, bool is_binary) -: tokens(tokens) -, last() -, current() -, cursor(tokens.begin()) -, is_binary(is_binary) -{ - root.reset(new Scope(*this,true)); -} - -// 
------------------------------------------------------------------------------------------------ -Parser::~Parser() -{ - // empty -} - -// ------------------------------------------------------------------------------------------------ -TokenPtr Parser::AdvanceToNextToken() -{ - last = current; - if (cursor == tokens.end()) { - current = NULL; - } else { - current = *cursor++; - } - return current; -} - -// ------------------------------------------------------------------------------------------------ -TokenPtr Parser::CurrentToken() const -{ - return current; -} - -// ------------------------------------------------------------------------------------------------ -TokenPtr Parser::LastToken() const -{ - return last; -} - -// ------------------------------------------------------------------------------------------------ -uint64_t ParseTokenAsID(const Token& t, const char*& err_out) -{ - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return 0L; - } - - if(t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'L') { - err_out = "failed to parse ID, unexpected data type, expected L(ong) (binary)"; - return 0L; - } - - BE_NCONST uint64_t id = SafeParse<uint64_t>(data+1, t.end()); - AI_SWAP8(id); - return id; - } - - // XXX: should use size_t here - unsigned int length = static_cast<unsigned int>(t.end() - t.begin()); - ai_assert(length > 0); - - const char* out = nullptr; - const uint64_t id = strtoul10_64(t.begin(),&out,&length); - if (out > t.end()) { - err_out = "failed to parse ID (text)"; - return 0L; - } - - return id; -} - -// ------------------------------------------------------------------------------------------------ -size_t ParseTokenAsDim(const Token& t, const char*& err_out) -{ - // same as ID parsing, except there is a trailing asterisk - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return 0; - } - - if(t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'L') { - err_out = "failed to parse ID, unexpected data type, expected L(ong) (binary)"; - return 0; - } - - BE_NCONST uint64_t id = SafeParse<uint64_t>(data+1, t.end()); - AI_SWAP8(id); - return static_cast<size_t>(id); - } - - if(*t.begin() != '*') { - err_out = "expected asterisk before array dimension"; - return 0; - } - - // XXX: should use size_t here - unsigned int length = static_cast<unsigned int>(t.end() - t.begin()); - if(length == 0) { - err_out = "expected valid integer number after asterisk"; - return 0; - } - - const char* out = nullptr; - const size_t id = static_cast<size_t>(strtoul10_64(t.begin() + 1,&out,&length)); - if (out > t.end()) { - err_out = "failed to parse ID"; - return 0; - } - - return id; -} - - -// ------------------------------------------------------------------------------------------------ -float ParseTokenAsFloat(const Token& t, const char*& err_out) -{ - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return 0.0f; - } - - if(t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'F' && data[0] != 'D') { - err_out = "failed to parse F(loat) or D(ouble), unexpected data type (binary)"; - return 0.0f; - } - - if (data[0] == 'F') { - return SafeParse<float>(data+1, t.end()); - } - else { - return static_cast<float>( SafeParse<double>(data+1, t.end()) ); - } - } - - // need to copy the input string to a temporary buffer - // first - next in the fbx token stream comes ',', - // which fast_atof could interpret as 
decimal point. -#define MAX_FLOAT_LENGTH 31 - char temp[MAX_FLOAT_LENGTH + 1]; - const size_t length = static_cast<size_t>(t.end()-t.begin()); - std::copy(t.begin(),t.end(),temp); - temp[std::min(static_cast<size_t>(MAX_FLOAT_LENGTH),length)] = '\0'; - - return fast_atof(temp); -} - - -// ------------------------------------------------------------------------------------------------ -int ParseTokenAsInt(const Token& t, const char*& err_out) -{ - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return 0; - } - - if(t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'I') { - err_out = "failed to parse I(nt), unexpected data type (binary)"; - return 0; - } - - BE_NCONST int32_t ival = SafeParse<int32_t>(data+1, t.end()); - AI_SWAP4(ival); - return static_cast<int>(ival); - } - - ai_assert(static_cast<size_t>(t.end() - t.begin()) > 0); - - const char* out; - const int intval = strtol10(t.begin(),&out); - if (out != t.end()) { - err_out = "failed to parse ID"; - return 0; - } - - return intval; -} - - -// ------------------------------------------------------------------------------------------------ -int64_t ParseTokenAsInt64(const Token& t, const char*& err_out) -{ - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return 0L; - } - - if (t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'L') { - err_out = "failed to parse Int64, unexpected data type"; - return 0L; - } - - BE_NCONST int64_t id = SafeParse<int64_t>(data + 1, t.end()); - AI_SWAP8(id); - return id; - } - - // XXX: should use size_t here - unsigned int length = static_cast<unsigned int>(t.end() - t.begin()); - ai_assert(length > 0); - - const char* out = nullptr; - const int64_t id = strtol10_64(t.begin(), &out, &length); - if (out > t.end()) { - err_out = "failed to parse Int64 (text)"; - return 0L; - } - - return id; -} - -// ------------------------------------------------------------------------------------------------ -std::string ParseTokenAsString(const Token& t, const char*& err_out) -{ - err_out = NULL; - - if (t.Type() != TokenType_DATA) { - err_out = "expected TOK_DATA token"; - return ""; - } - - if(t.IsBinary()) - { - const char* data = t.begin(); - if (data[0] != 'S') { - err_out = "failed to parse S(tring), unexpected data type (binary)"; - return ""; - } - - // read string length - BE_NCONST int32_t len = SafeParse<int32_t>(data+1, t.end()); - AI_SWAP4(len); - - ai_assert(t.end() - data == 5 + len); - return std::string(data + 5, len); - } - - const size_t length = static_cast<size_t>(t.end() - t.begin()); - if(length < 2) { - err_out = "token is too short to hold a string"; - return ""; - } - - const char* s = t.begin(), *e = t.end() - 1; - if (*s != '\"' || *e != '\"') { - err_out = "expected double quoted string"; - return ""; - } - - return std::string(s+1,length-2); -} - - -namespace { - -// ------------------------------------------------------------------------------------------------ -// read the type code and element count of a binary data array and stop there -void ReadBinaryDataArrayHead(const char*& data, const char* end, char& type, uint32_t& count, - const Element& el) -{ - if (static_cast<size_t>(end-data) < 5) { - ParseError("binary data array is too short, need five (5) bytes for type signature and element count",&el); - } - - // data type - type = *data; - - // read number of elements - BE_NCONST uint32_t len = SafeParse<uint32_t>(data+1, end); - AI_SWAP4(len); - - count = 
len; - data += 5; -} - - -// ------------------------------------------------------------------------------------------------ -// read binary data array, assume cursor points to the 'compression mode' field (i.e. behind the header) -void ReadBinaryDataArray(char type, uint32_t count, const char*& data, const char* end, - std::vector<char>& buff, - const Element& /*el*/) -{ - BE_NCONST uint32_t encmode = SafeParse<uint32_t>(data, end); - AI_SWAP4(encmode); - data += 4; - - // next comes the compressed length - BE_NCONST uint32_t comp_len = SafeParse<uint32_t>(data, end); - AI_SWAP4(comp_len); - data += 4; - - ai_assert(data + comp_len == end); - - // determine the length of the uncompressed data by looking at the type signature - uint32_t stride = 0; - switch(type) - { - case 'f': - case 'i': - stride = 4; - break; - - case 'd': - case 'l': - stride = 8; - break; - - default: - ai_assert(false); - }; - - const uint32_t full_length = stride * count; - buff.resize(full_length); - - if(encmode == 0) { - ai_assert(full_length == comp_len); - - // plain data, no compression - std::copy(data, end, buff.begin()); - } - else if(encmode == 1) { - // zlib/deflate, next comes ZIP head (0x78 0x01) - // see http://www.ietf.org/rfc/rfc1950.txt - - z_stream zstream; - zstream.opaque = Z_NULL; - zstream.zalloc = Z_NULL; - zstream.zfree = Z_NULL; - zstream.data_type = Z_BINARY; - - // http://hewgill.com/journal/entries/349-how-to-decompress-gzip-stream-with-zlib - if(Z_OK != inflateInit(&zstream)) { - ParseError("failure initializing zlib"); - } - - zstream.next_in = reinterpret_cast<Bytef*>( const_cast<char*>(data) ); - zstream.avail_in = comp_len; - - zstream.avail_out = static_cast<uInt>(buff.size()); - zstream.next_out = reinterpret_cast<Bytef*>(&*buff.begin()); - const int ret = inflate(&zstream, Z_FINISH); - - if (ret != Z_STREAM_END && ret != Z_OK) { - ParseError("failure decompressing compressed data section"); - } - - // terminate zlib - inflateEnd(&zstream); - } -#ifdef ASSIMP_BUILD_DEBUG - else { - // runtime check for this happens at tokenization stage - ai_assert(false); - } -#endif - - data += comp_len; - ai_assert(data == end); -} - -} // !anon - - -// ------------------------------------------------------------------------------------------------ -// read an array of float3 tuples -void ParseVectorDataArray(std::vector<aiVector3D>& out, const Element& el) -{ - out.resize( 0 ); - - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(count % 3 != 0) { - ParseError("number of floats is not a multiple of three (3) (binary)",&el); - } - - if(!count) { - return; - } - - if (type != 'd' && type != 'f') { - ParseError("expected float or double array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * (type == 'd' ? 
8 : 4)); - - const uint32_t count3 = count / 3; - out.reserve(count3); - - if (type == 'd') { - const double* d = reinterpret_cast<const double*>(&buff[0]); - for (unsigned int i = 0; i < count3; ++i, d += 3) { - out.push_back(aiVector3D(static_cast<ai_real>(d[0]), - static_cast<ai_real>(d[1]), - static_cast<ai_real>(d[2]))); - } - // for debugging - /*for ( size_t i = 0; i < out.size(); i++ ) { - aiVector3D vec3( out[ i ] ); - std::stringstream stream; - stream << " vec3.x = " << vec3.x << " vec3.y = " << vec3.y << " vec3.z = " << vec3.z << std::endl; - DefaultLogger::get()->info( stream.str() ); - }*/ - } - else if (type == 'f') { - const float* f = reinterpret_cast<const float*>(&buff[0]); - for (unsigned int i = 0; i < count3; ++i, f += 3) { - out.push_back(aiVector3D(f[0],f[1],f[2])); - } - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // may throw bad_alloc if the input is rubbish, but this need - // not to be prevented - importing would fail but we wouldn't - // crash since assimp handles this case properly. - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - if (a.Tokens().size() % 3 != 0) { - ParseError("number of floats is not a multiple of three (3)",&el); - } - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - aiVector3D v; - v.x = ParseTokenAsFloat(**it++); - v.y = ParseTokenAsFloat(**it++); - v.z = ParseTokenAsFloat(**it++); - - out.push_back(v); - } -} - - -// ------------------------------------------------------------------------------------------------ -// read an array of color4 tuples -void ParseVectorDataArray(std::vector<aiColor4D>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(count % 4 != 0) { - ParseError("number of floats is not a multiple of four (4) (binary)",&el); - } - - if(!count) { - return; - } - - if (type != 'd' && type != 'f') { - ParseError("expected float or double array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * (type == 'd' ? 
8 : 4)); - - const uint32_t count4 = count / 4; - out.reserve(count4); - - if (type == 'd') { - const double* d = reinterpret_cast<const double*>(&buff[0]); - for (unsigned int i = 0; i < count4; ++i, d += 4) { - out.push_back(aiColor4D(static_cast<float>(d[0]), - static_cast<float>(d[1]), - static_cast<float>(d[2]), - static_cast<float>(d[3]))); - } - } - else if (type == 'f') { - const float* f = reinterpret_cast<const float*>(&buff[0]); - for (unsigned int i = 0; i < count4; ++i, f += 4) { - out.push_back(aiColor4D(f[0],f[1],f[2],f[3])); - } - } - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() above - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - if (a.Tokens().size() % 4 != 0) { - ParseError("number of floats is not a multiple of four (4)",&el); - } - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - aiColor4D v; - v.r = ParseTokenAsFloat(**it++); - v.g = ParseTokenAsFloat(**it++); - v.b = ParseTokenAsFloat(**it++); - v.a = ParseTokenAsFloat(**it++); - - out.push_back(v); - } -} - - -// ------------------------------------------------------------------------------------------------ -// read an array of float2 tuples -void ParseVectorDataArray(std::vector<aiVector2D>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(count % 2 != 0) { - ParseError("number of floats is not a multiple of two (2) (binary)",&el); - } - - if(!count) { - return; - } - - if (type != 'd' && type != 'f') { - ParseError("expected float or double array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * (type == 'd' ? 
8 : 4)); - - const uint32_t count2 = count / 2; - out.reserve(count2); - - if (type == 'd') { - const double* d = reinterpret_cast<const double*>(&buff[0]); - for (unsigned int i = 0; i < count2; ++i, d += 2) { - out.push_back(aiVector2D(static_cast<float>(d[0]), - static_cast<float>(d[1]))); - } - } - else if (type == 'f') { - const float* f = reinterpret_cast<const float*>(&buff[0]); - for (unsigned int i = 0; i < count2; ++i, f += 2) { - out.push_back(aiVector2D(f[0],f[1])); - } - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() above - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - if (a.Tokens().size() % 2 != 0) { - ParseError("number of floats is not a multiple of two (2)",&el); - } - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - aiVector2D v; - v.x = ParseTokenAsFloat(**it++); - v.y = ParseTokenAsFloat(**it++); - - out.push_back(v); - } -} - - -// ------------------------------------------------------------------------------------------------ -// read an array of ints -void ParseVectorDataArray(std::vector<int>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(!count) { - return; - } - - if (type != 'i') { - ParseError("expected int array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * 4); - - out.reserve(count); - - const int32_t* ip = reinterpret_cast<const int32_t*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++ip) { - BE_NCONST int32_t val = *ip; - AI_SWAP4(val); - out.push_back(val); - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - const int ival = ParseTokenAsInt(**it++); - out.push_back(ival); - } -} - - -// ------------------------------------------------------------------------------------------------ -// read an array of floats -void ParseVectorDataArray(std::vector<float>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(!count) { - return; - } - - if (type != 'd' && type != 'f') { - ParseError("expected float or double array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * (type == 'd' ? 
8 : 4)); - - if (type == 'd') { - const double* d = reinterpret_cast<const double*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++d) { - out.push_back(static_cast<float>(*d)); - } - } - else if (type == 'f') { - const float* f = reinterpret_cast<const float*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++f) { - out.push_back(*f); - } - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - const float ival = ParseTokenAsFloat(**it++); - out.push_back(ival); - } -} - -// ------------------------------------------------------------------------------------------------ -// read an array of uints -void ParseVectorDataArray(std::vector<unsigned int>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(!count) { - return; - } - - if (type != 'i') { - ParseError("expected (u)int array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * 4); - - out.reserve(count); - - const int32_t* ip = reinterpret_cast<const int32_t*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++ip) { - BE_NCONST int32_t val = *ip; - if(val < 0) { - ParseError("encountered negative integer index (binary)"); - } - - AI_SWAP4(val); - out.push_back(val); - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope,"a",&el); - - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - const int ival = ParseTokenAsInt(**it++); - if(ival < 0) { - ParseError("encountered negative integer index"); - } - out.push_back(static_cast<unsigned int>(ival)); - } -} - - -// ------------------------------------------------------------------------------------------------ -// read an array of uint64_ts -void ParseVectorDataArray(std::vector<uint64_t>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if(tok.empty()) { - ParseError("unexpected empty element",&el); - } - - if(tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if(!count) { - return; - } - - if (type != 'l') { - ParseError("expected long array (binary)",&el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * 8); - - out.reserve(count); - - const uint64_t* ip = reinterpret_cast<const uint64_t*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++ip) { - BE_NCONST uint64_t val = *ip; - AI_SWAP8(val); - out.push_back(val); - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = 
GetRequiredElement(scope,"a",&el); - - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end; ) { - const uint64_t ival = ParseTokenAsID(**it++); - - out.push_back(ival); - } -} - -// ------------------------------------------------------------------------------------------------ -// read an array of int64_ts -void ParseVectorDataArray(std::vector<int64_t>& out, const Element& el) -{ - out.resize( 0 ); - const TokenList& tok = el.Tokens(); - if (tok.empty()) { - ParseError("unexpected empty element", &el); - } - - if (tok[0]->IsBinary()) { - const char* data = tok[0]->begin(), *end = tok[0]->end(); - - char type; - uint32_t count; - ReadBinaryDataArrayHead(data, end, type, count, el); - - if (!count) { - return; - } - - if (type != 'l') { - ParseError("expected long array (binary)", &el); - } - - std::vector<char> buff; - ReadBinaryDataArray(type, count, data, end, buff, el); - - ai_assert(data == end); - ai_assert(buff.size() == count * 8); - - out.reserve(count); - - const int64_t* ip = reinterpret_cast<const int64_t*>(&buff[0]); - for (unsigned int i = 0; i < count; ++i, ++ip) { - BE_NCONST int64_t val = *ip; - AI_SWAP8(val); - out.push_back(val); - } - - return; - } - - const size_t dim = ParseTokenAsDim(*tok[0]); - - // see notes in ParseVectorDataArray() - out.reserve(dim); - - const Scope& scope = GetRequiredScope(el); - const Element& a = GetRequiredElement(scope, "a", &el); - - for (TokenList::const_iterator it = a.Tokens().begin(), end = a.Tokens().end(); it != end;) { - const int64_t ival = ParseTokenAsInt64(**it++); - - out.push_back(ival); - } -} - -// ------------------------------------------------------------------------------------------------ -aiMatrix4x4 ReadMatrix(const Element& element) -{ - std::vector<float> values; - ParseVectorDataArray(values,element); - - if(values.size() != 16) { - ParseError("expected 16 matrix elements"); - } - - aiMatrix4x4 result; - - - result.a1 = values[0]; - result.a2 = values[1]; - result.a3 = values[2]; - result.a4 = values[3]; - - result.b1 = values[4]; - result.b2 = values[5]; - result.b3 = values[6]; - result.b4 = values[7]; - - result.c1 = values[8]; - result.c2 = values[9]; - result.c3 = values[10]; - result.c4 = values[11]; - - result.d1 = values[12]; - result.d2 = values[13]; - result.d3 = values[14]; - result.d4 = values[15]; - - result.Transpose(); - return result; -} - - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsString() with ParseError handling -std::string ParseTokenAsString(const Token& t) -{ - const char* err; - const std::string& i = ParseTokenAsString(t,err); - if(err) { - ParseError(err,t); - } - return i; -} - -bool HasElement( const Scope& sc, const std::string& index ) { - const Element* el = sc[ index ]; - if ( nullptr == el ) { - return false; - } - - return true; -} - -// ------------------------------------------------------------------------------------------------ -// extract a required element from a scope, abort if the element cannot be found -const Element& GetRequiredElement(const Scope& sc, const std::string& index, const Element* element /*= NULL*/) -{ - const Element* el = sc[index]; - if(!el) { - ParseError("did not find required element \"" + index + "\"",element); - } - return *el; -} - - -// ------------------------------------------------------------------------------------------------ -// extract required compound scope -const Scope& GetRequiredScope(const Element& el) -{ - 
const Scope* const s = el.Compound(); - if(!s) { - ParseError("expected compound scope",&el); - } - - return *s; -} - - -// ------------------------------------------------------------------------------------------------ -// get token at a particular index -const Token& GetRequiredToken(const Element& el, unsigned int index) -{ - const TokenList& t = el.Tokens(); - if(index >= t.size()) { - ParseError(Formatter::format( "missing token at index " ) << index,&el); - } - - return *t[index]; -} - - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsID() with ParseError handling -uint64_t ParseTokenAsID(const Token& t) -{ - const char* err; - const uint64_t i = ParseTokenAsID(t,err); - if(err) { - ParseError(err,t); - } - return i; -} - - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsDim() with ParseError handling -size_t ParseTokenAsDim(const Token& t) -{ - const char* err; - const size_t i = ParseTokenAsDim(t,err); - if(err) { - ParseError(err,t); - } - return i; -} - - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsFloat() with ParseError handling -float ParseTokenAsFloat(const Token& t) -{ - const char* err; - const float i = ParseTokenAsFloat(t,err); - if(err) { - ParseError(err,t); - } - return i; -} - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsInt() with ParseError handling -int ParseTokenAsInt(const Token& t) -{ - const char* err; - const int i = ParseTokenAsInt(t,err); - if(err) { - ParseError(err,t); - } - return i; -} - -// ------------------------------------------------------------------------------------------------ -// wrapper around ParseTokenAsInt64() with ParseError handling -int64_t ParseTokenAsInt64(const Token& t) -{ - const char* err; - const int64_t i = ParseTokenAsInt64(t, err); - if (err) { - ParseError(err, t); - } - return i; -} - -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXParser.h b/thirdparty/assimp/code/FBX/FBXParser.h deleted file mode 100644 index 7b0cf72039..0000000000 --- a/thirdparty/assimp/code/FBX/FBXParser.h +++ /dev/null @@ -1,235 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXParser.h - * @brief FBX parsing code - */ -#ifndef INCLUDED_AI_FBX_PARSER_H -#define INCLUDED_AI_FBX_PARSER_H - -#include <stdint.h> -#include <map> -#include <memory> -#include <assimp/LogAux.h> -#include <assimp/fast_atof.h> - -#include "FBXCompileConfig.h" -#include "FBXTokenizer.h" - -namespace Assimp { -namespace FBX { - -class Scope; -class Parser; -class Element; - -// XXX should use C++11's unique_ptr - but assimp's need to keep working with 03 -typedef std::vector< Scope* > ScopeList; -typedef std::fbx_unordered_multimap< std::string, Element* > ElementMap; - -typedef std::pair<ElementMap::const_iterator,ElementMap::const_iterator> ElementCollection; - -# define new_Scope new Scope -# define new_Element new Element - - -/** FBX data entity that consists of a key:value tuple. - * - * Example: - * @verbatim - * AnimationCurve: 23, "AnimCurve::", "" { - * [..] - * } - * @endverbatim - * - * As can be seen in this sample, elements can contain nested #Scope - * as their trailing member. **/ -class Element -{ -public: - Element(const Token& key_token, Parser& parser); - ~Element(); - - const Scope* Compound() const { - return compound.get(); - } - - const Token& KeyToken() const { - return key_token; - } - - const TokenList& Tokens() const { - return tokens; - } - -private: - const Token& key_token; - TokenList tokens; - std::unique_ptr<Scope> compound; -}; - -/** FBX data entity that consists of a 'scope', a collection - * of not necessarily unique #Element instances. - * - * Example: - * @verbatim - * GlobalSettings: { - * Version: 1000 - * Properties70: - * [...] - * } - * @endverbatim */ -class Scope -{ -public: - Scope(Parser& parser, bool topLevel = false); - ~Scope(); - - const Element* operator[] (const std::string& index) const { - ElementMap::const_iterator it = elements.find(index); - return it == elements.end() ? NULL : (*it).second; - } - - const Element* FindElementCaseInsensitive(const std::string& elementName) const { - const char* elementNameCStr = elementName.c_str(); - for (auto element = elements.begin(); element != elements.end(); ++element) - { - if (!ASSIMP_strincmp(element->first.c_str(), elementNameCStr, MAXLEN)) { - return element->second; - } - } - return NULL; - } - - ElementCollection GetCollection(const std::string& index) const { - return elements.equal_range(index); - } - - const ElementMap& Elements() const { - return elements; - } - -private: - ElementMap elements; -}; - -/** FBX parsing class, takes a list of input tokens and generates a hierarchy - * of nested #Scope instances, representing the fbx DOM.*/ -class Parser -{ -public: - /** Parse given a token list. 
Does not take ownership of the tokens - - * the objects must persist during the entire parser lifetime */ - Parser (const TokenList& tokens,bool is_binary); - ~Parser(); - - const Scope& GetRootScope() const { - return *root.get(); - } - - bool IsBinary() const { - return is_binary; - } - -private: - friend class Scope; - friend class Element; - - TokenPtr AdvanceToNextToken(); - TokenPtr LastToken() const; - TokenPtr CurrentToken() const; - -private: - const TokenList& tokens; - - TokenPtr last, current; - TokenList::const_iterator cursor; - std::unique_ptr<Scope> root; - - const bool is_binary; -}; - - -/* token parsing - this happens when building the DOM out of the parse-tree*/ -uint64_t ParseTokenAsID(const Token& t, const char*& err_out); -size_t ParseTokenAsDim(const Token& t, const char*& err_out); - -float ParseTokenAsFloat(const Token& t, const char*& err_out); -int ParseTokenAsInt(const Token& t, const char*& err_out); -int64_t ParseTokenAsInt64(const Token& t, const char*& err_out); -std::string ParseTokenAsString(const Token& t, const char*& err_out); - -/* wrapper around ParseTokenAsXXX() with DOMError handling */ -uint64_t ParseTokenAsID(const Token& t); -size_t ParseTokenAsDim(const Token& t); -float ParseTokenAsFloat(const Token& t); -int ParseTokenAsInt(const Token& t); -int64_t ParseTokenAsInt64(const Token& t); -std::string ParseTokenAsString(const Token& t); - -/* read data arrays */ -void ParseVectorDataArray(std::vector<aiVector3D>& out, const Element& el); -void ParseVectorDataArray(std::vector<aiColor4D>& out, const Element& el); -void ParseVectorDataArray(std::vector<aiVector2D>& out, const Element& el); -void ParseVectorDataArray(std::vector<int>& out, const Element& el); -void ParseVectorDataArray(std::vector<float>& out, const Element& el); -void ParseVectorDataArray(std::vector<unsigned int>& out, const Element& el); -void ParseVectorDataArray(std::vector<uint64_t>& out, const Element& e); -void ParseVectorDataArray(std::vector<int64_t>& out, const Element& el); - -bool HasElement( const Scope& sc, const std::string& index ); - -// extract a required element from a scope, abort if the element cannot be found -const Element& GetRequiredElement(const Scope& sc, const std::string& index, const Element* element = NULL); - -// extract required compound scope -const Scope& GetRequiredScope(const Element& el); -// get token at a particular index -const Token& GetRequiredToken(const Element& el, unsigned int index); - -// read a 4x4 matrix from an array of 16 floats -aiMatrix4x4 ReadMatrix(const Element& element); - -} // ! FBX -} // ! Assimp - -#endif // ! INCLUDED_AI_FBX_PARSER_H diff --git a/thirdparty/assimp/code/FBX/FBXProperties.cpp b/thirdparty/assimp/code/FBX/FBXProperties.cpp deleted file mode 100644 index 8d7036b6a9..0000000000 --- a/thirdparty/assimp/code/FBX/FBXProperties.cpp +++ /dev/null @@ -1,235 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXProperties.cpp - * @brief Implementation of the FBX dynamic properties system - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -#include "FBXTokenizer.h" -#include "FBXParser.h" -#include "FBXDocument.h" -#include "FBXDocumentUtil.h" -#include "FBXProperties.h" - -namespace Assimp { -namespace FBX { - - using namespace Util; - -// ------------------------------------------------------------------------------------------------ -Property::Property() -{ -} - -// ------------------------------------------------------------------------------------------------ -Property::~Property() -{ -} - -namespace { - -// ------------------------------------------------------------------------------------------------ -// read a typed property out of a FBX element. The return value is NULL if the property cannot be read. 
-Property* ReadTypedProperty(const Element& element) -{ - ai_assert(element.KeyToken().StringContents() == "P"); - - const TokenList& tok = element.Tokens(); - ai_assert(tok.size() >= 5); - - const std::string& s = ParseTokenAsString(*tok[1]); - const char* const cs = s.c_str(); - if (!strcmp(cs,"KString")) { - return new TypedProperty<std::string>(ParseTokenAsString(*tok[4])); - } - else if (!strcmp(cs,"bool") || !strcmp(cs,"Bool")) { - return new TypedProperty<bool>(ParseTokenAsInt(*tok[4]) != 0); - } - else if (!strcmp(cs, "int") || !strcmp(cs, "Int") || !strcmp(cs, "enum") || !strcmp(cs, "Enum")) { - return new TypedProperty<int>(ParseTokenAsInt(*tok[4])); - } - else if (!strcmp(cs, "ULongLong")) { - return new TypedProperty<uint64_t>(ParseTokenAsID(*tok[4])); - } - else if (!strcmp(cs, "KTime")) { - return new TypedProperty<int64_t>(ParseTokenAsInt64(*tok[4])); - } - else if (!strcmp(cs,"Vector3D") || - !strcmp(cs,"ColorRGB") || - !strcmp(cs,"Vector") || - !strcmp(cs,"Color") || - !strcmp(cs,"Lcl Translation") || - !strcmp(cs,"Lcl Rotation") || - !strcmp(cs,"Lcl Scaling") - ) { - return new TypedProperty<aiVector3D>(aiVector3D( - ParseTokenAsFloat(*tok[4]), - ParseTokenAsFloat(*tok[5]), - ParseTokenAsFloat(*tok[6])) - ); - } - else if (!strcmp(cs,"double") || !strcmp(cs,"Number") || !strcmp(cs,"Float") || !strcmp(cs,"FieldOfView") || !strcmp( cs, "UnitScaleFactor" ) ) { - return new TypedProperty<float>(ParseTokenAsFloat(*tok[4])); - } - return NULL; -} - - -// ------------------------------------------------------------------------------------------------ -// peek into an element and check if it contains a FBX property, if so return its name. -std::string PeekPropertyName(const Element& element) -{ - ai_assert(element.KeyToken().StringContents() == "P"); - const TokenList& tok = element.Tokens(); - if(tok.size() < 4) { - return ""; - } - - return ParseTokenAsString(*tok[0]); -} - -} //! anon - - -// ------------------------------------------------------------------------------------------------ -PropertyTable::PropertyTable() -: templateProps() -, element() -{ -} - -// ------------------------------------------------------------------------------------------------ -PropertyTable::PropertyTable(const Element& element, std::shared_ptr<const PropertyTable> templateProps) -: templateProps(templateProps) -, element(&element) -{ - const Scope& scope = GetRequiredScope(element); - for(const ElementMap::value_type& v : scope.Elements()) { - if(v.first != "P") { - DOMWarning("expected only P elements in property table",v.second); - continue; - } - - const std::string& name = PeekPropertyName(*v.second); - if(!name.length()) { - DOMWarning("could not read property name",v.second); - continue; - } - - LazyPropertyMap::const_iterator it = lazyProps.find(name); - if (it != lazyProps.end()) { - DOMWarning("duplicate property name, will hide previous value: " + name,v.second); - continue; - } - - lazyProps[name] = v.second; - } -} - - -// ------------------------------------------------------------------------------------------------ -PropertyTable::~PropertyTable() -{ - for(PropertyMap::value_type& v : props) { - delete v.second; - } -} - - -// ------------------------------------------------------------------------------------------------ -const Property* PropertyTable::Get(const std::string& name) const -{ - PropertyMap::const_iterator it = props.find(name); - if (it == props.end()) { - // hasn't been parsed yet? 
- LazyPropertyMap::const_iterator lit = lazyProps.find(name); - if(lit != lazyProps.end()) { - props[name] = ReadTypedProperty(*(*lit).second); - it = props.find(name); - - ai_assert(it != props.end()); - } - - if (it == props.end()) { - // check property template - if(templateProps) { - return templateProps->Get(name); - } - - return NULL; - } - } - - return (*it).second; -} - -DirectPropertyMap PropertyTable::GetUnparsedProperties() const -{ - DirectPropertyMap result; - - // Loop through all the lazy properties (which is all the properties) - for(const LazyPropertyMap::value_type& element : lazyProps) { - - // Skip parsed properties - if (props.end() != props.find(element.first)) continue; - - // Read the element's value. - // Wrap the naked pointer (since the call site is required to acquire ownership) - // std::unique_ptr from C++11 would be preferred both as a wrapper and a return value. - std::shared_ptr<Property> prop = std::shared_ptr<Property>(ReadTypedProperty(*element.second)); - - // Element could not be read. Skip it. - if (!prop) continue; - - // Add to result - result[element.first] = prop; - } - - return result; -} - -} //! FBX -} //! Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXProperties.h b/thirdparty/assimp/code/FBX/FBXProperties.h deleted file mode 100644 index 58755542fc..0000000000 --- a/thirdparty/assimp/code/FBX/FBXProperties.h +++ /dev/null @@ -1,185 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXProperties.h - * @brief FBX dynamic properties - */ -#ifndef INCLUDED_AI_FBX_PROPERTIES_H -#define INCLUDED_AI_FBX_PROPERTIES_H - -#include "FBXCompileConfig.h" -#include <memory> -#include <string> - -namespace Assimp { -namespace FBX { - -// Forward declarations -class Element; - -/** Represents a dynamic property. 
Type info added by deriving classes, - * see #TypedProperty. - Example: - @verbatim - P: "ShininessExponent", "double", "Number", "",0.5 - @endvebatim -*/ -class Property { -protected: - Property(); - -public: - virtual ~Property(); - -public: - template <typename T> - const T* As() const { - return dynamic_cast<const T*>(this); - } -}; - -template<typename T> -class TypedProperty : public Property { -public: - explicit TypedProperty(const T& value) - : value(value) { - // empty - } - - const T& Value() const { - return value; - } - -private: - T value; -}; - - -typedef std::fbx_unordered_map<std::string,std::shared_ptr<Property> > DirectPropertyMap; -typedef std::fbx_unordered_map<std::string,const Property*> PropertyMap; -typedef std::fbx_unordered_map<std::string,const Element*> LazyPropertyMap; - -/** - * Represents a property table as can be found in the newer FBX files (Properties60, Properties70) - */ -class PropertyTable { -public: - // in-memory property table with no source element - PropertyTable(); - PropertyTable(const Element& element, std::shared_ptr<const PropertyTable> templateProps); - ~PropertyTable(); - - const Property* Get(const std::string& name) const; - - // PropertyTable's need not be coupled with FBX elements so this can be NULL - const Element* GetElement() const { - return element; - } - - const PropertyTable* TemplateProps() const { - return templateProps.get(); - } - - DirectPropertyMap GetUnparsedProperties() const; - -private: - LazyPropertyMap lazyProps; - mutable PropertyMap props; - const std::shared_ptr<const PropertyTable> templateProps; - const Element* const element; -}; - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -T PropertyGet(const PropertyTable& in, const std::string& name, const T& defaultValue) { - const Property* const prop = in.Get(name); - if( nullptr == prop) { - return defaultValue; - } - - // strong typing, no need to be lenient - const TypedProperty<T>* const tprop = prop->As< TypedProperty<T> >(); - if( nullptr == tprop) { - return defaultValue; - } - - return tprop->Value(); -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -T PropertyGet(const PropertyTable& in, const std::string& name, bool& result, bool useTemplate=false ) { - const Property* prop = in.Get(name); - if( nullptr == prop) { - if ( ! useTemplate ) { - result = false; - return T(); - } - const PropertyTable* templ = in.TemplateProps(); - if ( nullptr == templ ) { - result = false; - return T(); - } - prop = templ->Get(name); - if ( nullptr == prop ) { - result = false; - return T(); - } - } - - // strong typing, no need to be lenient - const TypedProperty<T>* const tprop = prop->As< TypedProperty<T> >(); - if( nullptr == tprop) { - result = false; - return T(); - } - - result = true; - return tprop->Value(); -} - -} //! FBX -} //! Assimp - -#endif // INCLUDED_AI_FBX_PROPERTIES_H diff --git a/thirdparty/assimp/code/FBX/FBXTokenizer.cpp b/thirdparty/assimp/code/FBX/FBXTokenizer.cpp deleted file mode 100644 index 252cce3557..0000000000 --- a/thirdparty/assimp/code/FBX/FBXTokenizer.cpp +++ /dev/null @@ -1,248 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXTokenizer.cpp - * @brief Implementation of the FBX broadphase lexer - */ - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -// tab width for logging columns -#define ASSIMP_FBX_TAB_WIDTH 4 - -#include <assimp/ParsingUtils.h> - -#include "FBXTokenizer.h" -#include "FBXUtil.h" -#include <assimp/Exceptional.h> - -namespace Assimp { -namespace FBX { - -// ------------------------------------------------------------------------------------------------ -Token::Token(const char* sbegin, const char* send, TokenType type, unsigned int line, unsigned int column) - : -#ifdef DEBUG - contents(sbegin, static_cast<size_t>(send-sbegin)), -#endif - sbegin(sbegin) - , send(send) - , type(type) - , line(line) - , column(column) -{ - ai_assert(sbegin); - ai_assert(send); - - // tokens must be of non-zero length - ai_assert(static_cast<size_t>(send-sbegin) > 0); -} - -// ------------------------------------------------------------------------------------------------ -Token::~Token() -{ -} - -namespace { - -// ------------------------------------------------------------------------------------------------ -// signal tokenization error, this is always unrecoverable. Throws DeadlyImportError. -AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column) AI_WONT_RETURN_SUFFIX; -AI_WONT_RETURN void TokenizeError(const std::string& message, unsigned int line, unsigned int column) -{ - throw DeadlyImportError(Util::AddLineAndColumn("FBX-Tokenize",message,line,column)); -} - - -// process a potential data token up to 'cur', adding it to 'output_tokens'. 
-// ------------------------------------------------------------------------------------------------ -void ProcessDataToken( TokenList& output_tokens, const char*& start, const char*& end, - unsigned int line, - unsigned int column, - TokenType type = TokenType_DATA, - bool must_have_token = false) -{ - if (start && end) { - // sanity check: - // tokens should have no whitespace outside quoted text and [start,end] should - // properly delimit the valid range. - bool in_double_quotes = false; - for (const char* c = start; c != end + 1; ++c) { - if (*c == '\"') { - in_double_quotes = !in_double_quotes; - } - - if (!in_double_quotes && IsSpaceOrNewLine(*c)) { - TokenizeError("unexpected whitespace in token", line, column); - } - } - - if (in_double_quotes) { - TokenizeError("non-terminated double quotes", line, column); - } - - output_tokens.push_back(new_Token(start,end + 1,type,line,column)); - } - else if (must_have_token) { - TokenizeError("unexpected character, expected data token", line, column); - } - - start = end = NULL; -} - -} - -// ------------------------------------------------------------------------------------------------ -void Tokenize(TokenList& output_tokens, const char* input) -{ - ai_assert(input); - - // line and column numbers numbers are one-based - unsigned int line = 1; - unsigned int column = 1; - - bool comment = false; - bool in_double_quotes = false; - bool pending_data_token = false; - - const char* token_begin = NULL, *token_end = NULL; - for (const char* cur = input;*cur;column += (*cur == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1), ++cur) { - const char c = *cur; - - if (IsLineEnd(c)) { - comment = false; - - column = 0; - ++line; - } - - if(comment) { - continue; - } - - if(in_double_quotes) { - if (c == '\"') { - in_double_quotes = false; - token_end = cur; - - ProcessDataToken(output_tokens,token_begin,token_end,line,column); - pending_data_token = false; - } - continue; - } - - switch(c) - { - case '\"': - if (token_begin) { - TokenizeError("unexpected double-quote", line, column); - } - token_begin = cur; - in_double_quotes = true; - continue; - - case ';': - ProcessDataToken(output_tokens,token_begin,token_end,line,column); - comment = true; - continue; - - case '{': - ProcessDataToken(output_tokens,token_begin,token_end, line, column); - output_tokens.push_back(new_Token(cur,cur+1,TokenType_OPEN_BRACKET,line,column)); - continue; - - case '}': - ProcessDataToken(output_tokens,token_begin,token_end,line,column); - output_tokens.push_back(new_Token(cur,cur+1,TokenType_CLOSE_BRACKET,line,column)); - continue; - - case ',': - if (pending_data_token) { - ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_DATA,true); - } - output_tokens.push_back(new_Token(cur,cur+1,TokenType_COMMA,line,column)); - continue; - - case ':': - if (pending_data_token) { - ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_KEY,true); - } - else { - TokenizeError("unexpected colon", line, column); - } - continue; - } - - if (IsSpaceOrNewLine(c)) { - - if (token_begin) { - // peek ahead and check if the next token is a colon in which - // case this counts as KEY token. 
- TokenType type = TokenType_DATA; - for (const char* peek = cur; *peek && IsSpaceOrNewLine(*peek); ++peek) { - if (*peek == ':') { - type = TokenType_KEY; - cur = peek; - break; - } - } - - ProcessDataToken(output_tokens,token_begin,token_end,line,column,type); - } - - pending_data_token = false; - } - else { - token_end = cur; - if (!token_begin) { - token_begin = cur; - } - - pending_data_token = true; - } - } -} - -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/FBX/FBXTokenizer.h b/thirdparty/assimp/code/FBX/FBXTokenizer.h deleted file mode 100644 index afa588a470..0000000000 --- a/thirdparty/assimp/code/FBX/FBXTokenizer.h +++ /dev/null @@ -1,187 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXTokenizer.h - * @brief FBX lexer - */ -#ifndef INCLUDED_AI_FBX_TOKENIZER_H -#define INCLUDED_AI_FBX_TOKENIZER_H - -#include "FBXCompileConfig.h" -#include <assimp/ai_assert.h> -#include <vector> -#include <string> - -namespace Assimp { -namespace FBX { - -/** Rough classification for text FBX tokens used for constructing the - * basic scope hierarchy. */ -enum TokenType -{ - // { - TokenType_OPEN_BRACKET = 0, - - // } - TokenType_CLOSE_BRACKET, - - // '"blablubb"', '2', '*14' - very general token class, - // further processing happens at a later stage. - TokenType_DATA, - - // - TokenType_BINARY_DATA, - - // , - TokenType_COMMA, - - // blubb: - TokenType_KEY -}; - - -/** Represents a single token in a FBX file. Tokens are - * classified by the #TokenType enumerated types. - * - * Offers iterator protocol. Tokens are immutable. 
*/ -class Token -{ -private: - static const unsigned int BINARY_MARKER = static_cast<unsigned int>(-1); - -public: - /** construct a textual token */ - Token(const char* sbegin, const char* send, TokenType type, unsigned int line, unsigned int column); - - /** construct a binary token */ - Token(const char* sbegin, const char* send, TokenType type, size_t offset); - - ~Token(); - -public: - std::string StringContents() const { - return std::string(begin(),end()); - } - - bool IsBinary() const { - return column == BINARY_MARKER; - } - - const char* begin() const { - return sbegin; - } - - const char* end() const { - return send; - } - - TokenType Type() const { - return type; - } - - size_t Offset() const { - ai_assert(IsBinary()); - return offset; - } - - unsigned int Line() const { - ai_assert(!IsBinary()); - return static_cast<unsigned int>(line); - } - - unsigned int Column() const { - ai_assert(!IsBinary()); - return column; - } - -private: - -#ifdef DEBUG - // full string copy for the sole purpose that it nicely appears - // in msvc's debugger window. - const std::string contents; -#endif - - - const char* const sbegin; - const char* const send; - const TokenType type; - - union { - size_t line; - size_t offset; - }; - const unsigned int column; -}; - -// XXX should use C++11's unique_ptr - but assimp's need to keep working with 03 -typedef const Token* TokenPtr; -typedef std::vector< TokenPtr > TokenList; - -#define new_Token new Token - - -/** Main FBX tokenizer function. Transform input buffer into a list of preprocessed tokens. - * - * Skips over comments and generates line and column numbers. - * - * @param output_tokens Receives a list of all tokens in the input data. - * @param input_buffer Textual input buffer to be processed, 0-terminated. - * @throw DeadlyImportError if something goes wrong */ -void Tokenize(TokenList& output_tokens, const char* input); - - -/** Tokenizer function for binary FBX files. - * - * Emits a token list suitable for direct parsing. - * - * @param output_tokens Receives a list of all tokens in the input data. - * @param input_buffer Binary input buffer to be processed. - * @param length Length of input buffer, in bytes. There is no 0-terminal. - * @throw DeadlyImportError if something goes wrong */ -void TokenizeBinary(TokenList& output_tokens, const char* input, size_t length); - - -} // ! FBX -} // ! Assimp - -#endif // ! INCLUDED_AI_FBX_PARSER_H diff --git a/thirdparty/assimp/code/FBX/FBXUtil.cpp b/thirdparty/assimp/code/FBX/FBXUtil.cpp deleted file mode 100644 index c10e057c8c..0000000000 --- a/thirdparty/assimp/code/FBX/FBXUtil.cpp +++ /dev/null @@ -1,243 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FBXUtil.cpp - * @brief Implementation of internal FBX utility functions - */ - -#include "FBXUtil.h" -#include "FBXTokenizer.h" - -#include <assimp/TinyFormatter.h> -#include <string> -#include <cstring> - -#ifndef ASSIMP_BUILD_NO_FBX_IMPORTER - -namespace Assimp { -namespace FBX { -namespace Util { - -// ------------------------------------------------------------------------------------------------ -const char* TokenTypeString(TokenType t) -{ - switch(t) { - case TokenType_OPEN_BRACKET: - return "TOK_OPEN_BRACKET"; - - case TokenType_CLOSE_BRACKET: - return "TOK_CLOSE_BRACKET"; - - case TokenType_DATA: - return "TOK_DATA"; - - case TokenType_COMMA: - return "TOK_COMMA"; - - case TokenType_KEY: - return "TOK_KEY"; - - case TokenType_BINARY_DATA: - return "TOK_BINARY_DATA"; - } - - ai_assert(false); - return ""; -} - - -// ------------------------------------------------------------------------------------------------ -std::string AddOffset(const std::string& prefix, const std::string& text, size_t offset) -{ - return static_cast<std::string>( (Formatter::format() << prefix << " (offset 0x" << std::hex << offset << ") " << text) ); -} - -// ------------------------------------------------------------------------------------------------ -std::string AddLineAndColumn(const std::string& prefix, const std::string& text, unsigned int line, unsigned int column) -{ - return static_cast<std::string>( (Formatter::format() << prefix << " (line " << line << " << col " << column << ") " << text) ); -} - -// ------------------------------------------------------------------------------------------------ -std::string AddTokenText(const std::string& prefix, const std::string& text, const Token* tok) -{ - if(tok->IsBinary()) { - return static_cast<std::string>( (Formatter::format() << prefix << - " (" << TokenTypeString(tok->Type()) << - ", offset 0x" << std::hex << tok->Offset() << ") " << - text) ); - } - - return static_cast<std::string>( (Formatter::format() << prefix << - " (" << TokenTypeString(tok->Type()) << - ", line " << tok->Line() << - ", col " << tok->Column() << ") " << - text) ); -} - -// Generated by this formula: T["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[i]] = i; -static const uint8_t base64DecodeTable[128] = { - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 62, 255, 255, 255, 63, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, 255, 255, 255, 255, - 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, - 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 255, 255, 255, 255, 255 -}; - -uint8_t DecodeBase64(char ch) -{ - const auto idx = static_cast<uint8_t>(ch); - if (idx > 127) - return 255; - return base64DecodeTable[idx]; -} - -size_t ComputeDecodedSizeBase64(const char* in, size_t inLength) -{ - if (inLength < 2) - { - return 0; - } - const size_t equals = size_t(in[inLength - 1] == '=') + size_t(in[inLength - 2] == '='); - const size_t full_length = (inLength * 3) >> 2; // div by 4 - if (full_length < equals) - { - return 0; - } - return full_length - equals; -} - -size_t DecodeBase64(const char* in, size_t inLength, uint8_t* out, size_t maxOutLength) -{ - if (maxOutLength == 0 || inLength < 2) { - return 0; - } - const size_t realLength = inLength - size_t(in[inLength - 1] == '=') - size_t(in[inLength - 2] == '='); - size_t dst_offset = 0; - int val = 0, valb = -8; - for (size_t src_offset = 0; src_offset < realLength; ++src_offset) - { - const uint8_t table_value = Util::DecodeBase64(in[src_offset]); - if (table_value == 255) - { - return 0; - } - val = (val << 6) + table_value; - valb += 6; - if (valb >= 0) - { - out[dst_offset++] = static_cast<uint8_t>((val >> valb) & 0xFF); - valb -= 8; - val &= 0xFFF; - } - } - return dst_offset; -} - -static const char to_base64_string[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; -char EncodeBase64(char byte) -{ - return to_base64_string[(size_t)byte]; -} - -/** Encodes a block of 4 bytes to base64 encoding -* -* @param bytes Bytes to encode. -* @param out_string String to write encoded values to. 
-* @param string_pos Position in out_string.*/ -void EncodeByteBlock(const char* bytes, std::string& out_string, size_t string_pos) -{ - char b0 = (bytes[0] & 0xFC) >> 2; - char b1 = (bytes[0] & 0x03) << 4 | ((bytes[1] & 0xF0) >> 4); - char b2 = (bytes[1] & 0x0F) << 2 | ((bytes[2] & 0xC0) >> 6); - char b3 = (bytes[2] & 0x3F); - - out_string[string_pos + 0] = EncodeBase64(b0); - out_string[string_pos + 1] = EncodeBase64(b1); - out_string[string_pos + 2] = EncodeBase64(b2); - out_string[string_pos + 3] = EncodeBase64(b3); -} - -std::string EncodeBase64(const char* data, size_t length) -{ - // calculate extra bytes needed to get a multiple of 3 - size_t extraBytes = 3 - length % 3; - - // number of base64 bytes - size_t encodedBytes = 4 * (length + extraBytes) / 3; - - std::string encoded_string(encodedBytes, '='); - - // read blocks of 3 bytes - for (size_t ib3 = 0; ib3 < length / 3; ib3++) - { - const size_t iByte = ib3 * 3; - const size_t iEncodedByte = ib3 * 4; - const char* currData = &data[iByte]; - - EncodeByteBlock(currData, encoded_string, iEncodedByte); - } - - // if size of data is not a multiple of 3, also encode the final bytes (and add zeros where needed) - if (extraBytes > 0) - { - char finalBytes[4] = { 0,0,0,0 }; - memcpy(&finalBytes[0], &data[length - length % 3], length % 3); - - const size_t iEncodedByte = encodedBytes - 4; - EncodeByteBlock(&finalBytes[0], encoded_string, iEncodedByte); - - // add '=' at the end - for (size_t i = 0; i < 4 * extraBytes / 3; i++) - encoded_string[encodedBytes - i - 1] = '='; - } - return encoded_string; -} - -} // !Util -} // !FBX -} // !Assimp - -#endif diff --git a/thirdparty/assimp/code/Material/MaterialSystem.cpp b/thirdparty/assimp/code/Material/MaterialSystem.cpp deleted file mode 100644 index 0be6e9f7bb..0000000000 --- a/thirdparty/assimp/code/Material/MaterialSystem.cpp +++ /dev/null @@ -1,632 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file MaterialSystem.cpp - * @brief Implementation of the material system of the library - */ - -#include <assimp/Hash.h> -#include <assimp/fast_atof.h> -#include <assimp/ParsingUtils.h> -#include "MaterialSystem.h" -#include <assimp/types.h> -#include <assimp/material.h> -#include <assimp/DefaultLogger.hpp> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Get a specific property from a material -aiReturn aiGetMaterialProperty(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - const aiMaterialProperty** pPropOut) -{ - ai_assert( pMat != NULL ); - ai_assert( pKey != NULL ); - ai_assert( pPropOut != NULL ); - - /* Just search for a property with exactly this name .. - * could be improved by hashing, but it's possibly - * no worth the effort (we're bound to C structures, - * thus std::map or derivates are not applicable. */ - for ( unsigned int i = 0; i < pMat->mNumProperties; ++i ) { - aiMaterialProperty* prop = pMat->mProperties[i]; - - if (prop /* just for safety ... */ - && 0 == strcmp( prop->mKey.data, pKey ) - && (UINT_MAX == type || prop->mSemantic == type) /* UINT_MAX is a wild-card, but this is undocumented :-) */ - && (UINT_MAX == index || prop->mIndex == index)) - { - *pPropOut = pMat->mProperties[i]; - return AI_SUCCESS; - } - } - *pPropOut = NULL; - return AI_FAILURE; -} - -// ------------------------------------------------------------------------------------------------ -// Get an array of floating-point values from the material. 
-aiReturn aiGetMaterialFloatArray(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - ai_real* pOut, - unsigned int* pMax) -{ - ai_assert( pOut != nullptr ); - ai_assert( pMat != nullptr ); - - const aiMaterialProperty* prop; - aiGetMaterialProperty(pMat,pKey,type,index, (const aiMaterialProperty**) &prop); - if ( nullptr == prop) { - return AI_FAILURE; - } - - // data is given in floats, convert to ai_real - unsigned int iWrite = 0; - if( aiPTI_Float == prop->mType || aiPTI_Buffer == prop->mType) { - iWrite = prop->mDataLength / sizeof(float); - if (pMax) { - iWrite = std::min(*pMax,iWrite); ; - } - - for (unsigned int a = 0; a < iWrite; ++a) { - pOut[ a ] = static_cast<ai_real> ( reinterpret_cast<float*>(prop->mData)[a] ); - } - - if (pMax) { - *pMax = iWrite; - } - } - // data is given in doubles, convert to float - else if( aiPTI_Double == prop->mType) { - iWrite = prop->mDataLength / sizeof(double); - if (pMax) { - iWrite = std::min(*pMax,iWrite); ; - } - for (unsigned int a = 0; a < iWrite;++a) { - pOut[a] = static_cast<ai_real> ( reinterpret_cast<double*>(prop->mData)[a] ); - } - if (pMax) { - *pMax = iWrite; - } - } - // data is given in ints, convert to float - else if( aiPTI_Integer == prop->mType) { - iWrite = prop->mDataLength / sizeof(int32_t); - if (pMax) { - iWrite = std::min(*pMax,iWrite); ; - } - for (unsigned int a = 0; a < iWrite;++a) { - pOut[a] = static_cast<ai_real> ( reinterpret_cast<int32_t*>(prop->mData)[a] ); - } - if (pMax) { - *pMax = iWrite; - } - } - // a string ... read floats separated by spaces - else { - if (pMax) { - iWrite = *pMax; - } - // strings are zero-terminated with a 32 bit length prefix, so this is safe - const char *cur = prop->mData + 4; - ai_assert( prop->mDataLength >= 5 ); - ai_assert( !prop->mData[ prop->mDataLength - 1 ] ); - for ( unsigned int a = 0; ;++a) { - cur = fast_atoreal_move<ai_real>(cur,pOut[a]); - if ( a==iWrite-1 ) { - break; - } - if ( !IsSpace(*cur) ) { - ASSIMP_LOG_ERROR("Material property" + std::string(pKey) + - " is a string; failed to parse a float array out of it."); - return AI_FAILURE; - } - } - - if (pMax) { - *pMax = iWrite; - } - } - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -// Get an array if integers from the material -aiReturn aiGetMaterialIntegerArray(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - int* pOut, - unsigned int* pMax) -{ - ai_assert( pOut != NULL ); - ai_assert( pMat != NULL ); - - const aiMaterialProperty* prop; - aiGetMaterialProperty(pMat,pKey,type,index,(const aiMaterialProperty**) &prop); - if (!prop) { - return AI_FAILURE; - } - - // data is given in ints, simply copy it - unsigned int iWrite = 0; - if( aiPTI_Integer == prop->mType || aiPTI_Buffer == prop->mType) { - iWrite = std::max(static_cast<unsigned int>(prop->mDataLength / sizeof(int32_t)), 1u); - if (pMax) { - iWrite = std::min(*pMax,iWrite); - } - if (1 == prop->mDataLength) { - // bool type, 1 byte - *pOut = static_cast<int>(*prop->mData); - } - else { - for (unsigned int a = 0; a < iWrite;++a) { - pOut[a] = static_cast<int>(reinterpret_cast<int32_t*>(prop->mData)[a]); - } - } - if (pMax) { - *pMax = iWrite; - } - } - // data is given in floats convert to int - else if( aiPTI_Float == prop->mType) { - iWrite = prop->mDataLength / sizeof(float); - if (pMax) { - iWrite = std::min(*pMax,iWrite); ; - } - for (unsigned int a = 0; a < iWrite;++a) { - pOut[a] = 
static_cast<int>(reinterpret_cast<float*>(prop->mData)[a]); - } - if (pMax) { - *pMax = iWrite; - } - } - // it is a string ... no way to read something out of this - else { - if (pMax) { - iWrite = *pMax; - } - // strings are zero-terminated with a 32 bit length prefix, so this is safe - const char *cur = prop->mData+4; - ai_assert( prop->mDataLength >= 5 ); - ai_assert( !prop->mData[ prop->mDataLength - 1 ] ); - for (unsigned int a = 0; ;++a) { - pOut[a] = strtol10(cur,&cur); - if(a==iWrite-1) { - break; - } - if(!IsSpace(*cur)) { - ASSIMP_LOG_ERROR("Material property" + std::string(pKey) + - " is a string; failed to parse an integer array out of it."); - return AI_FAILURE; - } - } - - if (pMax) { - *pMax = iWrite; - } - } - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -// Get a color (3 or 4 floats) from the material -aiReturn aiGetMaterialColor(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - aiColor4D* pOut) -{ - unsigned int iMax = 4; - const aiReturn eRet = aiGetMaterialFloatArray(pMat,pKey,type,index,(ai_real*)pOut,&iMax); - - // if no alpha channel is defined: set it to 1.0 - if (3 == iMax) { - pOut->a = 1.0; - } - - return eRet; -} - -// ------------------------------------------------------------------------------------------------ -// Get a aiUVTransform (4 floats) from the material -aiReturn aiGetMaterialUVTransform(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - aiUVTransform* pOut) -{ - unsigned int iMax = 4; - return aiGetMaterialFloatArray(pMat,pKey,type,index,(ai_real*)pOut,&iMax); -} - -// ------------------------------------------------------------------------------------------------ -// Get a string from the material -aiReturn aiGetMaterialString(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - aiString* pOut) -{ - ai_assert (pOut != NULL); - - const aiMaterialProperty* prop; - aiGetMaterialProperty(pMat,pKey,type,index,(const aiMaterialProperty**)&prop); - if (!prop) { - return AI_FAILURE; - } - - if( aiPTI_String == prop->mType) { - ai_assert(prop->mDataLength>=5); - - // The string is stored as 32 but length prefix followed by zero-terminated UTF8 data - pOut->length = static_cast<unsigned int>(*reinterpret_cast<uint32_t*>(prop->mData)); - - ai_assert( pOut->length+1+4==prop->mDataLength ); - ai_assert( !prop->mData[ prop->mDataLength - 1 ] ); - memcpy(pOut->data,prop->mData+4,pOut->length+1); - } - else { - // TODO - implement lexical cast as well - ASSIMP_LOG_ERROR("Material property" + std::string(pKey) + - " was found, but is no string" ); - return AI_FAILURE; - } - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -// Get the number of textures on a particular texture stack -unsigned int aiGetMaterialTextureCount(const C_STRUCT aiMaterial* pMat, - C_ENUM aiTextureType type) -{ - ai_assert (pMat != NULL); - - // Textures are always stored with ascending indices (ValidateDS provides a check, so we don't need to do it again) - unsigned int max = 0; - for (unsigned int i = 0; i < pMat->mNumProperties;++i) { - aiMaterialProperty* prop = pMat->mProperties[i]; - - if ( prop /* just a sanity check ... 
*/ - && 0 == strcmp( prop->mKey.data, _AI_MATKEY_TEXTURE_BASE ) - && prop->mSemantic == type) { - - max = std::max(max,prop->mIndex+1); - } - } - return max; -} - -// ------------------------------------------------------------------------------------------------ -aiReturn aiGetMaterialTexture(const C_STRUCT aiMaterial* mat, - aiTextureType type, - unsigned int index, - C_STRUCT aiString* path, - aiTextureMapping* _mapping /*= NULL*/, - unsigned int* uvindex /*= NULL*/, - ai_real* blend /*= NULL*/, - aiTextureOp* op /*= NULL*/, - aiTextureMapMode* mapmode /*= NULL*/, - unsigned int* flags /*= NULL*/ - ) -{ - ai_assert( NULL != mat ); - ai_assert( NULL != path ); - - // Get the path to the texture - if (AI_SUCCESS != aiGetMaterialString(mat,AI_MATKEY_TEXTURE(type,index),path)) { - return AI_FAILURE; - } - - // Determine mapping type - int mapping_ = static_cast<int>(aiTextureMapping_UV); - aiGetMaterialInteger(mat,AI_MATKEY_MAPPING(type,index), &mapping_); - aiTextureMapping mapping = static_cast<aiTextureMapping>(mapping_); - if (_mapping) - *_mapping = mapping; - - // Get UV index - if (aiTextureMapping_UV == mapping && uvindex) { - aiGetMaterialInteger(mat,AI_MATKEY_UVWSRC(type,index),(int*)uvindex); - } - // Get blend factor - if (blend) { - aiGetMaterialFloat(mat,AI_MATKEY_TEXBLEND(type,index),blend); - } - // Get texture operation - if (op){ - aiGetMaterialInteger(mat,AI_MATKEY_TEXOP(type,index),(int*)op); - } - // Get texture mapping modes - if (mapmode) { - aiGetMaterialInteger(mat,AI_MATKEY_MAPPINGMODE_U(type,index),(int*)&mapmode[0]); - aiGetMaterialInteger(mat,AI_MATKEY_MAPPINGMODE_V(type,index),(int*)&mapmode[1]); - } - // Get texture flags - if (flags){ - aiGetMaterialInteger(mat,AI_MATKEY_TEXFLAGS(type,index),(int*)flags); - } - - return AI_SUCCESS; -} - - -static const unsigned int DefaultNumAllocated = 5; - -// ------------------------------------------------------------------------------------------------ -// Construction. Actually the one and only way to get an aiMaterial instance -aiMaterial::aiMaterial() -: mProperties( nullptr ) -, mNumProperties( 0 ) -, mNumAllocated( DefaultNumAllocated ) { - // Allocate 5 entries by default - mProperties = new aiMaterialProperty*[ DefaultNumAllocated ]; -} - -// ------------------------------------------------------------------------------------------------ -aiMaterial::~aiMaterial() -{ - Clear(); - - delete[] mProperties; -} - -// ------------------------------------------------------------------------------------------------ -aiString aiMaterial::GetName() { - aiString name; - Get(AI_MATKEY_NAME, name); - - return name; -} - -// ------------------------------------------------------------------------------------------------ -void aiMaterial::Clear() -{ - for ( unsigned int i = 0; i < mNumProperties; ++i ) { - // delete this entry - delete mProperties[ i ]; - AI_DEBUG_INVALIDATE_PTR(mProperties[i]); - } - mNumProperties = 0; - - // The array remains allocated, we just invalidated its contents -} - -// ------------------------------------------------------------------------------------------------ -aiReturn aiMaterial::RemoveProperty ( const char* pKey,unsigned int type, unsigned int index ) -{ - ai_assert( nullptr != pKey ); - - for (unsigned int i = 0; i < mNumProperties;++i) { - aiMaterialProperty* prop = mProperties[i]; - - if (prop && !strcmp( prop->mKey.data, pKey ) && - prop->mSemantic == type && prop->mIndex == index) - { - // Delete this entry - delete mProperties[i]; - - // collapse the array behind --. 
- --mNumProperties; - for (unsigned int a = i; a < mNumProperties;++a) { - mProperties[a] = mProperties[a+1]; - } - return AI_SUCCESS; - } - } - - return AI_FAILURE; -} - -// ------------------------------------------------------------------------------------------------ -aiReturn aiMaterial::AddBinaryProperty (const void* pInput, - unsigned int pSizeInBytes, - const char* pKey, - unsigned int type, - unsigned int index, - aiPropertyTypeInfo pType - ) -{ - ai_assert( pInput != NULL ); - ai_assert( pKey != NULL ); - ai_assert( 0 != pSizeInBytes ); - - if ( 0 == pSizeInBytes ) { - - } - - // first search the list whether there is already an entry with this key - unsigned int iOutIndex( UINT_MAX ); - for ( unsigned int i = 0; i < mNumProperties; ++i ) { - aiMaterialProperty *prop( mProperties[ i ] ); - - if (prop /* just for safety */ && !strcmp( prop->mKey.data, pKey ) && - prop->mSemantic == type && prop->mIndex == index){ - - delete mProperties[i]; - iOutIndex = i; - } - } - - // Allocate a new material property - aiMaterialProperty* pcNew = new aiMaterialProperty(); - - // .. and fill it - pcNew->mType = pType; - pcNew->mSemantic = type; - pcNew->mIndex = index; - - pcNew->mDataLength = pSizeInBytes; - pcNew->mData = new char[pSizeInBytes]; - memcpy (pcNew->mData,pInput,pSizeInBytes); - - pcNew->mKey.length = ::strlen(pKey); - ai_assert ( MAXLEN > pcNew->mKey.length); - strcpy( pcNew->mKey.data, pKey ); - - if (UINT_MAX != iOutIndex) { - mProperties[iOutIndex] = pcNew; - return AI_SUCCESS; - } - - // resize the array ... double the storage allocated - if (mNumProperties == mNumAllocated) { - const unsigned int iOld = mNumAllocated; - mNumAllocated *= 2; - - aiMaterialProperty** ppTemp; - try { - ppTemp = new aiMaterialProperty*[mNumAllocated]; - } catch (std::bad_alloc&) { - delete pcNew; - return AI_OUTOFMEMORY; - } - - // just copy all items over; then replace the old array - memcpy (ppTemp,mProperties,iOld * sizeof(void*)); - - delete[] mProperties; - mProperties = ppTemp; - } - // push back ... - mProperties[mNumProperties++] = pcNew; - - return AI_SUCCESS; -} - -// ------------------------------------------------------------------------------------------------ -aiReturn aiMaterial::AddProperty (const aiString* pInput, - const char* pKey, - unsigned int type, - unsigned int index) -{ - ai_assert(sizeof(ai_uint32)==4); - return AddBinaryProperty(pInput, - static_cast<unsigned int>(pInput->length+1+4), - pKey, - type, - index, - aiPTI_String); -} - -// ------------------------------------------------------------------------------------------------ -uint32_t Assimp::ComputeMaterialHash(const aiMaterial* mat, bool includeMatName /*= false*/) -{ - uint32_t hash = 1503; // magic start value, chosen to be my birthday :-) - for ( unsigned int i = 0; i < mat->mNumProperties; ++i ) { - aiMaterialProperty* prop; - - // Exclude all properties whose first character is '?' from the hash - // See doc for aiMaterialProperty. 
- if ((prop = mat->mProperties[i]) && (includeMatName || prop->mKey.data[0] != '?')) { - - hash = SuperFastHash(prop->mKey.data,(unsigned int)prop->mKey.length,hash); - hash = SuperFastHash(prop->mData,prop->mDataLength,hash); - - // Combine the semantic and the index with the hash - hash = SuperFastHash((const char*)&prop->mSemantic,sizeof(unsigned int),hash); - hash = SuperFastHash((const char*)&prop->mIndex,sizeof(unsigned int),hash); - } - } - return hash; -} - -// ------------------------------------------------------------------------------------------------ -void aiMaterial::CopyPropertyList(aiMaterial* pcDest, - const aiMaterial* pcSrc - ) -{ - ai_assert(NULL != pcDest); - ai_assert(NULL != pcSrc); - - unsigned int iOldNum = pcDest->mNumProperties; - pcDest->mNumAllocated += pcSrc->mNumAllocated; - pcDest->mNumProperties += pcSrc->mNumProperties; - - aiMaterialProperty** pcOld = pcDest->mProperties; - pcDest->mProperties = new aiMaterialProperty*[pcDest->mNumAllocated]; - - if (iOldNum && pcOld) { - for (unsigned int i = 0; i < iOldNum;++i) { - pcDest->mProperties[i] = pcOld[i]; - } - } - - if ( pcOld ) { - delete[] pcOld; - } - - for (unsigned int i = iOldNum; i< pcDest->mNumProperties;++i) { - aiMaterialProperty* propSrc = pcSrc->mProperties[i]; - - // search whether we have already a property with this name -> if yes, overwrite it - aiMaterialProperty* prop; - for ( unsigned int q = 0; q < iOldNum; ++q ) { - prop = pcDest->mProperties[q]; - if (prop /* just for safety */ && prop->mKey == propSrc->mKey && prop->mSemantic == propSrc->mSemantic - && prop->mIndex == propSrc->mIndex) { - delete prop; - - // collapse the whole array ... - memmove(&pcDest->mProperties[q],&pcDest->mProperties[q+1],i-q); - i--; - pcDest->mNumProperties--; - } - } - - // Allocate the output property and copy the source property - prop = pcDest->mProperties[i] = new aiMaterialProperty(); - prop->mKey = propSrc->mKey; - prop->mDataLength = propSrc->mDataLength; - prop->mType = propSrc->mType; - prop->mSemantic = propSrc->mSemantic; - prop->mIndex = propSrc->mIndex; - - prop->mData = new char[propSrc->mDataLength]; - memcpy(prop->mData,propSrc->mData,prop->mDataLength); - } -} diff --git a/thirdparty/assimp/code/Material/MaterialSystem.h b/thirdparty/assimp/code/Material/MaterialSystem.h deleted file mode 100644 index 67d53578cb..0000000000 --- a/thirdparty/assimp/code/Material/MaterialSystem.h +++ /dev/null @@ -1,72 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file MaterialSystem.h - * Now that #MaterialHelper is gone, this file only contains some - * internal material utility functions. - */ -#ifndef AI_MATERIALSYSTEM_H_INC -#define AI_MATERIALSYSTEM_H_INC - -#include <stdint.h> - -struct aiMaterial; - -namespace Assimp { - -// ------------------------------------------------------------------------------ -/** Computes a hash (hopefully unique) from all material properties - * The hash value reflects the current property state, so if you add any - * property and call this method again, the resulting hash value will be - * different. The hash is not persistent across different builds and platforms. - * - * @param includeMatName Set to 'true' to take all properties with - * '?' as initial character in their name into account. - * Currently #AI_MATKEY_NAME is the only example. - * @return 32 Bit jash value for the material - */ -uint32_t ComputeMaterialHash(const aiMaterial* mat, bool includeMatName = false); - - -} // ! namespace Assimp - -#endif //!! AI_MATERIALSYSTEM_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.cpp b/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.cpp deleted file mode 100644 index 75daeb6b59..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.cpp +++ /dev/null @@ -1,268 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#include "ArmaturePopulate.h" - -#include <assimp/BaseImporter.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <iostream> - -namespace Assimp { - -/// The default class constructor. -ArmaturePopulate::ArmaturePopulate() : BaseProcess() -{} - -/// The class destructor. -ArmaturePopulate::~ArmaturePopulate() -{} - -bool ArmaturePopulate::IsActive(unsigned int pFlags) const { - return (pFlags & aiProcess_PopulateArmatureData) != 0; -} - -void ArmaturePopulate::SetupProperties(const Importer *pImp) { - // do nothing -} - -void ArmaturePopulate::Execute(aiScene *out) { - - // Now convert all bone positions to the correct mOffsetMatrix - std::vector<aiBone *> bones; - std::vector<aiNode *> nodes; - std::map<aiBone *, aiNode *> bone_stack; - BuildBoneList(out->mRootNode, out->mRootNode, out, bones); - BuildNodeList(out->mRootNode, nodes); - - BuildBoneStack(out->mRootNode, out->mRootNode, out, bones, bone_stack, nodes); - - ASSIMP_LOG_DEBUG_F("Bone stack size: ", bone_stack.size()); - - for (std::pair<aiBone *, aiNode *> kvp : bone_stack) { - aiBone *bone = kvp.first; - aiNode *bone_node = kvp.second; - ASSIMP_LOG_DEBUG_F("active node lookup: ", bone->mName.C_Str()); - // lcl transform grab - done in generate_nodes :) - - // bone->mOffsetMatrix = bone_node->mTransformation; - aiNode *armature = GetArmatureRoot(bone_node, bones); - - ai_assert(armature); - - // set up bone armature id - bone->mArmature = armature; - - // set this bone node to be referenced properly - ai_assert(bone_node); - bone->mNode = bone_node; - } -} - - -/* Reprocess all nodes to calculate bone transforms properly based on the REAL - * mOffsetMatrix not the local. 
*/ -/* Before this would use mesh transforms which is wrong for bone transforms */ -/* Before this would work for simple character skeletons but not complex meshes - * with multiple origins */ -/* Source: sketch fab log cutter fbx */ -void ArmaturePopulate::BuildBoneList(aiNode *current_node, - const aiNode *root_node, - const aiScene *scene, - std::vector<aiBone *> &bones) { - ai_assert(scene); - for (unsigned int nodeId = 0; nodeId < current_node->mNumChildren; ++nodeId) { - aiNode *child = current_node->mChildren[nodeId]; - ai_assert(child); - - // check for bones - for (unsigned int meshId = 0; meshId < child->mNumMeshes; ++meshId) { - ai_assert(child->mMeshes); - unsigned int mesh_index = child->mMeshes[meshId]; - aiMesh *mesh = scene->mMeshes[mesh_index]; - ai_assert(mesh); - - for (unsigned int boneId = 0; boneId < mesh->mNumBones; ++boneId) { - aiBone *bone = mesh->mBones[boneId]; - ai_assert(bone); - - // duplicate meshes exist with the same bones sometimes :) - // so this must be detected - if (std::find(bones.begin(), bones.end(), bone) == bones.end()) { - // add the element once - bones.push_back(bone); - } - } - - // find mesh and get bones - // then do recursive lookup for bones in root node hierarchy - } - - BuildBoneList(child, root_node, scene, bones); - } -} - -/* Prepare flat node list which can be used for non recursive lookups later */ -void ArmaturePopulate::BuildNodeList(const aiNode *current_node, - std::vector<aiNode *> &nodes) { - ai_assert(current_node); - - for (unsigned int nodeId = 0; nodeId < current_node->mNumChildren; ++nodeId) { - aiNode *child = current_node->mChildren[nodeId]; - ai_assert(child); - - nodes.push_back(child); - - BuildNodeList(child, nodes); - } -} - -/* A bone stack allows us to have multiple armatures, with the same bone names - * A bone stack allows us also to retrieve bones true transform even with - * duplicate names :) - */ -void ArmaturePopulate::BuildBoneStack(aiNode *current_node, - const aiNode *root_node, - const aiScene *scene, - const std::vector<aiBone *> &bones, - std::map<aiBone *, aiNode *> &bone_stack, - std::vector<aiNode *> &node_stack) { - ai_assert(scene); - ai_assert(root_node); - ai_assert(!node_stack.empty()); - - for (aiBone *bone : bones) { - ai_assert(bone); - aiNode *node = GetNodeFromStack(bone->mName, node_stack); - if (node == nullptr) { - node_stack.clear(); - BuildNodeList(root_node, node_stack); - ASSIMP_LOG_DEBUG_F("Resetting bone stack: nullptr element ", bone->mName.C_Str()); - - node = GetNodeFromStack(bone->mName, node_stack); - - if (!node) { - ASSIMP_LOG_ERROR("serious import issue node for bone was not detected"); - continue; - } - } - - ASSIMP_LOG_DEBUG_F("Successfully added bone[", bone->mName.C_Str(), "] to stack and bone node is: ", node->mName.C_Str()); - - bone_stack.insert(std::pair<aiBone *, aiNode *>(bone, node)); - } -} - - -/* Returns the armature root node */ -/* This is required to be detected for a bone initially, it will recurse up - * until it cannot find another bone and return the node No known failure - * points. 
(yet) - */ -aiNode *ArmaturePopulate::GetArmatureRoot(aiNode *bone_node, - std::vector<aiBone *> &bone_list) { - while (bone_node) { - if (!IsBoneNode(bone_node->mName, bone_list)) { - ASSIMP_LOG_DEBUG_F("GetArmatureRoot() Found valid armature: ", bone_node->mName.C_Str()); - return bone_node; - } - - bone_node = bone_node->mParent; - } - - ASSIMP_LOG_ERROR("GetArmatureRoot() can't find armature!"); - - return nullptr; -} - - - -/* Simple IsBoneNode check if this could be a bone */ -bool ArmaturePopulate::IsBoneNode(const aiString &bone_name, - std::vector<aiBone *> &bones) { - for (aiBone *bone : bones) { - if (bone->mName == bone_name) { - return true; - } - } - - return false; -} - -/* Pop this node by name from the stack if found */ -/* Used in multiple armature situations with duplicate node / bone names */ -/* Known flaw: cannot have nodes with bone names, will be fixed in later release - */ -/* (serious to be fixed) Known flaw: nodes which have more than one bone could - * be prematurely dropped from stack */ -aiNode *ArmaturePopulate::GetNodeFromStack(const aiString &node_name, - std::vector<aiNode *> &nodes) { - std::vector<aiNode *>::iterator iter; - aiNode *found = nullptr; - for (iter = nodes.begin(); iter < nodes.end(); ++iter) { - aiNode *element = *iter; - ai_assert(element); - // node valid and node name matches - if (element->mName == node_name) { - found = element; - break; - } - } - - if (found != nullptr) { - ASSIMP_LOG_INFO_F("Removed node from stack: ", found->mName.C_Str()); - // now pop the element from the node list - nodes.erase(iter); - - return found; - } - - // unique names can cause this problem - ASSIMP_LOG_ERROR("[Serious] GetNodeFromStack() can't find node from stack!"); - - return nullptr; -} - - - - -} // Namespace Assimp diff --git a/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.h b/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.h deleted file mode 100644 index aa1ad7c80c..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ArmaturePopulate.h +++ /dev/null @@ -1,112 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#ifndef ARMATURE_POPULATE_H_ -#define ARMATURE_POPULATE_H_ - -#include "Common/BaseProcess.h" -#include <assimp/BaseImporter.h> -#include <vector> -#include <map> - - -struct aiNode; -struct aiBone; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** Armature Populate: This is a post process designed - * To save you time when importing models into your game engines - * This was originally designed only for fbx but will work with other formats - * it is intended to auto populate aiBone data with armature and the aiNode - * This is very useful when dealing with skinned meshes - * or when dealing with many different skeletons - * It's off by default but recommend that you try it and use it - * It should reduce down any glue code you have in your - * importers - * You can contact RevoluPowered <gordon@gordonite.tech> - * For more info about this -*/ -class ASSIMP_API ArmaturePopulate : public BaseProcess { -public: - /// The default class constructor. - ArmaturePopulate(); - - /// The class destructor. - virtual ~ArmaturePopulate(); - - /// Overwritten, @see BaseProcess - virtual bool IsActive( unsigned int pFlags ) const; - - /// Overwritten, @see BaseProcess - virtual void SetupProperties( const Importer* pImp ); - - /// Overwritten, @see BaseProcess - virtual void Execute( aiScene* pScene ); - - static aiNode *GetArmatureRoot(aiNode *bone_node, - std::vector<aiBone *> &bone_list); - - static bool IsBoneNode(const aiString &bone_name, - std::vector<aiBone *> &bones); - - static aiNode *GetNodeFromStack(const aiString &node_name, - std::vector<aiNode *> &nodes); - - static void BuildNodeList(const aiNode *current_node, - std::vector<aiNode *> &nodes); - - static void BuildBoneList(aiNode *current_node, const aiNode *root_node, - const aiScene *scene, - std::vector<aiBone *> &bones); - - static void BuildBoneStack(aiNode *current_node, const aiNode *root_node, - const aiScene *scene, - const std::vector<aiBone *> &bones, - std::map<aiBone *, aiNode *> &bone_stack, - std::vector<aiNode *> &node_stack); -}; - -} // Namespace Assimp - - -#endif // SCALE_PROCESS_H_
\ No newline at end of file diff --git a/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.cpp b/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.cpp deleted file mode 100644 index a3f7dd2557..0000000000 --- a/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.cpp +++ /dev/null @@ -1,319 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to calculate - * tangents and bitangents for all imported meshes - */ - -// internal headers -#include "CalcTangentsProcess.h" -#include "ProcessHelper.h" -#include <assimp/TinyFormatter.h> -#include <assimp/qnan.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -CalcTangentsProcess::CalcTangentsProcess() -: configMaxAngle( AI_DEG_TO_RAD(45.f) ) -, configSourceUV( 0 ) { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -CalcTangentsProcess::~CalcTangentsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool CalcTangentsProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_CalcTangentSpace) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void CalcTangentsProcess::SetupProperties(const Importer* pImp) -{ - ai_assert( NULL != pImp ); - - // get the current value of the property - configMaxAngle = pImp->GetPropertyFloat(AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE,45.f); - configMaxAngle = std::max(std::min(configMaxAngle,45.0f),0.0f); - configMaxAngle = AI_DEG_TO_RAD(configMaxAngle); - - configSourceUV = pImp->GetPropertyInteger(AI_CONFIG_PP_CT_TEXTURE_CHANNEL_INDEX,0); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void CalcTangentsProcess::Execute( aiScene* pScene) -{ - ai_assert( NULL != pScene ); - - ASSIMP_LOG_DEBUG("CalcTangentsProcess begin"); - - bool bHas = false; - for ( unsigned int a = 0; a < pScene->mNumMeshes; a++ ) { - if(ProcessMesh( pScene->mMeshes[a],a))bHas = true; - } - - if ( bHas ) { - ASSIMP_LOG_INFO("CalcTangentsProcess finished. Tangents have been calculated"); - } else { - ASSIMP_LOG_DEBUG("CalcTangentsProcess finished"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Calculates tangents and bi-tangents for the given mesh -bool CalcTangentsProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshIndex) -{ - // we assume that the mesh is still in the verbose vertex format where each face has its own set - // of vertices and no vertices are shared between faces. Sadly I don't know any quick test to - // assert() it here. - // assert( must be verbose, dammit); - - if (pMesh->mTangents) // this implies that mBitangents is also there - return false; - - // If the mesh consists of lines and/or points but not of - // triangles or higher-order polygons the normal vectors - // are undefined. - if (!(pMesh->mPrimitiveTypes & (aiPrimitiveType_TRIANGLE | aiPrimitiveType_POLYGON))) - { - ASSIMP_LOG_INFO("Tangents are undefined for line and point meshes"); - return false; - } - - // what we can check, though, is if the mesh has normals and texture coordinates. That's a requirement - if( pMesh->mNormals == NULL) - { - ASSIMP_LOG_ERROR("Failed to compute tangents; need normals"); - return false; - } - if( configSourceUV >= AI_MAX_NUMBER_OF_TEXTURECOORDS || !pMesh->mTextureCoords[configSourceUV] ) - { - ASSIMP_LOG_ERROR((Formatter::format("Failed to compute tangents; need UV data in channel"),configSourceUV)); - return false; - } - - const float angleEpsilon = 0.9999f; - - std::vector<bool> vertexDone( pMesh->mNumVertices, false); - const float qnan = get_qnan(); - - // create space for the tangents and bitangents - pMesh->mTangents = new aiVector3D[pMesh->mNumVertices]; - pMesh->mBitangents = new aiVector3D[pMesh->mNumVertices]; - - const aiVector3D* meshPos = pMesh->mVertices; - const aiVector3D* meshNorm = pMesh->mNormals; - const aiVector3D* meshTex = pMesh->mTextureCoords[configSourceUV]; - aiVector3D* meshTang = pMesh->mTangents; - aiVector3D* meshBitang = pMesh->mBitangents; - - // calculate the tangent and bitangent for every face - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) - { - const aiFace& face = pMesh->mFaces[a]; - if (face.mNumIndices < 3) - { - // There are less than three indices, thus the tangent vector - // is not defined. We are finished with these vertices now, - // their tangent vectors are set to qnan. 
- for (unsigned int i = 0; i < face.mNumIndices;++i) - { - unsigned int idx = face.mIndices[i]; - vertexDone [idx] = true; - meshTang [idx] = aiVector3D(qnan); - meshBitang [idx] = aiVector3D(qnan); - } - - continue; - } - - // triangle or polygon... we always use only the first three indices. A polygon - // is supposed to be planar anyways.... - // FIXME: (thom) create correct calculation for multi-vertex polygons maybe? - const unsigned int p0 = face.mIndices[0], p1 = face.mIndices[1], p2 = face.mIndices[2]; - - // position differences p1->p2 and p1->p3 - aiVector3D v = meshPos[p1] - meshPos[p0], w = meshPos[p2] - meshPos[p0]; - - // texture offset p1->p2 and p1->p3 - float sx = meshTex[p1].x - meshTex[p0].x, sy = meshTex[p1].y - meshTex[p0].y; - float tx = meshTex[p2].x - meshTex[p0].x, ty = meshTex[p2].y - meshTex[p0].y; - float dirCorrection = (tx * sy - ty * sx) < 0.0f ? -1.0f : 1.0f; - // when t1, t2, t3 in same position in UV space, just use default UV direction. - if ( sx * ty == sy * tx ) { - sx = 0.0; sy = 1.0; - tx = 1.0; ty = 0.0; - } - - // tangent points in the direction where to positive X axis of the texture coord's would point in model space - // bitangent's points along the positive Y axis of the texture coord's, respectively - aiVector3D tangent, bitangent; - tangent.x = (w.x * sy - v.x * ty) * dirCorrection; - tangent.y = (w.y * sy - v.y * ty) * dirCorrection; - tangent.z = (w.z * sy - v.z * ty) * dirCorrection; - bitangent.x = (w.x * sx - v.x * tx) * dirCorrection; - bitangent.y = (w.y * sx - v.y * tx) * dirCorrection; - bitangent.z = (w.z * sx - v.z * tx) * dirCorrection; - - // store for every vertex of that face - for( unsigned int b = 0; b < face.mNumIndices; ++b ) { - unsigned int p = face.mIndices[b]; - - // project tangent and bitangent into the plane formed by the vertex' normal - aiVector3D localTangent = tangent - meshNorm[p] * (tangent * meshNorm[p]); - aiVector3D localBitangent = bitangent - meshNorm[p] * (bitangent * meshNorm[p]); - localTangent.NormalizeSafe(); localBitangent.NormalizeSafe(); - - // reconstruct tangent/bitangent according to normal and bitangent/tangent when it's infinite or NaN. - bool invalid_tangent = is_special_float(localTangent.x) || is_special_float(localTangent.y) || is_special_float(localTangent.z); - bool invalid_bitangent = is_special_float(localBitangent.x) || is_special_float(localBitangent.y) || is_special_float(localBitangent.z); - if (invalid_tangent != invalid_bitangent) { - if (invalid_tangent) { - localTangent = meshNorm[p] ^ localBitangent; - localTangent.NormalizeSafe(); - } else { - localBitangent = localTangent ^ meshNorm[p]; - localBitangent.NormalizeSafe(); - } - } - - // and write it into the mesh. 
- meshTang[ p ] = localTangent; - meshBitang[ p ] = localBitangent; - } - } - - - // create a helper to quickly find locally close vertices among the vertex array - // FIX: check whether we can reuse the SpatialSort of a previous step - SpatialSort* vertexFinder = NULL; - SpatialSort _vertexFinder; - float posEpsilon; - if (shared) - { - std::vector<std::pair<SpatialSort,float> >* avf; - shared->GetProperty(AI_SPP_SPATIAL_SORT,avf); - if (avf) - { - std::pair<SpatialSort,float>& blubb = avf->operator [] (meshIndex); - vertexFinder = &blubb.first; - posEpsilon = blubb.second;; - } - } - if (!vertexFinder) - { - _vertexFinder.Fill(pMesh->mVertices, pMesh->mNumVertices, sizeof( aiVector3D)); - vertexFinder = &_vertexFinder; - posEpsilon = ComputePositionEpsilon(pMesh); - } - std::vector<unsigned int> verticesFound; - - const float fLimit = std::cos(configMaxAngle); - std::vector<unsigned int> closeVertices; - - // in the second pass we now smooth out all tangents and bitangents at the same local position - // if they are not too far off. - for( unsigned int a = 0; a < pMesh->mNumVertices; a++) - { - if( vertexDone[a]) - continue; - - const aiVector3D& origPos = pMesh->mVertices[a]; - const aiVector3D& origNorm = pMesh->mNormals[a]; - const aiVector3D& origTang = pMesh->mTangents[a]; - const aiVector3D& origBitang = pMesh->mBitangents[a]; - closeVertices.resize( 0 ); - - // find all vertices close to that position - vertexFinder->FindPositions( origPos, posEpsilon, verticesFound); - - closeVertices.reserve (verticesFound.size()+5); - closeVertices.push_back( a); - - // look among them for other vertices sharing the same normal and a close-enough tangent/bitangent - for( unsigned int b = 0; b < verticesFound.size(); b++) - { - unsigned int idx = verticesFound[b]; - if( vertexDone[idx]) - continue; - if( meshNorm[idx] * origNorm < angleEpsilon) - continue; - if( meshTang[idx] * origTang < fLimit) - continue; - if( meshBitang[idx] * origBitang < fLimit) - continue; - - // it's similar enough -> add it to the smoothing group - closeVertices.push_back( idx); - vertexDone[idx] = true; - } - - // smooth the tangents and bitangents of all vertices that were found to be close enough - aiVector3D smoothTangent( 0, 0, 0), smoothBitangent( 0, 0, 0); - for( unsigned int b = 0; b < closeVertices.size(); ++b) - { - smoothTangent += meshTang[ closeVertices[b] ]; - smoothBitangent += meshBitang[ closeVertices[b] ]; - } - smoothTangent.Normalize(); - smoothBitangent.Normalize(); - - // and write it back into all affected tangents - for( unsigned int b = 0; b < closeVertices.size(); ++b) - { - meshTang[ closeVertices[b] ] = smoothTangent; - meshBitang[ closeVertices[b] ] = smoothBitangent; - } - } - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.h b/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.h deleted file mode 100644 index 3568a624f8..0000000000 --- a/thirdparty/assimp/code/PostProcessing/CalcTangentsProcess.h +++ /dev/null @@ -1,117 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - - -/** @file Defines a post processing step to calculate tangents and - bi-tangents on all imported meshes.*/ -#ifndef AI_CALCTANGENTSPROCESS_H_INC -#define AI_CALCTANGENTSPROCESS_H_INC - -#include "Common/BaseProcess.h" - -struct aiMesh; - -namespace Assimp -{ - -// --------------------------------------------------------------------------- -/** The CalcTangentsProcess calculates the tangent and bitangent for any vertex - * of all meshes. It is expected to be run before the JoinVerticesProcess runs - * because the joining of vertices also considers tangents and bitangents for - * uniqueness. - */ -class ASSIMP_API_WINONLY CalcTangentsProcess : public BaseProcess -{ -public: - - CalcTangentsProcess(); - ~CalcTangentsProcess(); - -public: - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. - * A bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - void SetupProperties(const Importer* pImp); - - - // setter for configMaxAngle - inline void SetMaxSmoothAngle(float f) - { - configMaxAngle =f; - } - -protected: - - // ------------------------------------------------------------------- - /** Calculates tangents and bitangents for a specific mesh. - * @param pMesh The mesh to process. - * @param meshIndex Index of the mesh - */ - bool ProcessMesh( aiMesh* pMesh, unsigned int meshIndex); - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * @param pScene The imported data to work at. 
- */ - void Execute( aiScene* pScene); - -private: - - /** Configuration option: maximum smoothing angle, in radians*/ - float configMaxAngle; - unsigned int configSourceUV; -}; - -} // end of namespace Assimp - -#endif // AI_CALCTANGENTSPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.cpp b/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.cpp deleted file mode 100644 index df4d44337d..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.cpp +++ /dev/null @@ -1,506 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file GenUVCoords step */ - - -#include "ComputeUVMappingProcess.h" -#include "ProcessHelper.h" -#include <assimp/Exceptional.h> - -using namespace Assimp; - -namespace { - - const static aiVector3D base_axis_y(0.0,1.0,0.0); - const static aiVector3D base_axis_x(1.0,0.0,0.0); - const static aiVector3D base_axis_z(0.0,0.0,1.0); - const static ai_real angle_epsilon = ai_real( 0.95 ); -} - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -ComputeUVMappingProcess::ComputeUVMappingProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -ComputeUVMappingProcess::~ComputeUVMappingProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool ComputeUVMappingProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_GenUVCoords) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Check whether a ray intersects a plane and find the intersection point -inline bool PlaneIntersect(const aiRay& ray, const aiVector3D& planePos, - const aiVector3D& planeNormal, aiVector3D& pos) -{ - const ai_real b = planeNormal * (planePos - ray.pos); - ai_real h = ray.dir * planeNormal; - if ((h < 10e-5 && h > -10e-5) || (h = b/h) < 0) - return false; - - pos = ray.pos + (ray.dir * h); - return true; -} - -// ------------------------------------------------------------------------------------------------ -// Find the first empty UV channel in a mesh -inline unsigned int FindEmptyUVChannel (aiMesh* mesh) -{ - for (unsigned int m = 0; m < AI_MAX_NUMBER_OF_TEXTURECOORDS;++m) - if (!mesh->mTextureCoords[m])return m; - - ASSIMP_LOG_ERROR("Unable to compute UV coordinates, no free UV slot found"); - return UINT_MAX; -} - -// ------------------------------------------------------------------------------------------------ -// Try to remove UV seams -void RemoveUVSeams (aiMesh* mesh, aiVector3D* out) -{ - // TODO: just a very rough algorithm. I think it could be done - // much easier, but I don't know how and am currently too tired to - // to think about a better solution. - - const static ai_real LOWER_LIMIT = ai_real( 0.1 ); - const static ai_real UPPER_LIMIT = ai_real( 0.9 ); - - const static ai_real LOWER_EPSILON = ai_real( 10e-3 ); - const static ai_real UPPER_EPSILON = ai_real( 1.0-10e-3 ); - - for (unsigned int fidx = 0; fidx < mesh->mNumFaces;++fidx) - { - const aiFace& face = mesh->mFaces[fidx]; - if (face.mNumIndices < 3) continue; // triangles and polygons only, please - - unsigned int small = face.mNumIndices, large = small; - bool zero = false, one = false, round_to_zero = false; - - // Check whether this face lies on a UV seam. We can just guess, - // but the assumption that a face with at least one very small - // on the one side and one very large U coord on the other side - // lies on a UV seam should work for most cases. - for (unsigned int n = 0; n < face.mNumIndices;++n) - { - if (out[face.mIndices[n]].x < LOWER_LIMIT) - { - small = n; - - // If we have a U value very close to 0 we can't - // round the others to 0, too. - if (out[face.mIndices[n]].x <= LOWER_EPSILON) - zero = true; - else round_to_zero = true; - } - if (out[face.mIndices[n]].x > UPPER_LIMIT) - { - large = n; - - // If we have a U value very close to 1 we can't - // round the others to 1, too. - if (out[face.mIndices[n]].x >= UPPER_EPSILON) - one = true; - } - } - if (small != face.mNumIndices && large != face.mNumIndices) - { - for (unsigned int n = 0; n < face.mNumIndices;++n) - { - // If the u value is over the upper limit and no other u - // value of that face is 0, round it to 0 - if (out[face.mIndices[n]].x > UPPER_LIMIT && !zero) - out[face.mIndices[n]].x = 0.0; - - // If the u value is below the lower limit and no other u - // value of that face is 1, round it to 1 - else if (out[face.mIndices[n]].x < LOWER_LIMIT && !one) - out[face.mIndices[n]].x = 1.0; - - // The face contains both 0 and 1 as UV coords. This can occur - // for faces which have an edge that lies directly on the seam. - // Due to numerical inaccuracies one U coord becomes 0, the - // other 1. But we do still have a third UV coord to determine - // to which side we must round to. 
- else if (one && zero) - { - if (round_to_zero && out[face.mIndices[n]].x >= UPPER_EPSILON) - out[face.mIndices[n]].x = 0.0; - else if (!round_to_zero && out[face.mIndices[n]].x <= LOWER_EPSILON) - out[face.mIndices[n]].x = 1.0; - } - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -void ComputeUVMappingProcess::ComputeSphereMapping(aiMesh* mesh,const aiVector3D& axis, aiVector3D* out) -{ - aiVector3D center, min, max; - FindMeshCenter(mesh, center, min, max); - - // If the axis is one of x,y,z run a faster code path. It's worth the extra effort ... - // currently the mapping axis will always be one of x,y,z, except if the - // PretransformVertices step is used (it transforms the meshes into worldspace, - // thus changing the mapping axis) - if (axis * base_axis_x >= angle_epsilon) { - - // For each point get a normalized projection vector in the sphere, - // get its longitude and latitude and map them to their respective - // UV axes. Problems occur around the poles ... unsolvable. - // - // The spherical coordinate system looks like this: - // x = cos(lon)*cos(lat) - // y = sin(lon)*cos(lat) - // z = sin(lat) - // - // Thus we can derive: - // lat = arcsin (z) - // lon = arctan (y/x) - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D diff = (mesh->mVertices[pnt]-center).Normalize(); - out[pnt] = aiVector3D((std::atan2(diff.z, diff.y) + AI_MATH_PI_F ) / AI_MATH_TWO_PI_F, - (std::asin (diff.x) + AI_MATH_HALF_PI_F) / AI_MATH_PI_F, 0.0); - } - } - else if (axis * base_axis_y >= angle_epsilon) { - // ... just the same again - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D diff = (mesh->mVertices[pnt]-center).Normalize(); - out[pnt] = aiVector3D((std::atan2(diff.x, diff.z) + AI_MATH_PI_F ) / AI_MATH_TWO_PI_F, - (std::asin (diff.y) + AI_MATH_HALF_PI_F) / AI_MATH_PI_F, 0.0); - } - } - else if (axis * base_axis_z >= angle_epsilon) { - // ... just the same again - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D diff = (mesh->mVertices[pnt]-center).Normalize(); - out[pnt] = aiVector3D((std::atan2(diff.y, diff.x) + AI_MATH_PI_F ) / AI_MATH_TWO_PI_F, - (std::asin (diff.z) + AI_MATH_HALF_PI_F) / AI_MATH_PI_F, 0.0); - } - } - // slower code path in case the mapping axis is not one of the coordinate system axes - else { - aiMatrix4x4 mTrafo; - aiMatrix4x4::FromToMatrix(axis,base_axis_y,mTrafo); - - // again the same, except we're applying a transformation now - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D diff = ((mTrafo*mesh->mVertices[pnt])-center).Normalize(); - out[pnt] = aiVector3D((std::atan2(diff.y, diff.x) + AI_MATH_PI_F ) / AI_MATH_TWO_PI_F, - (std::asin(diff.z) + AI_MATH_HALF_PI_F) / AI_MATH_PI_F, 0.0); - } - } - - - // Now find and remove UV seams. A seam occurs if a face has a tcoord - // close to zero on the one side, and a tcoord close to one on the - // other side. - RemoveUVSeams(mesh,out); -} - -// ------------------------------------------------------------------------------------------------ -void ComputeUVMappingProcess::ComputeCylinderMapping(aiMesh* mesh,const aiVector3D& axis, aiVector3D* out) -{ - aiVector3D center, min, max; - - // If the axis is one of x,y,z run a faster code path. It's worth the extra effort ... 
- // currently the mapping axis will always be one of x,y,z, except if the - // PretransformVertices step is used (it transforms the meshes into worldspace, - // thus changing the mapping axis) - if (axis * base_axis_x >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - const ai_real diff = max.x - min.x; - - // If the main axis is 'z', the z coordinate of a point 'p' is mapped - // directly to the texture V axis. The other axis is derived from - // the angle between ( p.x - c.x, p.y - c.y ) and (1,0), where - // 'c' is the center point of the mesh. - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - aiVector3D& uv = out[pnt]; - - uv.y = (pos.x - min.x) / diff; - uv.x = (std::atan2( pos.z - center.z, pos.y - center.y) +(ai_real)AI_MATH_PI ) / (ai_real)AI_MATH_TWO_PI; - } - } - else if (axis * base_axis_y >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - const ai_real diff = max.y - min.y; - - // just the same ... - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - aiVector3D& uv = out[pnt]; - - uv.y = (pos.y - min.y) / diff; - uv.x = (std::atan2( pos.x - center.x, pos.z - center.z) +(ai_real)AI_MATH_PI ) / (ai_real)AI_MATH_TWO_PI; - } - } - else if (axis * base_axis_z >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - const ai_real diff = max.z - min.z; - - // just the same ... - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - aiVector3D& uv = out[pnt]; - - uv.y = (pos.z - min.z) / diff; - uv.x = (std::atan2( pos.y - center.y, pos.x - center.x) +(ai_real)AI_MATH_PI ) / (ai_real)AI_MATH_TWO_PI; - } - } - // slower code path in case the mapping axis is not one of the coordinate system axes - else { - aiMatrix4x4 mTrafo; - aiMatrix4x4::FromToMatrix(axis,base_axis_y,mTrafo); - FindMeshCenterTransformed(mesh, center, min, max,mTrafo); - const ai_real diff = max.y - min.y; - - // again the same, except we're applying a transformation now - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt){ - const aiVector3D pos = mTrafo* mesh->mVertices[pnt]; - aiVector3D& uv = out[pnt]; - - uv.y = (pos.y - min.y) / diff; - uv.x = (std::atan2( pos.x - center.x, pos.z - center.z) +(ai_real)AI_MATH_PI ) / (ai_real)AI_MATH_TWO_PI; - } - } - - // Now find and remove UV seams. A seam occurs if a face has a tcoord - // close to zero on the one side, and a tcoord close to one on the - // other side. - RemoveUVSeams(mesh,out); -} - -// ------------------------------------------------------------------------------------------------ -void ComputeUVMappingProcess::ComputePlaneMapping(aiMesh* mesh,const aiVector3D& axis, aiVector3D* out) -{ - ai_real diffu,diffv; - aiVector3D center, min, max; - - // If the axis is one of x,y,z run a faster code path. It's worth the extra effort ... 
- // currently the mapping axis will always be one of x,y,z, except if the - // PretransformVertices step is used (it transforms the meshes into worldspace, - // thus changing the mapping axis) - if (axis * base_axis_x >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - diffu = max.z - min.z; - diffv = max.y - min.y; - - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - out[pnt].Set((pos.z - min.z) / diffu,(pos.y - min.y) / diffv,0.0); - } - } - else if (axis * base_axis_y >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - diffu = max.x - min.x; - diffv = max.z - min.z; - - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - out[pnt].Set((pos.x - min.x) / diffu,(pos.z - min.z) / diffv,0.0); - } - } - else if (axis * base_axis_z >= angle_epsilon) { - FindMeshCenter(mesh, center, min, max); - diffu = max.x - min.x; - diffv = max.y - min.y; - - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D& pos = mesh->mVertices[pnt]; - out[pnt].Set((pos.x - min.x) / diffu,(pos.y - min.y) / diffv,0.0); - } - } - // slower code path in case the mapping axis is not one of the coordinate system axes - else - { - aiMatrix4x4 mTrafo; - aiMatrix4x4::FromToMatrix(axis,base_axis_y,mTrafo); - FindMeshCenterTransformed(mesh, center, min, max,mTrafo); - diffu = max.x - min.x; - diffv = max.z - min.z; - - // again the same, except we're applying a transformation now - for (unsigned int pnt = 0; pnt < mesh->mNumVertices;++pnt) { - const aiVector3D pos = mTrafo * mesh->mVertices[pnt]; - out[pnt].Set((pos.x - min.x) / diffu,(pos.z - min.z) / diffv,0.0); - } - } - - // shouldn't be necessary to remove UV seams ... -} - -// ------------------------------------------------------------------------------------------------ -void ComputeUVMappingProcess::ComputeBoxMapping( aiMesh*, aiVector3D* ) -{ - ASSIMP_LOG_ERROR("Mapping type currently not implemented"); -} - -// ------------------------------------------------------------------------------------------------ -void ComputeUVMappingProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("GenUVCoordsProcess begin"); - char buffer[1024]; - - if (pScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) - throw DeadlyImportError("Post-processing order mismatch: expecting pseudo-indexed (\"verbose\") vertices here"); - - std::list<MappingInfo> mappingStack; - - /* Iterate through all materials and search for non-UV mapped textures - */ - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) - { - mappingStack.clear(); - aiMaterial* mat = pScene->mMaterials[i]; - for (unsigned int a = 0; a < mat->mNumProperties;++a) - { - aiMaterialProperty* prop = mat->mProperties[a]; - if (!::strcmp( prop->mKey.data, "$tex.mapping")) - { - aiTextureMapping& mapping = *((aiTextureMapping*)prop->mData); - if (aiTextureMapping_UV != mapping) - { - if (!DefaultLogger::isNullLogger()) - { - ai_snprintf(buffer, 1024, "Found non-UV mapped texture (%s,%u). 
Mapping type: %s", - TextureTypeToString((aiTextureType)prop->mSemantic),prop->mIndex, - MappingTypeToString(mapping)); - - ASSIMP_LOG_INFO(buffer); - } - - if (aiTextureMapping_OTHER == mapping) - continue; - - MappingInfo info (mapping); - - // Get further properties - currently only the major axis - for (unsigned int a2 = 0; a2 < mat->mNumProperties;++a2) - { - aiMaterialProperty* prop2 = mat->mProperties[a2]; - if (prop2->mSemantic != prop->mSemantic || prop2->mIndex != prop->mIndex) - continue; - - if ( !::strcmp( prop2->mKey.data, "$tex.mapaxis")) { - info.axis = *((aiVector3D*)prop2->mData); - break; - } - } - - unsigned int idx( 99999999 ); - - // Check whether we have this mapping mode already - std::list<MappingInfo>::iterator it = std::find (mappingStack.begin(),mappingStack.end(), info); - if (mappingStack.end() != it) - { - idx = (*it).uv; - } - else - { - /* We have found a non-UV mapped texture. Now - * we need to find all meshes using this material - * that we can compute UV channels for them. - */ - for (unsigned int m = 0; m < pScene->mNumMeshes;++m) - { - aiMesh* mesh = pScene->mMeshes[m]; - unsigned int outIdx = 0; - if ( mesh->mMaterialIndex != i || ( outIdx = FindEmptyUVChannel(mesh) ) == UINT_MAX || - !mesh->mNumVertices) - { - continue; - } - - // Allocate output storage - aiVector3D* p = mesh->mTextureCoords[outIdx] = new aiVector3D[mesh->mNumVertices]; - - switch (mapping) - { - case aiTextureMapping_SPHERE: - ComputeSphereMapping(mesh,info.axis,p); - break; - case aiTextureMapping_CYLINDER: - ComputeCylinderMapping(mesh,info.axis,p); - break; - case aiTextureMapping_PLANE: - ComputePlaneMapping(mesh,info.axis,p); - break; - case aiTextureMapping_BOX: - ComputeBoxMapping(mesh,p); - break; - default: - ai_assert(false); - } - if (m && idx != outIdx) - { - ASSIMP_LOG_WARN("UV index mismatch. Not all meshes assigned to " - "this material have equal numbers of UV channels. The UV index stored in " - "the material structure does therefore not apply for all meshes. "); - } - idx = outIdx; - } - info.uv = idx; - mappingStack.push_back(info); - } - - // Update the material property list - mapping = aiTextureMapping_UV; - ((aiMaterial*)mat)->AddProperty(&idx,1,AI_MATKEY_UVWSRC(prop->mSemantic,prop->mIndex)); - } - } - } - } - ASSIMP_LOG_DEBUG("GenUVCoordsProcess finished"); -} diff --git a/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.h b/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.h deleted file mode 100644 index a6d36e06ea..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ComputeUVMappingProcess.h +++ /dev/null @@ -1,149 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to compute UV coordinates - from abstract mappings, such as box or spherical*/ -#ifndef AI_COMPUTEUVMAPPING_H_INC -#define AI_COMPUTEUVMAPPING_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> -#include <assimp/material.h> -#include <assimp/types.h> - -class ComputeUVMappingTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** ComputeUVMappingProcess - converts special mappings, such as spherical, - * cylindrical or boxed to proper UV coordinates for rendering. -*/ -class ComputeUVMappingProcess : public BaseProcess -{ -public: - ComputeUVMappingProcess(); - ~ComputeUVMappingProcess(); - -public: - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. 
- */ - void Execute( aiScene* pScene); - -protected: - - // ------------------------------------------------------------------- - /** Computes spherical UV coordinates for a mesh - * - * @param mesh Mesh to be processed - * @param axis Main axis - * @param out Receives output UV coordinates - */ - void ComputeSphereMapping(aiMesh* mesh,const aiVector3D& axis, - aiVector3D* out); - - // ------------------------------------------------------------------- - /** Computes cylindrical UV coordinates for a mesh - * - * @param mesh Mesh to be processed - * @param axis Main axis - * @param out Receives output UV coordinates - */ - void ComputeCylinderMapping(aiMesh* mesh,const aiVector3D& axis, - aiVector3D* out); - - // ------------------------------------------------------------------- - /** Computes planar UV coordinates for a mesh - * - * @param mesh Mesh to be processed - * @param axis Main axis - * @param out Receives output UV coordinates - */ - void ComputePlaneMapping(aiMesh* mesh,const aiVector3D& axis, - aiVector3D* out); - - // ------------------------------------------------------------------- - /** Computes cubic UV coordinates for a mesh - * - * @param mesh Mesh to be processed - * @param out Receives output UV coordinates - */ - void ComputeBoxMapping(aiMesh* mesh, aiVector3D* out); - -private: - - // temporary structure to describe a mapping - struct MappingInfo - { - explicit MappingInfo(aiTextureMapping _type) - : type (_type) - , axis (0.f,1.f,0.f) - , uv (0u) - {} - - aiTextureMapping type; - aiVector3D axis; - unsigned int uv; - - bool operator== (const MappingInfo& other) - { - return type == other.type && axis == other.axis; - } - }; -}; - -} // end of namespace Assimp - -#endif // AI_COMPUTEUVMAPPING_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.cpp b/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.cpp deleted file mode 100644 index b7cd4f0bc6..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.cpp +++ /dev/null @@ -1,414 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file MakeLeftHandedProcess.cpp - * @brief Implementation of the post processing step to convert all - * imported data to a left-handed coordinate system. - * - * Face order & UV flip are also implemented here, for the sake of a - * better location. - */ - - -#include "ConvertToLHProcess.h" -#include <assimp/scene.h> -#include <assimp/postprocess.h> -#include <assimp/DefaultLogger.hpp> - -using namespace Assimp; - -#ifndef ASSIMP_BUILD_NO_MAKELEFTHANDED_PROCESS - -namespace { - -template <typename aiMeshType> -void flipUVs(aiMeshType* pMesh) { - if (pMesh == nullptr) { return; } - // mirror texture y coordinate - for (unsigned int tcIdx = 0; tcIdx < AI_MAX_NUMBER_OF_TEXTURECOORDS; tcIdx++) { - if (!pMesh->HasTextureCoords(tcIdx)) { - break; - } - - for (unsigned int vIdx = 0; vIdx < pMesh->mNumVertices; vIdx++) { - pMesh->mTextureCoords[tcIdx][vIdx].y = 1.0f - pMesh->mTextureCoords[tcIdx][vIdx].y; - } - } -} - -} // namespace - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -MakeLeftHandedProcess::MakeLeftHandedProcess() -: BaseProcess() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -MakeLeftHandedProcess::~MakeLeftHandedProcess() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool MakeLeftHandedProcess::IsActive( unsigned int pFlags) const -{ - return 0 != (pFlags & aiProcess_MakeLeftHanded); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void MakeLeftHandedProcess::Execute( aiScene* pScene) -{ - // Check for an existent root node to proceed - ai_assert(pScene->mRootNode != NULL); - ASSIMP_LOG_DEBUG("MakeLeftHandedProcess begin"); - - // recursively convert all the nodes - ProcessNode( pScene->mRootNode, aiMatrix4x4()); - - // process the meshes accordingly - for ( unsigned int a = 0; a < pScene->mNumMeshes; ++a ) { - ProcessMesh( pScene->mMeshes[ a ] ); - } - - // process the materials accordingly - for ( unsigned int a = 0; a < pScene->mNumMaterials; ++a ) { - ProcessMaterial( pScene->mMaterials[ a ] ); - } - - // transform all animation channels as well - for( unsigned int a = 0; a < pScene->mNumAnimations; a++) - { - aiAnimation* anim = pScene->mAnimations[a]; - for( unsigned int b = 0; b < anim->mNumChannels; b++) - { - aiNodeAnim* nodeAnim = anim->mChannels[b]; - ProcessAnimation( nodeAnim); - } - } - ASSIMP_LOG_DEBUG("MakeLeftHandedProcess finished"); -} - -// ------------------------------------------------------------------------------------------------ -// Recursively converts a node, all of its children and all of its meshes -void MakeLeftHandedProcess::ProcessNode( aiNode* pNode, const aiMatrix4x4& pParentGlobalRotation) -{ - // mirror all base vectors at the local Z axis - pNode->mTransformation.c1 = -pNode->mTransformation.c1; - pNode->mTransformation.c2 = -pNode->mTransformation.c2; - pNode->mTransformation.c3 = -pNode->mTransformation.c3; - pNode->mTransformation.c4 = -pNode->mTransformation.c4; - - // now invert the Z axis again to keep the matrix determinant positive. - // The local meshes will be inverted accordingly so that the result should look just fine again. - pNode->mTransformation.a3 = -pNode->mTransformation.a3; - pNode->mTransformation.b3 = -pNode->mTransformation.b3; - pNode->mTransformation.c3 = -pNode->mTransformation.c3; - pNode->mTransformation.d3 = -pNode->mTransformation.d3; // useless, but anyways... - - // continue for all children - for( size_t a = 0; a < pNode->mNumChildren; ++a ) { - ProcessNode( pNode->mChildren[ a ], pParentGlobalRotation * pNode->mTransformation ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Converts a single mesh to left handed coordinates. -void MakeLeftHandedProcess::ProcessMesh( aiMesh* pMesh) { - if ( nullptr == pMesh ) { - ASSIMP_LOG_ERROR( "Nullptr to mesh found." 
); - return; - } - // mirror positions, normals and stuff along the Z axis - for( size_t a = 0; a < pMesh->mNumVertices; ++a) - { - pMesh->mVertices[a].z *= -1.0f; - if (pMesh->HasNormals()) { - pMesh->mNormals[a].z *= -1.0f; - } - if( pMesh->HasTangentsAndBitangents()) - { - pMesh->mTangents[a].z *= -1.0f; - pMesh->mBitangents[a].z *= -1.0f; - } - } - - // mirror anim meshes positions, normals and stuff along the Z axis - for (size_t m = 0; m < pMesh->mNumAnimMeshes; ++m) - { - for (size_t a = 0; a < pMesh->mAnimMeshes[m]->mNumVertices; ++a) - { - pMesh->mAnimMeshes[m]->mVertices[a].z *= -1.0f; - if (pMesh->mAnimMeshes[m]->HasNormals()) { - pMesh->mAnimMeshes[m]->mNormals[a].z *= -1.0f; - } - if (pMesh->mAnimMeshes[m]->HasTangentsAndBitangents()) - { - pMesh->mAnimMeshes[m]->mTangents[a].z *= -1.0f; - pMesh->mAnimMeshes[m]->mBitangents[a].z *= -1.0f; - } - } - } - - // mirror offset matrices of all bones - for( size_t a = 0; a < pMesh->mNumBones; ++a) - { - aiBone* bone = pMesh->mBones[a]; - bone->mOffsetMatrix.a3 = -bone->mOffsetMatrix.a3; - bone->mOffsetMatrix.b3 = -bone->mOffsetMatrix.b3; - bone->mOffsetMatrix.d3 = -bone->mOffsetMatrix.d3; - bone->mOffsetMatrix.c1 = -bone->mOffsetMatrix.c1; - bone->mOffsetMatrix.c2 = -bone->mOffsetMatrix.c2; - bone->mOffsetMatrix.c4 = -bone->mOffsetMatrix.c4; - } - - // mirror bitangents as well as they're derived from the texture coords - if( pMesh->HasTangentsAndBitangents()) - { - for( unsigned int a = 0; a < pMesh->mNumVertices; a++) - pMesh->mBitangents[a] *= -1.0f; - } -} - -// ------------------------------------------------------------------------------------------------ -// Converts a single material to left handed coordinates. -void MakeLeftHandedProcess::ProcessMaterial( aiMaterial* _mat) { - if ( nullptr == _mat ) { - ASSIMP_LOG_ERROR( "Nullptr to aiMaterial found." ); - return; - } - - aiMaterial* mat = (aiMaterial*)_mat; - for (unsigned int a = 0; a < mat->mNumProperties;++a) { - aiMaterialProperty* prop = mat->mProperties[a]; - - // Mapping axis for UV mappings? - if (!::strcmp( prop->mKey.data, "$tex.mapaxis")) { - ai_assert( prop->mDataLength >= sizeof(aiVector3D)); /* something is wrong with the validation if we end up here */ - aiVector3D* pff = (aiVector3D*)prop->mData; - pff->z *= -1.f; - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Converts the given animation to LH coordinates. -void MakeLeftHandedProcess::ProcessAnimation( aiNodeAnim* pAnim) -{ - // position keys - for( unsigned int a = 0; a < pAnim->mNumPositionKeys; a++) - pAnim->mPositionKeys[a].mValue.z *= -1.0f; - - // rotation keys - for( unsigned int a = 0; a < pAnim->mNumRotationKeys; a++) - { - /* That's the safe version, but the float errors add up. So we try the short version instead - aiMatrix3x3 rotmat = pAnim->mRotationKeys[a].mValue.GetMatrix(); - rotmat.a3 = -rotmat.a3; rotmat.b3 = -rotmat.b3; - rotmat.c1 = -rotmat.c1; rotmat.c2 = -rotmat.c2; - aiQuaternion rotquat( rotmat); - pAnim->mRotationKeys[a].mValue = rotquat; - */ - pAnim->mRotationKeys[a].mValue.x *= -1.0f; - pAnim->mRotationKeys[a].mValue.y *= -1.0f; - } -} - -#endif // !! 
ASSIMP_BUILD_NO_MAKELEFTHANDED_PROCESS -#ifndef ASSIMP_BUILD_NO_FLIPUVS_PROCESS -// # FlipUVsProcess - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FlipUVsProcess::FlipUVsProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FlipUVsProcess::~FlipUVsProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool FlipUVsProcess::IsActive( unsigned int pFlags) const -{ - return 0 != (pFlags & aiProcess_FlipUVs); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void FlipUVsProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("FlipUVsProcess begin"); - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) - ProcessMesh(pScene->mMeshes[i]); - - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) - ProcessMaterial(pScene->mMaterials[i]); - ASSIMP_LOG_DEBUG("FlipUVsProcess finished"); -} - -// ------------------------------------------------------------------------------------------------ -// Converts a single material -void FlipUVsProcess::ProcessMaterial (aiMaterial* _mat) -{ - aiMaterial* mat = (aiMaterial*)_mat; - for (unsigned int a = 0; a < mat->mNumProperties;++a) { - aiMaterialProperty* prop = mat->mProperties[a]; - if( !prop ) { - ASSIMP_LOG_DEBUG( "Property is null" ); - continue; - } - - // UV transformation key? - if (!::strcmp( prop->mKey.data, "$tex.uvtrafo")) { - ai_assert( prop->mDataLength >= sizeof(aiUVTransform)); /* something is wrong with the validation if we end up here */ - aiUVTransform* uv = (aiUVTransform*)prop->mData; - - // just flip it, that's everything - uv->mTranslation.y *= -1.f; - uv->mRotation *= -1.f; - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Converts a single mesh -void FlipUVsProcess::ProcessMesh( aiMesh* pMesh) -{ - flipUVs(pMesh); - for (unsigned int idx = 0; idx < pMesh->mNumAnimMeshes; idx++) { - flipUVs(pMesh->mAnimMeshes[idx]); - } -} - -#endif // !ASSIMP_BUILD_NO_FLIPUVS_PROCESS -#ifndef ASSIMP_BUILD_NO_FLIPWINDING_PROCESS -// # FlipWindingOrderProcess - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FlipWindingOrderProcess::FlipWindingOrderProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FlipWindingOrderProcess::~FlipWindingOrderProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool FlipWindingOrderProcess::IsActive( unsigned int pFlags) const -{ - return 0 != (pFlags & aiProcess_FlipWindingOrder); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void FlipWindingOrderProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("FlipWindingOrderProcess begin"); - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) - ProcessMesh(pScene->mMeshes[i]); - ASSIMP_LOG_DEBUG("FlipWindingOrderProcess finished"); -} - -// ------------------------------------------------------------------------------------------------ -// Converts a single mesh -void FlipWindingOrderProcess::ProcessMesh( aiMesh* pMesh) -{ - // invert the order of all faces in this mesh - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) - { - aiFace& face = pMesh->mFaces[a]; - for (unsigned int b = 0; b < face.mNumIndices / 2; b++) { - std::swap(face.mIndices[b], face.mIndices[face.mNumIndices - 1 - b]); - } - } - - // invert the order of all components in this mesh anim meshes - for (unsigned int m = 0; m < pMesh->mNumAnimMeshes; m++) { - aiAnimMesh* animMesh = pMesh->mAnimMeshes[m]; - unsigned int numVertices = animMesh->mNumVertices; - if (animMesh->HasPositions()) { - for (unsigned int a = 0; a < numVertices; a++) - { - std::swap(animMesh->mVertices[a], animMesh->mVertices[numVertices - 1 - a]); - } - } - if (animMesh->HasNormals()) { - for (unsigned int a = 0; a < numVertices; a++) - { - std::swap(animMesh->mNormals[a], animMesh->mNormals[numVertices - 1 - a]); - } - } - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; i++) { - if (animMesh->HasTextureCoords(i)) { - for (unsigned int a = 0; a < numVertices; a++) - { - std::swap(animMesh->mTextureCoords[i][a], animMesh->mTextureCoords[i][numVertices - 1 - a]); - } - } - } - if (animMesh->HasTangentsAndBitangents()) { - for (unsigned int a = 0; a < numVertices; a++) - { - std::swap(animMesh->mTangents[a], animMesh->mTangents[numVertices - 1 - a]); - std::swap(animMesh->mBitangents[a], animMesh->mBitangents[numVertices - 1 - a]); - } - } - for (unsigned int v = 0; v < AI_MAX_NUMBER_OF_COLOR_SETS; v++) { - if (animMesh->HasVertexColors(v)) { - for (unsigned int a = 0; a < numVertices; a++) - { - std::swap(animMesh->mColors[v][a], animMesh->mColors[v][numVertices - 1 - a]); - } - } - } - } -} - -#endif // !! ASSIMP_BUILD_NO_FLIPWINDING_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.h b/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.h deleted file mode 100644 index f32b91fc39..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ConvertToLHProcess.h +++ /dev/null @@ -1,171 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file MakeLeftHandedProcess.h - * @brief Defines a bunch of post-processing steps to handle - * coordinate system conversions. - * - * - LH to RH - * - UV origin upper-left to lower-left - * - face order cw to ccw - */ -#ifndef AI_CONVERTTOLHPROCESS_H_INC -#define AI_CONVERTTOLHPROCESS_H_INC - -#include <assimp/types.h> - -#include "Common/BaseProcess.h" - -struct aiMesh; -struct aiNodeAnim; -struct aiNode; -struct aiMaterial; - -namespace Assimp { - -// ----------------------------------------------------------------------------------- -/** @brief The MakeLeftHandedProcess converts all imported data to a left-handed - * coordinate system. - * - * This implies a mirroring of the Z axis of the coordinate system. But to keep - * transformation matrices free from reflections we shift the reflection to other - * places. We mirror the meshes and adapt the rotations. - * - * @note RH-LH and LH-RH is the same, so this class can be used for both - */ -class MakeLeftHandedProcess : public BaseProcess -{ - - -public: - MakeLeftHandedProcess(); - ~MakeLeftHandedProcess(); - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - -protected: - - // ------------------------------------------------------------------- - /** Recursively converts a node and all of its children - */ - void ProcessNode( aiNode* pNode, const aiMatrix4x4& pParentGlobalRotation); - - // ------------------------------------------------------------------- - /** Converts a single mesh to left handed coordinates. - * This means that positions, normals and tangents are mirrored at - * the local Z axis and the order of all faces are inverted. - * @param pMesh The mesh to convert. - */ - void ProcessMesh( aiMesh* pMesh); - - // ------------------------------------------------------------------- - /** Converts a single material to left-handed coordinates - * @param pMat Material to convert - */ - void ProcessMaterial( aiMaterial* pMat); - - // ------------------------------------------------------------------- - /** Converts the given animation to LH coordinates. - * The rotation and translation keys are transformed, the scale keys - * work in local space and can therefore be left untouched. 
- * @param pAnim The bone animation to transform - */ - void ProcessAnimation( aiNodeAnim* pAnim); -}; - - -// --------------------------------------------------------------------------- -/** Postprocessing step to flip the face order of the imported data - */ -class FlipWindingOrderProcess : public BaseProcess -{ - friend class Importer; - -public: - /** Constructor to be privately used by Importer */ - FlipWindingOrderProcess(); - - /** Destructor, private as well */ - ~FlipWindingOrderProcess(); - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - -protected: - void ProcessMesh( aiMesh* pMesh); -}; - -// --------------------------------------------------------------------------- -/** Postprocessing step to flip the UV coordinate system of the import data - */ -class FlipUVsProcess : public BaseProcess -{ - friend class Importer; - -public: - /** Constructor to be privately used by Importer */ - FlipUVsProcess(); - - /** Destructor, private as well */ - ~FlipUVsProcess(); - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - -protected: - void ProcessMesh( aiMesh* pMesh); - void ProcessMaterial( aiMaterial* mat); -}; - -} // end of namespace Assimp - -#endif // AI_CONVERTTOLHPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/DeboneProcess.cpp b/thirdparty/assimp/code/PostProcessing/DeboneProcess.cpp deleted file mode 100644 index 83b8336bc9..0000000000 --- a/thirdparty/assimp/code/PostProcessing/DeboneProcess.cpp +++ /dev/null @@ -1,465 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/// @file DeboneProcess.cpp -/** Implementation of the DeboneProcess post processing step */ - - - -// internal headers of the post-processing framework -#include "ProcessHelper.h" -#include "DeboneProcess.h" -#include <stdio.h> - - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -DeboneProcess::DeboneProcess() -{ - mNumBones = 0; - mNumBonesCanDoWithout = 0; - - mThreshold = AI_DEBONE_THRESHOLD; - mAllOrNone = false; -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -DeboneProcess::~DeboneProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool DeboneProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_Debone) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void DeboneProcess::SetupProperties(const Importer* pImp) -{ - // get the current value of the property - mAllOrNone = pImp->GetPropertyInteger(AI_CONFIG_PP_DB_ALL_OR_NONE,0)?true:false; - mThreshold = pImp->GetPropertyFloat(AI_CONFIG_PP_DB_THRESHOLD,AI_DEBONE_THRESHOLD); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void DeboneProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("DeboneProcess begin"); - - if(!pScene->mNumMeshes) { - return; - } - - std::vector<bool> splitList(pScene->mNumMeshes); - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) { - splitList[a] = ConsiderMesh( pScene->mMeshes[a] ); - } - - int numSplits = 0; - - if(!!mNumBonesCanDoWithout && (!mAllOrNone||mNumBonesCanDoWithout==mNumBones)) { - for(unsigned int a = 0; a < pScene->mNumMeshes; a++) { - if(splitList[a]) { - numSplits++; - } - } - } - - if(numSplits) { - // we need to do something. Let's go. - //mSubMeshIndices.clear(); // really needed? - mSubMeshIndices.resize(pScene->mNumMeshes); // because we're doing it here anyway - - // build a new array of meshes for the scene - std::vector<aiMesh*> meshes; - - for(unsigned int a=0;a<pScene->mNumMeshes;a++) - { - aiMesh* srcMesh = pScene->mMeshes[a]; - - std::vector<std::pair<aiMesh*,const aiBone*> > newMeshes; - - if(splitList[a]) { - SplitMesh(srcMesh,newMeshes); - } - - // mesh was split - if(!newMeshes.empty()) { - unsigned int out = 0, in = srcMesh->mNumBones; - - // store new meshes and indices of the new meshes - for(unsigned int b=0;b<newMeshes.size();b++) { - const aiString *find = newMeshes[b].second?&newMeshes[b].second->mName:0; - - aiNode *theNode = find?pScene->mRootNode->FindNode(*find):0; - std::pair<unsigned int,aiNode*> push_pair(static_cast<unsigned int>(meshes.size()),theNode); - - mSubMeshIndices[a].push_back(push_pair); - meshes.push_back(newMeshes[b].first); - - out+=newMeshes[b].first->mNumBones; - } - - if(!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_INFO_F("Removed %u bones. Input bones:", in - out, ". Output bones: ", out); - } - - // and destroy the source mesh. 
It should be completely contained inside the new submeshes - delete srcMesh; - } - else { - // Mesh is kept unchanged - store it's new place in the mesh array - mSubMeshIndices[a].push_back(std::pair<unsigned int,aiNode*>(static_cast<unsigned int>(meshes.size()),(aiNode*)0)); - meshes.push_back(srcMesh); - } - } - - // rebuild the scene's mesh array - pScene->mNumMeshes = static_cast<unsigned int>(meshes.size()); - delete [] pScene->mMeshes; - pScene->mMeshes = new aiMesh*[pScene->mNumMeshes]; - std::copy( meshes.begin(), meshes.end(), pScene->mMeshes); - - // recurse through all nodes and translate the node's mesh indices to fit the new mesh array - UpdateNode( pScene->mRootNode); - } - - ASSIMP_LOG_DEBUG("DeboneProcess end"); -} - -// ------------------------------------------------------------------------------------------------ -// Counts bones total/removable in a given mesh. -bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh) -{ - if(!pMesh->HasBones()) { - return false; - } - - bool split = false; - - //interstitial faces not permitted - bool isInterstitialRequired = false; - - std::vector<bool> isBoneNecessary(pMesh->mNumBones,false); - std::vector<unsigned int> vertexBones(pMesh->mNumVertices,UINT_MAX); - - const unsigned int cUnowned = UINT_MAX; - const unsigned int cCoowned = UINT_MAX-1; - - for(unsigned int i=0;i<pMesh->mNumBones;i++) { - for(unsigned int j=0;j<pMesh->mBones[i]->mNumWeights;j++) { - float w = pMesh->mBones[i]->mWeights[j].mWeight; - - if(w==0.0f) { - continue; - } - - unsigned int vid = pMesh->mBones[i]->mWeights[j].mVertexId; - if(w>=mThreshold) { - - if(vertexBones[vid]!=cUnowned) { - if(vertexBones[vid]==i) //double entry - { - ASSIMP_LOG_WARN("Encountered double entry in bone weights"); - } - else //TODO: track attraction in order to break tie - { - vertexBones[vid] = cCoowned; - } - } - else vertexBones[vid] = i; - } - - if(!isBoneNecessary[i]) { - isBoneNecessary[i] = w<mThreshold; - } - } - - if(!isBoneNecessary[i]) { - isInterstitialRequired = true; - } - } - - if(isInterstitialRequired) { - for(unsigned int i=0;i<pMesh->mNumFaces;i++) { - unsigned int v = vertexBones[pMesh->mFaces[i].mIndices[0]]; - - for(unsigned int j=1;j<pMesh->mFaces[i].mNumIndices;j++) { - unsigned int w = vertexBones[pMesh->mFaces[i].mIndices[j]]; - - if(v!=w) { - if(v<pMesh->mNumBones) isBoneNecessary[v] = true; - if(w<pMesh->mNumBones) isBoneNecessary[w] = true; - } - } - } - } - - for(unsigned int i=0;i<pMesh->mNumBones;i++) { - if(!isBoneNecessary[i]) { - mNumBonesCanDoWithout++; - split = true; - } - - mNumBones++; - } - return split; -} - -// ------------------------------------------------------------------------------------------------ -// Splits the given mesh by bone count. 
-void DeboneProcess::SplitMesh( const aiMesh* pMesh, std::vector< std::pair< aiMesh*,const aiBone* > >& poNewMeshes) const -{ - // same deal here as ConsiderMesh basically - - std::vector<bool> isBoneNecessary(pMesh->mNumBones,false); - std::vector<unsigned int> vertexBones(pMesh->mNumVertices,UINT_MAX); - - const unsigned int cUnowned = UINT_MAX; - const unsigned int cCoowned = UINT_MAX-1; - - for(unsigned int i=0;i<pMesh->mNumBones;i++) { - for(unsigned int j=0;j<pMesh->mBones[i]->mNumWeights;j++) { - float w = pMesh->mBones[i]->mWeights[j].mWeight; - - if(w==0.0f) { - continue; - } - - unsigned int vid = pMesh->mBones[i]->mWeights[j].mVertexId; - - if(w>=mThreshold) { - if(vertexBones[vid]!=cUnowned) { - if(vertexBones[vid]==i) //double entry - { - ASSIMP_LOG_WARN("Encountered double entry in bone weights"); - } - else //TODO: track attraction in order to break tie - { - vertexBones[vid] = cCoowned; - } - } - else vertexBones[vid] = i; - } - - if(!isBoneNecessary[i]) { - isBoneNecessary[i] = w<mThreshold; - } - } - } - - unsigned int nFacesUnowned = 0; - - std::vector<unsigned int> faceBones(pMesh->mNumFaces,UINT_MAX); - std::vector<unsigned int> facesPerBone(pMesh->mNumBones,0); - - for(unsigned int i=0;i<pMesh->mNumFaces;i++) { - unsigned int nInterstitial = 1; - - unsigned int v = vertexBones[pMesh->mFaces[i].mIndices[0]]; - - for(unsigned int j=1;j<pMesh->mFaces[i].mNumIndices;j++) { - unsigned int w = vertexBones[pMesh->mFaces[i].mIndices[j]]; - - if(v!=w) { - if(v<pMesh->mNumBones) isBoneNecessary[v] = true; - if(w<pMesh->mNumBones) isBoneNecessary[w] = true; - } - else nInterstitial++; - } - - if(v<pMesh->mNumBones &&nInterstitial==pMesh->mFaces[i].mNumIndices) { - faceBones[i] = v; //primitive belongs to bone #v - facesPerBone[v]++; - } - else nFacesUnowned++; - } - - // invalidate any "cojoined" faces - for(unsigned int i=0;i<pMesh->mNumFaces;i++) { - if(faceBones[i]<pMesh->mNumBones&&isBoneNecessary[faceBones[i]]) - { - ai_assert(facesPerBone[faceBones[i]]>0); - facesPerBone[faceBones[i]]--; - - nFacesUnowned++; - faceBones[i] = cUnowned; - } - } - - if(nFacesUnowned) { - std::vector<unsigned int> subFaces; - - for(unsigned int i=0;i<pMesh->mNumFaces;i++) { - if(faceBones[i]==cUnowned) { - subFaces.push_back(i); - } - } - - aiMesh *baseMesh = MakeSubmesh(pMesh,subFaces,0); - std::pair<aiMesh*,const aiBone*> push_pair(baseMesh,(const aiBone*)0); - - poNewMeshes.push_back(push_pair); - } - - for(unsigned int i=0;i<pMesh->mNumBones;i++) { - - if(!isBoneNecessary[i]&&facesPerBone[i]>0) { - std::vector<unsigned int> subFaces; - - for(unsigned int j=0;j<pMesh->mNumFaces;j++) { - if(faceBones[j]==i) { - subFaces.push_back(j); - } - } - - unsigned int f = AI_SUBMESH_FLAGS_SANS_BONES; - aiMesh *subMesh =MakeSubmesh(pMesh,subFaces,f); - - //Lifted from PretransformVertices.cpp - ApplyTransform(subMesh,pMesh->mBones[i]->mOffsetMatrix); - std::pair<aiMesh*,const aiBone*> push_pair(subMesh,pMesh->mBones[i]); - - poNewMeshes.push_back(push_pair); - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Recursively updates the node's mesh list to account for the changed mesh list -void DeboneProcess::UpdateNode(aiNode* pNode) const -{ - // rebuild the node's mesh index list - - std::vector<unsigned int> newMeshList; - - // this will require two passes - - unsigned int m = static_cast<unsigned int>(pNode->mNumMeshes), n = static_cast<unsigned int>(mSubMeshIndices.size()); - - // first pass, look for meshes which have not moved - - 
for(unsigned int a=0;a<m;a++) { - - unsigned int srcIndex = pNode->mMeshes[a]; - const std::vector< std::pair< unsigned int,aiNode* > > &subMeshes = mSubMeshIndices[srcIndex]; - unsigned int nSubmeshes = static_cast<unsigned int>(subMeshes.size()); - - for(unsigned int b=0;b<nSubmeshes;b++) { - if(!subMeshes[b].second) { - newMeshList.push_back(subMeshes[b].first); - } - } - } - - // second pass, collect deboned meshes - - for(unsigned int a=0;a<n;a++) - { - const std::vector< std::pair< unsigned int,aiNode* > > &subMeshes = mSubMeshIndices[a]; - unsigned int nSubmeshes = static_cast<unsigned int>(subMeshes.size()); - - for(unsigned int b=0;b<nSubmeshes;b++) { - if(subMeshes[b].second == pNode) { - newMeshList.push_back(subMeshes[b].first); - } - } - } - - if( pNode->mNumMeshes > 0 ) { - delete [] pNode->mMeshes; pNode->mMeshes = NULL; - } - - pNode->mNumMeshes = static_cast<unsigned int>(newMeshList.size()); - - if(pNode->mNumMeshes) { - pNode->mMeshes = new unsigned int[pNode->mNumMeshes]; - std::copy( newMeshList.begin(), newMeshList.end(), pNode->mMeshes); - } - - // do that also recursively for all children - for( unsigned int a = 0; a < pNode->mNumChildren; ++a ) { - UpdateNode( pNode->mChildren[a]); - } -} - -// ------------------------------------------------------------------------------------------------ -// Apply the node transformation to a mesh -void DeboneProcess::ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat)const -{ - // Check whether we need to transform the coordinates at all - if (!mat.IsIdentity()) { - - if (mesh->HasPositions()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mVertices[i] = mat * mesh->mVertices[i]; - } - } - if (mesh->HasNormals() || mesh->HasTangentsAndBitangents()) { - aiMatrix4x4 mWorldIT = mat; - mWorldIT.Inverse().Transpose(); - - // TODO: implement Inverse() for aiMatrix3x3 - aiMatrix3x3 m = aiMatrix3x3(mWorldIT); - - if (mesh->HasNormals()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mNormals[i] = (m * mesh->mNormals[i]).Normalize(); - } - } - if (mesh->HasTangentsAndBitangents()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mTangents[i] = (m * mesh->mTangents[i]).Normalize(); - mesh->mBitangents[i] = (m * mesh->mBitangents[i]).Normalize(); - } - } - } - } -} diff --git a/thirdparty/assimp/code/PostProcessing/DeboneProcess.h b/thirdparty/assimp/code/PostProcessing/DeboneProcess.h deleted file mode 100644 index 8b64c2acc6..0000000000 --- a/thirdparty/assimp/code/PostProcessing/DeboneProcess.h +++ /dev/null @@ -1,131 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** Defines a post processing step to limit the number of bones affecting a single vertex. */ -#ifndef AI_DEBONEPROCESS_H_INC -#define AI_DEBONEPROCESS_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> -#include <assimp/scene.h> - -#include <vector> -#include <utility> - -#// Forward declarations -class DeboneTest; - -namespace Assimp { - -#if (!defined AI_DEBONE_THRESHOLD) -# define AI_DEBONE_THRESHOLD 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -// --------------------------------------------------------------------------- -/** This post processing step removes bones nearly losslessly or according to -* a configured threshold. In order to remove the bone, the primitives affected by -* the bone are split from the mesh. The split off (new) mesh is boneless. At any -* point in time, bones without affect upon a given mesh are to be removed. -*/ -class DeboneProcess : public BaseProcess { -public: - DeboneProcess(); - ~DeboneProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. - * A bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - void SetupProperties(const Importer* pImp); - -protected: - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Counts bones total/removable in a given mesh. - * @param pMesh The mesh to process. - */ - bool ConsiderMesh( const aiMesh* pMesh); - - /// Splits the given mesh by bone count. - /// @param pMesh the Mesh to split. Is not changed at all, but might be superfluous in case it was split. - /// @param poNewMeshes Array of submeshes created in the process. Empty if splitting was not necessary. 
- void SplitMesh(const aiMesh* pMesh, std::vector< std::pair< aiMesh*,const aiBone* > >& poNewMeshes) const; - - /// Recursively updates the node's mesh list to account for the changed mesh list - void UpdateNode(aiNode* pNode) const; - - // ------------------------------------------------------------------- - // Apply transformation to a mesh - void ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat)const; - -public: - /** Number of bones present in the scene. */ - unsigned int mNumBones; - unsigned int mNumBonesCanDoWithout; - - float mThreshold; - bool mAllOrNone; - - /// Per mesh index: Array of indices of the new submeshes. - std::vector< std::vector< std::pair< unsigned int,aiNode* > > > mSubMeshIndices; -}; - -} // end of namespace Assimp - -#endif // AI_DEBONEPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.cpp b/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.cpp deleted file mode 100644 index b11615bb82..0000000000 --- a/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to drop face -* normals for all imported faces. 
-*/ - - -#include "DropFaceNormalsProcess.h" -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/Exceptional.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -DropFaceNormalsProcess::DropFaceNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -DropFaceNormalsProcess::~DropFaceNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool DropFaceNormalsProcess::IsActive( unsigned int pFlags) const { - return (pFlags & aiProcess_DropNormals) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void DropFaceNormalsProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("DropFaceNormalsProcess begin"); - - if (pScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) { - throw DeadlyImportError("Post-processing order mismatch: expecting pseudo-indexed (\"verbose\") vertices here"); - } - - bool bHas = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) { - bHas |= this->DropMeshFaceNormals( pScene->mMeshes[a]); - } - if (bHas) { - ASSIMP_LOG_INFO("DropFaceNormalsProcess finished. " - "Face normals have been removed"); - } else { - ASSIMP_LOG_DEBUG("DropFaceNormalsProcess finished. " - "No normals were present"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -bool DropFaceNormalsProcess::DropMeshFaceNormals (aiMesh* pMesh) { - if (NULL == pMesh->mNormals) { - return false; - } - - delete[] pMesh->mNormals; - pMesh->mNormals = nullptr; - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.h b/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.h deleted file mode 100644 index c710c5a5ee..0000000000 --- a/thirdparty/assimp/code/PostProcessing/DropFaceNormalsProcess.h +++ /dev/null @@ -1,83 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to compute face normals for all loaded faces*/ -#ifndef AI_DROPFACENORMALPROCESS_H_INC -#define AI_DROPFACENORMALPROCESS_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** The DropFaceNormalsProcess computes face normals for all faces of all meshes -*/ -class ASSIMP_API_WINONLY DropFaceNormalsProcess : public BaseProcess { -public: - DropFaceNormalsProcess(); - ~DropFaceNormalsProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - -private: - bool DropMeshFaceNormals(aiMesh* pcMesh); -}; - -} // end of namespace Assimp - -#endif // !!AI_DROPFACENORMALPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.cpp b/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.cpp deleted file mode 100644 index 739382a057..0000000000 --- a/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -#include "EmbedTexturesProcess.h" -#include <assimp/ParsingUtils.h> -#include "ProcessHelper.h" - -#include <fstream> - -using namespace Assimp; - -EmbedTexturesProcess::EmbedTexturesProcess() -: BaseProcess() { -} - -EmbedTexturesProcess::~EmbedTexturesProcess() { -} - -bool EmbedTexturesProcess::IsActive(unsigned int pFlags) const { - return (pFlags & aiProcess_EmbedTextures) != 0; -} - -void EmbedTexturesProcess::SetupProperties(const Importer* pImp) { - mRootPath = pImp->GetPropertyString("sourceFilePath"); - mRootPath = mRootPath.substr(0, mRootPath.find_last_of("\\/") + 1u); -} - -void EmbedTexturesProcess::Execute(aiScene* pScene) { - if (pScene == nullptr || pScene->mRootNode == nullptr) return; - - aiString path; - - uint32_t embeddedTexturesCount = 0u; - - for (auto matId = 0u; matId < pScene->mNumMaterials; ++matId) { - auto material = pScene->mMaterials[matId]; - - for (auto ttId = 1u; ttId < AI_TEXTURE_TYPE_MAX; ++ttId) { - auto tt = static_cast<aiTextureType>(ttId); - auto texturesCount = material->GetTextureCount(tt); - - for (auto texId = 0u; texId < texturesCount; ++texId) { - material->GetTexture(tt, texId, &path); - if (path.data[0] == '*') continue; // Already embedded - - // Indeed embed - if (addTexture(pScene, path.data)) { - auto embeddedTextureId = pScene->mNumTextures - 1u; - ::ai_snprintf(path.data, 1024, "*%u", embeddedTextureId); - material->AddProperty(&path, AI_MATKEY_TEXTURE(tt, texId)); - embeddedTexturesCount++; - } - } - } - } - - ASSIMP_LOG_INFO_F("EmbedTexturesProcess finished. Embedded ", embeddedTexturesCount, " textures." ); -} - -bool EmbedTexturesProcess::addTexture(aiScene* pScene, std::string path) const { - std::streampos imageSize = 0; - std::string imagePath = path; - - // Test path directly - std::ifstream file(imagePath, std::ios::binary | std::ios::ate); - if ((imageSize = file.tellg()) == std::streampos(-1)) { - ASSIMP_LOG_WARN_F("EmbedTexturesProcess: Cannot find image: ", imagePath, ". 
Will try to find it in root folder."); - - // Test path in root path - imagePath = mRootPath + path; - file.open(imagePath, std::ios::binary | std::ios::ate); - if ((imageSize = file.tellg()) == std::streampos(-1)) { - // Test path basename in root path - imagePath = mRootPath + path.substr(path.find_last_of("\\/") + 1u); - file.open(imagePath, std::ios::binary | std::ios::ate); - if ((imageSize = file.tellg()) == std::streampos(-1)) { - ASSIMP_LOG_ERROR_F("EmbedTexturesProcess: Unable to embed texture: ", path, "."); - return false; - } - } - } - - aiTexel* imageContent = new aiTexel[ 1ul + static_cast<unsigned long>( imageSize ) / sizeof(aiTexel)]; - file.seekg(0, std::ios::beg); - file.read(reinterpret_cast<char*>(imageContent), imageSize); - - // Enlarging the textures table - unsigned int textureId = pScene->mNumTextures++; - auto oldTextures = pScene->mTextures; - pScene->mTextures = new aiTexture*[pScene->mNumTextures]; - ::memmove(pScene->mTextures, oldTextures, sizeof(aiTexture*) * (pScene->mNumTextures - 1u)); - - // Add the new texture - auto pTexture = new aiTexture; - pTexture->mHeight = 0; // Means that this is still compressed - pTexture->mWidth = static_cast<uint32_t>(imageSize); - pTexture->pcData = imageContent; - - auto extension = path.substr(path.find_last_of('.') + 1u); - std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower); - if (extension == "jpeg") { - extension = "jpg"; - } - - size_t len = extension.size(); - if (len > HINTMAXTEXTURELEN -1 ) { - len = HINTMAXTEXTURELEN - 1; - } - ::strncpy(pTexture->achFormatHint, extension.c_str(), len); - pScene->mTextures[textureId] = pTexture; - - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.h b/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.h deleted file mode 100644 index 3c4b2eab4e..0000000000 --- a/thirdparty/assimp/code/PostProcessing/EmbedTexturesProcess.h +++ /dev/null @@ -1,85 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -#pragma once - -#include "Common/BaseProcess.h" - -#include <string> - -struct aiNode; - -namespace Assimp { - -/** - * Force embedding of textures (using the path = "*1" convention). - * If a texture's file does not exist at the specified path - * (due, for instance, to an absolute path generated on another system), - * it will check if a file with the same name exists at the root folder - * of the imported model. And if so, it uses that. - */ -class ASSIMP_API EmbedTexturesProcess : public BaseProcess { -public: - /// The default class constructor. - EmbedTexturesProcess(); - - /// The class destructor. - virtual ~EmbedTexturesProcess(); - - /// Overwritten, @see BaseProcess - virtual bool IsActive(unsigned int pFlags) const; - - /// Overwritten, @see BaseProcess - virtual void SetupProperties(const Importer* pImp); - - /// Overwritten, @see BaseProcess - virtual void Execute(aiScene* pScene); - -private: - // Resolve the path and add the file content to the scene as a texture. - bool addTexture(aiScene* pScene, std::string path) const; - -private: - std::string mRootPath; -}; - -} // namespace Assimp diff --git a/thirdparty/assimp/code/PostProcessing/FindDegenerates.cpp b/thirdparty/assimp/code/PostProcessing/FindDegenerates.cpp deleted file mode 100644 index 50fac46dba..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindDegenerates.cpp +++ /dev/null @@ -1,301 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file FindDegenerates.cpp - * @brief Implementation of the FindDegenerates post-process step. -*/ - - - -// internal headers -#include "ProcessHelper.h" -#include "FindDegenerates.h" -#include <assimp/Exceptional.h> - -using namespace Assimp; - -//remove mesh at position 'index' from the scene -static void removeMesh(aiScene* pScene, unsigned const index); -//correct node indices to meshes and remove references to deleted mesh -static void updateSceneGraph(aiNode* pNode, unsigned const index); - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FindDegeneratesProcess::FindDegeneratesProcess() -: mConfigRemoveDegenerates( false ) -, mConfigCheckAreaOfTriangle( false ){ - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FindDegeneratesProcess::~FindDegeneratesProcess() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool FindDegeneratesProcess::IsActive( unsigned int pFlags) const { - return 0 != (pFlags & aiProcess_FindDegenerates); -} - -// ------------------------------------------------------------------------------------------------ -// Setup import configuration -void FindDegeneratesProcess::SetupProperties(const Importer* pImp) { - // Get the current value of AI_CONFIG_PP_FD_REMOVE - mConfigRemoveDegenerates = (0 != pImp->GetPropertyInteger(AI_CONFIG_PP_FD_REMOVE,0)); - mConfigCheckAreaOfTriangle = ( 0 != pImp->GetPropertyInteger(AI_CONFIG_PP_FD_CHECKAREA) ); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void FindDegeneratesProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("FindDegeneratesProcess begin"); - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) - { - //Do not process point cloud, ExecuteOnMesh works only with faces data - if ((pScene->mMeshes[i]->mPrimitiveTypes != aiPrimitiveType::aiPrimitiveType_POINT) && ExecuteOnMesh(pScene->mMeshes[i])) { - removeMesh(pScene, i); - --i; //the current i is removed, do not skip the next one - } - } - ASSIMP_LOG_DEBUG("FindDegeneratesProcess finished"); -} - -static void removeMesh(aiScene* pScene, unsigned const index) { - //we start at index and copy the pointers one position forward - //save the mesh pointer to delete it later - auto delete_me = pScene->mMeshes[index]; - for (unsigned i = index; i < pScene->mNumMeshes - 1; ++i) { - pScene->mMeshes[i] = pScene->mMeshes[i+1]; - } - pScene->mMeshes[pScene->mNumMeshes - 1] = nullptr; - --(pScene->mNumMeshes); - delete delete_me; - - //removing a mesh also requires updating all references to it in the scene graph - updateSceneGraph(pScene->mRootNode, index); -} - -static void updateSceneGraph(aiNode* pNode, unsigned const index) { - for (unsigned i = 0; i < pNode->mNumMeshes; ++i) { - if (pNode->mMeshes[i] > index) { - --(pNode->mMeshes[i]); - continue; - } - if (pNode->mMeshes[i] == index) { - for (unsigned j = i; j < pNode->mNumMeshes -1; ++j) { - pNode->mMeshes[j] = pNode->mMeshes[j+1]; - } - --(pNode->mNumMeshes); - --i; - continue; - } - } - //recurse to all children - for (unsigned i = 0; i < pNode->mNumChildren; ++i) { - updateSceneGraph(pNode->mChildren[i], index); - } -} - -static ai_real heron( ai_real a, ai_real b, ai_real c ) { - ai_real s = (a + b + c) / 2; - ai_real area = pow((s * ( s - a ) * ( s - b ) * ( s - c ) ), (ai_real)0.5 ); - return area; -} - -static ai_real distance3D( const aiVector3D &vA, aiVector3D &vB ) { - const ai_real lx = ( vB.x - vA.x ); - const ai_real ly = ( vB.y - vA.y ); - const ai_real lz = ( vB.z - vA.z ); - ai_real a = lx*lx + ly*ly + lz*lz; - ai_real d = pow( a, (ai_real)0.5 ); - - return d; -} - -static ai_real calculateAreaOfTriangle( const aiFace& face, aiMesh* mesh ) { - ai_real area = 0; - - aiVector3D vA( mesh->mVertices[ face.mIndices[ 0 ] ] ); - aiVector3D vB( mesh->mVertices[ face.mIndices[ 1 ] ] ); - aiVector3D vC( mesh->mVertices[ face.mIndices[ 2 ] ] ); - - ai_real a( distance3D( vA, vB ) ); - ai_real b( distance3D( vB, vC ) ); - ai_real c( distance3D( vC, vA ) ); - area = heron( a, b, c ); - - return area; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported mesh -bool FindDegeneratesProcess::ExecuteOnMesh( aiMesh* mesh) { - mesh->mPrimitiveTypes = 0; - - std::vector<bool> remove_me; - if (mConfigRemoveDegenerates) { - remove_me.resize( mesh->mNumFaces, false ); - } - - unsigned int deg = 0, limit; - for ( unsigned int a = 0; a < mesh->mNumFaces; ++a ) { - aiFace& face = mesh->mFaces[a]; - bool first = true; - - // check whether the face contains degenerated entries - for (unsigned int i = 0; i < face.mNumIndices; ++i) { - // Polygons with more than 4 points are allowed to have double points, that is - // simulating polygons with holes just with concave polygons. However, - // double points may not come directly after another. 
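// A minimal standalone sketch of what the removeMesh()/updateSceneGraph() pair
// above does once a degenerate mesh is erased: node references greater than the
// removed index shift down by one, references equal to it are dropped, and the
// children are visited recursively. SceneNode, RemoveMeshReference and the
// std::vector members are illustrative stand-ins, not part of the removed code.
#include <cstddef>
#include <vector>

struct SceneNode {
    std::vector<std::size_t> meshIndices;
    std::vector<SceneNode *> children;
};

static void RemoveMeshReference(SceneNode *node, std::size_t removedIndex) {
    std::vector<std::size_t> kept;
    kept.reserve(node->meshIndices.size());
    for (std::size_t ref : node->meshIndices) {
        if (ref > removedIndex) {
            kept.push_back(ref - 1); // shift references past the removed mesh
        } else if (ref < removedIndex) {
            kept.push_back(ref);     // unaffected reference
        }                            // ref == removedIndex: drop it
    }
    node->meshIndices.swap(kept);
    for (SceneNode *child : node->children) {
        RemoveMeshReference(child, removedIndex);
    }
}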
- limit = face.mNumIndices; - if (face.mNumIndices > 4) { - limit = std::min( limit, i+2 ); - } - - for (unsigned int t = i+1; t < limit; ++t) { - if (mesh->mVertices[face.mIndices[ i ] ] == mesh->mVertices[ face.mIndices[ t ] ]) { - // we have found a matching vertex position - // remove the corresponding index from the array - --face.mNumIndices; - --limit; - for (unsigned int m = t; m < face.mNumIndices; ++m) { - face.mIndices[ m ] = face.mIndices[ m+1 ]; - } - --t; - - // NOTE: we set the removed vertex index to an unique value - // to make sure the developer gets notified when his - // application attempts to access this data. - face.mIndices[ face.mNumIndices ] = 0xdeadbeef; - - if(first) { - ++deg; - first = false; - } - - if ( mConfigRemoveDegenerates ) { - remove_me[ a ] = true; - goto evil_jump_outside; // hrhrhrh ... yeah, this rocks baby! - } - } - } - - if ( mConfigCheckAreaOfTriangle ) { - if ( face.mNumIndices == 3 ) { - ai_real area = calculateAreaOfTriangle( face, mesh ); - if ( area < 1e-6 ) { - if ( mConfigRemoveDegenerates ) { - remove_me[ a ] = true; - ++deg; - goto evil_jump_outside; - } - - // todo: check for index which is corrupt. - } - } - } - } - - // We need to update the primitive flags array of the mesh. - switch (face.mNumIndices) - { - case 1u: - mesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 2u: - mesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 3u: - mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - break; - }; -evil_jump_outside: - continue; - } - - // If AI_CONFIG_PP_FD_REMOVE is true, remove degenerated faces from the import - if (mConfigRemoveDegenerates && deg) { - unsigned int n = 0; - for (unsigned int a = 0; a < mesh->mNumFaces; ++a) - { - aiFace& face_src = mesh->mFaces[a]; - if (!remove_me[a]) { - aiFace& face_dest = mesh->mFaces[n++]; - - // Do a manual copy, keep the index array - face_dest.mNumIndices = face_src.mNumIndices; - face_dest.mIndices = face_src.mIndices; - - if (&face_src != &face_dest) { - // clear source - face_src.mNumIndices = 0; - face_src.mIndices = nullptr; - } - } - else { - // Otherwise delete it if we don't need this face - delete[] face_src.mIndices; - face_src.mIndices = nullptr; - face_src.mNumIndices = 0; - } - } - // Just leave the rest of the array unreferenced, we don't care for now - mesh->mNumFaces = n; - if (!mesh->mNumFaces) { - //The whole mesh consists of degenerated faces - //signal upward, that this mesh should be deleted. - ASSIMP_LOG_DEBUG("FindDegeneratesProcess removed a mesh full of degenerated primitives"); - return true; - } - } - - if (deg && !DefaultLogger::isNullLogger()) { - ASSIMP_LOG_WARN_F( "Found ", deg, " degenerated primitives"); - } - return false; -} diff --git a/thirdparty/assimp/code/PostProcessing/FindDegenerates.h b/thirdparty/assimp/code/PostProcessing/FindDegenerates.h deleted file mode 100644 index 7a15e77cf1..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindDegenerates.h +++ /dev/null @@ -1,130 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
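// A minimal sketch of the area-based degenerate test used above: the side
// lengths feed Heron's formula, and a triangle whose area falls below a small
// epsilon (1e-6 in the removed code) is treated as degenerate. Vec3, Distance
// and IsDegenerateTriangle are simplified stand-ins for the assimp types.
#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };

static float Distance(const Vec3 &a, const Vec3 &b) {
    const float dx = b.x - a.x, dy = b.y - a.y, dz = b.z - a.z;
    return std::sqrt(dx * dx + dy * dy + dz * dz);
}

// Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), s = (a + b + c) / 2.
static bool IsDegenerateTriangle(const Vec3 &A, const Vec3 &B, const Vec3 &C,
                                 float epsilon = 1e-6f) {
    const float a = Distance(A, B), b = Distance(B, C), c = Distance(C, A);
    const float s = (a + b + c) * 0.5f;
    const float area = std::sqrt(std::max(0.0f, s * (s - a) * (s - b) * (s - c)));
    return area < epsilon;
}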
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to search all meshes for - degenerated faces */ -#ifndef AI_FINDDEGENERATESPROCESS_H_INC -#define AI_FINDDEGENERATESPROCESS_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> - -class FindDegeneratesProcessTest; -namespace Assimp { - - -// --------------------------------------------------------------------------- -/** FindDegeneratesProcess: Searches a mesh for degenerated triangles. -*/ -class ASSIMP_API FindDegeneratesProcess : public BaseProcess { -public: - FindDegeneratesProcess(); - ~FindDegeneratesProcess(); - - // ------------------------------------------------------------------- - // Check whether step is active - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Execute step on a given scene - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - // Setup import settings - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - // Execute step on a given mesh - ///@returns true if the current mesh should be deleted, false otherwise - bool ExecuteOnMesh( aiMesh* mesh); - - // ------------------------------------------------------------------- - /// @brief Enable the instant removal of degenerated primitives - /// @param enabled true for enabled. - void EnableInstantRemoval(bool enabled); - - // ------------------------------------------------------------------- - /// @brief Check whether instant removal is currently enabled - /// @return The instant removal state. - bool IsInstantRemoval() const; - - // ------------------------------------------------------------------- - /// @brief Enable the area check for triangles. - /// @param enabled true for enabled. 
- void EnableAreaCheck( bool enabled ); - - // ------------------------------------------------------------------- - /// @brief Check whether the area check is enabled. - /// @return The area check state. - bool isAreaCheckEnabled() const; - -private: - //! Configuration option: remove degenerates faces immediately - bool mConfigRemoveDegenerates; - //! Configuration option: check for area - bool mConfigCheckAreaOfTriangle; -}; - -inline -void FindDegeneratesProcess::EnableInstantRemoval(bool enabled) { - mConfigRemoveDegenerates = enabled; -} - -inline -bool FindDegeneratesProcess::IsInstantRemoval() const { - return mConfigRemoveDegenerates; -} - -inline -void FindDegeneratesProcess::EnableAreaCheck( bool enabled ) { - mConfigCheckAreaOfTriangle = enabled; -} - -inline -bool FindDegeneratesProcess::isAreaCheckEnabled() const { - return mConfigCheckAreaOfTriangle; -} - -} // Namespace Assimp - -#endif // !! AI_FINDDEGENERATESPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.cpp b/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.cpp deleted file mode 100644 index 64907458a1..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.cpp +++ /dev/null @@ -1,277 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file FindInstancesProcess.cpp - * @brief Implementation of the aiProcess_FindInstances postprocessing step -*/ - - -#include "FindInstancesProcess.h" -#include <memory> -#include <stdio.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FindInstancesProcess::FindInstancesProcess() -: configSpeedFlag (false) -{} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FindInstancesProcess::~FindInstancesProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool FindInstancesProcess::IsActive( unsigned int pFlags) const -{ - // FindInstances makes absolutely no sense together with PreTransformVertices - // fixme: spawn error message somewhere else? - return 0 != (pFlags & aiProcess_FindInstances) && 0 == (pFlags & aiProcess_PreTransformVertices); -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties for the step -void FindInstancesProcess::SetupProperties(const Importer* pImp) -{ - // AI_CONFIG_FAVOUR_SPEED - configSpeedFlag = (0 != pImp->GetPropertyInteger(AI_CONFIG_FAVOUR_SPEED,0)); -} - -// ------------------------------------------------------------------------------------------------ -// Compare the bones of two meshes -bool CompareBones(const aiMesh* orig, const aiMesh* inst) -{ - for (unsigned int i = 0; i < orig->mNumBones;++i) { - aiBone* aha = orig->mBones[i]; - aiBone* oha = inst->mBones[i]; - - if (aha->mNumWeights != oha->mNumWeights || - aha->mOffsetMatrix != oha->mOffsetMatrix) { - return false; - } - - // compare weight per weight --- - for (unsigned int n = 0; n < aha->mNumWeights;++n) { - if (aha->mWeights[n].mVertexId != oha->mWeights[n].mVertexId || - (aha->mWeights[n].mWeight - oha->mWeights[n].mWeight) < 10e-3f) { - return false; - } - } - } - return true; -} - -// ------------------------------------------------------------------------------------------------ -// Update mesh indices in the node graph -void UpdateMeshIndices(aiNode* node, unsigned int* lookup) -{ - for (unsigned int n = 0; n < node->mNumMeshes;++n) - node->mMeshes[n] = lookup[node->mMeshes[n]]; - - for (unsigned int n = 0; n < node->mNumChildren;++n) - UpdateMeshIndices(node->mChildren[n],lookup); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void FindInstancesProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("FindInstancesProcess begin"); - if (pScene->mNumMeshes) { - - // use a pseudo hash for all meshes in the scene to quickly find - // the ones which are possibly equal. This step is executed early - // in the pipeline, so we could, depending on the file format, - // have several thousand small meshes. That's too much for a brute - // everyone-against-everyone check involving up to 10 comparisons - // each. 
- std::unique_ptr<uint64_t[]> hashes (new uint64_t[pScene->mNumMeshes]); - std::unique_ptr<unsigned int[]> remapping (new unsigned int[pScene->mNumMeshes]); - - unsigned int numMeshesOut = 0; - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - - aiMesh* inst = pScene->mMeshes[i]; - hashes[i] = GetMeshHash(inst); - - // Find an appropriate epsilon - // to compare position differences against - float epsilon = ComputePositionEpsilon(inst); - epsilon *= epsilon; - - for (int a = i-1; a >= 0; --a) { - if (hashes[i] == hashes[a]) - { - aiMesh* orig = pScene->mMeshes[a]; - if (!orig) - continue; - - // check for hash collision .. we needn't check - // the vertex format, it *must* match due to the - // (brilliant) construction of the hash - if (orig->mNumBones != inst->mNumBones || - orig->mNumFaces != inst->mNumFaces || - orig->mNumVertices != inst->mNumVertices || - orig->mMaterialIndex != inst->mMaterialIndex || - orig->mPrimitiveTypes != inst->mPrimitiveTypes) - continue; - - // up to now the meshes are equal. Now compare vertex positions, normals, - // tangents and bitangents using this epsilon. - if (orig->HasPositions()) { - if(!CompareArrays(orig->mVertices,inst->mVertices,orig->mNumVertices,epsilon)) - continue; - } - if (orig->HasNormals()) { - if(!CompareArrays(orig->mNormals,inst->mNormals,orig->mNumVertices,epsilon)) - continue; - } - if (orig->HasTangentsAndBitangents()) { - if (!CompareArrays(orig->mTangents,inst->mTangents,orig->mNumVertices,epsilon) || - !CompareArrays(orig->mBitangents,inst->mBitangents,orig->mNumVertices,epsilon)) - continue; - } - - // use a constant epsilon for colors and UV coordinates - static const float uvEpsilon = 10e-4f; - { - unsigned int j, end = orig->GetNumUVChannels(); - for(j = 0; j < end; ++j) { - if (!orig->mTextureCoords[j]) { - continue; - } - if(!CompareArrays(orig->mTextureCoords[j],inst->mTextureCoords[j],orig->mNumVertices,uvEpsilon)) { - break; - } - } - if (j != end) { - continue; - } - } - { - unsigned int j, end = orig->GetNumColorChannels(); - for(j = 0; j < end; ++j) { - if (!orig->mColors[j]) { - continue; - } - if(!CompareArrays(orig->mColors[j],inst->mColors[j],orig->mNumVertices,uvEpsilon)) { - break; - } - } - if (j != end) { - continue; - } - } - - // These two checks are actually quite expensive and almost *never* required. - // Almost. That's why they're still here. But there's no reason to do them - // in speed-targeted imports. - if (!configSpeedFlag) { - - // It seems to be strange, but we really need to check whether the - // bones are identical too. Although it's extremely unprobable - // that they're not if control reaches here, we need to deal - // with unprobable cases, too. It could still be that there are - // equal shapes which are deformed differently. - if (!CompareBones(orig,inst)) - continue; - - // For completeness ... compare even the index buffers for equality - // face order & winding order doesn't care. Input data is in verbose format. 
- std::unique_ptr<unsigned int[]> ftbl_orig(new unsigned int[orig->mNumVertices]); - std::unique_ptr<unsigned int[]> ftbl_inst(new unsigned int[orig->mNumVertices]); - - for (unsigned int tt = 0; tt < orig->mNumFaces;++tt) { - aiFace& f = orig->mFaces[tt]; - for (unsigned int nn = 0; nn < f.mNumIndices;++nn) - ftbl_orig[f.mIndices[nn]] = tt; - - aiFace& f2 = inst->mFaces[tt]; - for (unsigned int nn = 0; nn < f2.mNumIndices;++nn) - ftbl_inst[f2.mIndices[nn]] = tt; - } - if (0 != ::memcmp(ftbl_inst.get(),ftbl_orig.get(),orig->mNumVertices*sizeof(unsigned int))) - continue; - } - - // We're still here. Or in other words: 'inst' is an instance of 'orig'. - // Place a marker in our list that we can easily update mesh indices. - remapping[i] = remapping[a]; - - // Delete the instanced mesh, we don't need it anymore - delete inst; - pScene->mMeshes[i] = NULL; - break; - } - } - - // If we didn't find a match for the current mesh: keep it - if (pScene->mMeshes[i]) { - remapping[i] = numMeshesOut++; - } - } - ai_assert(0 != numMeshesOut); - if (numMeshesOut != pScene->mNumMeshes) { - - // Collapse the meshes array by removing all NULL entries - for (unsigned int real = 0, i = 0; real < numMeshesOut; ++i) { - if (pScene->mMeshes[i]) - pScene->mMeshes[real++] = pScene->mMeshes[i]; - } - - // And update the node graph with our nice lookup table - UpdateMeshIndices(pScene->mRootNode,remapping.get()); - - // write to log - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_INFO_F( "FindInstancesProcess finished. Found ", (pScene->mNumMeshes - numMeshesOut), " instances" ); - } - pScene->mNumMeshes = numMeshesOut; - } else { - ASSIMP_LOG_DEBUG("FindInstancesProcess finished. No instanced meshes found"); - } - } -} diff --git a/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.h b/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.h deleted file mode 100644 index 64b838d7cc..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindInstancesProcess.h +++ /dev/null @@ -1,137 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
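// A condensed sketch of the detection pattern implemented above by the removed
// FindInstancesProcess: a cheap per-mesh hash narrows the candidates, then each
// candidate pair is verified with an epsilon comparison of its vertex arrays
// and remapped onto the first equal mesh. The vector-of-positions "mesh", the
// precomputed hashes and the helper names are illustrative assumptions.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Vec3 { float x, y, z; };

static bool NearlyEqual(const std::vector<Vec3> &a, const std::vector<Vec3> &b,
                        float epsilonSquared) {
    if (a.size() != b.size()) return false;
    for (std::size_t i = 0; i < a.size(); ++i) {
        const float dx = a[i].x - b[i].x, dy = a[i].y - b[i].y, dz = a[i].z - b[i].z;
        if (dx * dx + dy * dy + dz * dz >= epsilonSquared) return false; // differs too much
    }
    return true;
}

// For every mesh, return the index of the first earlier duplicate (or its own
// index when it is unique), mirroring the remapping table idea used above.
static std::vector<std::size_t> BuildInstanceRemapping(
        const std::vector<std::vector<Vec3>> &meshes,
        const std::vector<std::uint64_t> &hashes, float epsilonSquared) {
    std::vector<std::size_t> remap(meshes.size());
    for (std::size_t i = 0; i < meshes.size(); ++i) {
        remap[i] = i;
        for (std::size_t a = 0; a < i; ++a) {
            if (hashes[i] == hashes[a] && remap[a] == a &&
                NearlyEqual(meshes[i], meshes[a], epsilonSquared)) {
                remap[i] = a; // mesh i is an instance of mesh a
                break;
            }
        }
    }
    return remap;
}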
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file FindInstancesProcess.h - * @brief Declares the aiProcess_FindInstances post-process step - */ -#ifndef AI_FINDINSTANCES_H_INC -#define AI_FINDINSTANCES_H_INC - -#include "Common/BaseProcess.h" -#include "PostProcessing/ProcessHelper.h" - -class FindInstancesProcessTest; -namespace Assimp { - -// ------------------------------------------------------------------------------- -/** @brief Get a pseudo(!)-hash representing a mesh. - * - * The hash is built from number of vertices, faces, primitive types, - * .... but *not* from the real mesh data. The funcction is not a perfect hash. - * @param in Input mesh - * @return Hash. - */ -inline -uint64_t GetMeshHash(aiMesh* in) { - ai_assert(nullptr != in); - - // ... get an unique value representing the vertex format of the mesh - const unsigned int fhash = GetMeshVFormatUnique(in); - - // and bake it with number of vertices/faces/bones/matidx/ptypes - return ((uint64_t)fhash << 32u) | (( - (in->mNumBones << 16u) ^ (in->mNumVertices) ^ - (in->mNumFaces<<4u) ^ (in->mMaterialIndex<<15) ^ - (in->mPrimitiveTypes<<28)) & 0xffffffff ); -} - -// ------------------------------------------------------------------------------- -/** @brief Perform a component-wise comparison of two arrays - * - * @param first First array - * @param second Second array - * @param size Size of both arrays - * @param e Epsilon - * @return true if the arrays are identical - */ -inline -bool CompareArrays(const aiVector3D* first, const aiVector3D* second, - unsigned int size, float e) { - for (const aiVector3D* end = first+size; first != end; ++first,++second) { - if ( (*first - *second).SquareLength() >= e) - return false; - } - return true; -} - -// and the same for colors ... -inline bool CompareArrays(const aiColor4D* first, const aiColor4D* second, - unsigned int size, float e) -{ - for (const aiColor4D* end = first+size; first != end; ++first,++second) { - if ( GetColorDifference(*first,*second) >= e) - return false; - } - return true; -} - -// --------------------------------------------------------------------------- -/** @brief A post-processing steps to search for instanced meshes -*/ -class FindInstancesProcess : public BaseProcess -{ -public: - - FindInstancesProcess(); - ~FindInstancesProcess(); - -public: - // ------------------------------------------------------------------- - // Check whether step is active in given flags combination - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Execute step on a given scene - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - // Setup properties prior to executing the process - void SetupProperties(const Importer* pImp); - -private: - - bool configSpeedFlag; - -}; // ! end class FindInstancesProcess -} // ! end namespace Assimp - -#endif // !! 
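// A small sketch of the pseudo-hash layout defined in GetMeshHash() above: the
// 32-bit vertex-format id occupies the upper half of the result, an XOR of the
// shifted element counts the lower half. The shift amounts match the removed
// code; plain integers replace the aiMesh fields for illustration.
#include <cstdint>

static std::uint64_t PseudoMeshHash(std::uint32_t formatId, std::uint32_t numBones,
                                    std::uint32_t numVertices, std::uint32_t numFaces,
                                    std::uint32_t materialIndex, std::uint32_t primitiveTypes) {
    const std::uint32_t low = (numBones << 16u) ^ numVertices ^
                              (numFaces << 4u) ^ (materialIndex << 15u) ^
                              (primitiveTypes << 28u);
    return (static_cast<std::uint64_t>(formatId) << 32u) | low;
}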
AI_FINDINSTANCES_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.cpp b/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.cpp deleted file mode 100644 index 016884c6e7..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.cpp +++ /dev/null @@ -1,423 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to search an importer's output - for data that is obviously invalid */ - - - -#ifndef ASSIMP_BUILD_NO_FINDINVALIDDATA_PROCESS - -// internal headers -#include "FindInvalidDataProcess.h" -#include "ProcessHelper.h" - -#include <assimp/Exceptional.h> -#include <assimp/qnan.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FindInvalidDataProcess::FindInvalidDataProcess() -: configEpsilon(0.0) -, mIgnoreTexCoods( false ){ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FindInvalidDataProcess::~FindInvalidDataProcess() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool FindInvalidDataProcess::IsActive( unsigned int pFlags) const { - return 0 != (pFlags & aiProcess_FindInvalidData); -} - -// ------------------------------------------------------------------------------------------------ -// Setup import configuration -void FindInvalidDataProcess::SetupProperties(const Importer* pImp) { - // Get the current value of AI_CONFIG_PP_FID_ANIM_ACCURACY - configEpsilon = (0 != pImp->GetPropertyFloat(AI_CONFIG_PP_FID_ANIM_ACCURACY,0.f)); - mIgnoreTexCoods = pImp->GetPropertyBool(AI_CONFIG_PP_FID_IGNORE_TEXTURECOORDS, false); -} - -// ------------------------------------------------------------------------------------------------ -// Update mesh references in the node graph -void UpdateMeshReferences(aiNode* node, const std::vector<unsigned int>& meshMapping) { - if (node->mNumMeshes) { - unsigned int out = 0; - for (unsigned int a = 0; a < node->mNumMeshes;++a) { - - unsigned int ref = node->mMeshes[a]; - if (UINT_MAX != (ref = meshMapping[ref])) { - node->mMeshes[out++] = ref; - } - } - // just let the members that are unused, that's much cheaper - // than a full array realloc'n'copy party ... - if(!(node->mNumMeshes = out)) { - - delete[] node->mMeshes; - node->mMeshes = NULL; - } - } - // recursively update all children - for (unsigned int i = 0; i < node->mNumChildren;++i) { - UpdateMeshReferences(node->mChildren[i],meshMapping); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void FindInvalidDataProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("FindInvalidDataProcess begin"); - - bool out = false; - std::vector<unsigned int> meshMapping(pScene->mNumMeshes); - unsigned int real = 0; - - // Process meshes - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) { - - int result; - if ((result = ProcessMesh( pScene->mMeshes[a]))) { - out = true; - - if (2 == result) { - // remove this mesh - delete pScene->mMeshes[a]; - AI_DEBUG_INVALIDATE_PTR(pScene->mMeshes[a]); - - meshMapping[a] = UINT_MAX; - continue; - } - } - pScene->mMeshes[real] = pScene->mMeshes[a]; - meshMapping[a] = real++; - } - - // Process animations - for (unsigned int a = 0; a < pScene->mNumAnimations;++a) { - ProcessAnimation( pScene->mAnimations[a]); - } - - - if (out) { - if ( real != pScene->mNumMeshes) { - if (!real) { - throw DeadlyImportError("No meshes remaining"); - } - - // we need to remove some meshes. - // therefore we'll also need to remove all references - // to them from the scenegraph - UpdateMeshReferences(pScene->mRootNode,meshMapping); - pScene->mNumMeshes = real; - } - - ASSIMP_LOG_INFO("FindInvalidDataProcess finished. Found issues ..."); - } else { - ASSIMP_LOG_DEBUG("FindInvalidDataProcess finished. 
Everything seems to be OK."); - } -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -const char* ValidateArrayContents(const T* /*arr*/, unsigned int /*size*/, - const std::vector<bool>& /*dirtyMask*/, bool /*mayBeIdentical = false*/, bool /*mayBeZero = true*/) -{ - return nullptr; -} - -// ------------------------------------------------------------------------------------------------ -template <> -inline -const char* ValidateArrayContents<aiVector3D>(const aiVector3D* arr, unsigned int size, - const std::vector<bool>& dirtyMask, bool mayBeIdentical , bool mayBeZero ) { - bool b = false; - unsigned int cnt = 0; - for (unsigned int i = 0; i < size;++i) { - - if (dirtyMask.size() && dirtyMask[i]) { - continue; - } - ++cnt; - - const aiVector3D& v = arr[i]; - if (is_special_float(v.x) || is_special_float(v.y) || is_special_float(v.z)) { - return "INF/NAN was found in a vector component"; - } - if (!mayBeZero && !v.x && !v.y && !v.z ) { - return "Found zero-length vector"; - } - if (i && v != arr[i-1])b = true; - } - if (cnt > 1 && !b && !mayBeIdentical) { - return "All vectors are identical"; - } - return nullptr; -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -bool ProcessArray(T*& in, unsigned int num,const char* name, - const std::vector<bool>& dirtyMask, bool mayBeIdentical = false, bool mayBeZero = true) { - const char* err = ValidateArrayContents(in,num,dirtyMask,mayBeIdentical,mayBeZero); - if (err) { - ASSIMP_LOG_ERROR_F( "FindInvalidDataProcess fails on mesh ", name, ": ", err); - delete[] in; - in = NULL; - return true; - } - return false; -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -AI_FORCE_INLINE bool EpsilonCompare(const T& n, const T& s, ai_real epsilon); - -// ------------------------------------------------------------------------------------------------ -AI_FORCE_INLINE bool EpsilonCompare(ai_real n, ai_real s, ai_real epsilon) { - return std::fabs(n-s)>epsilon; -} - -// ------------------------------------------------------------------------------------------------ -template <> -bool EpsilonCompare<aiVectorKey>(const aiVectorKey& n, const aiVectorKey& s, ai_real epsilon) { - return - EpsilonCompare(n.mValue.x,s.mValue.x,epsilon) && - EpsilonCompare(n.mValue.y,s.mValue.y,epsilon) && - EpsilonCompare(n.mValue.z,s.mValue.z,epsilon); -} - -// ------------------------------------------------------------------------------------------------ -template <> -bool EpsilonCompare<aiQuatKey>(const aiQuatKey& n, const aiQuatKey& s, ai_real epsilon) { - return - EpsilonCompare(n.mValue.x,s.mValue.x,epsilon) && - EpsilonCompare(n.mValue.y,s.mValue.y,epsilon) && - EpsilonCompare(n.mValue.z,s.mValue.z,epsilon) && - EpsilonCompare(n.mValue.w,s.mValue.w,epsilon); -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -bool AllIdentical(T* in, unsigned int num, ai_real epsilon) { - if (num <= 1) { - return true; - } - - if (fabs(epsilon) > 0.f) { - for (unsigned int i = 0; i < num-1;++i) { - if (!EpsilonCompare(in[i],in[i+1],epsilon)) { - return false; - } - } - } else { - for (unsigned int i = 0; i < num-1;++i) { - if (in[i] != in[i+1]) { - return false; - } - } - } - return true; -} - -// 
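// A simplified sketch of the array validation shown above: an array of vectors
// is rejected when any component is NaN/Inf, and flagged when every entry is
// identical (unless that is allowed for the attribute). The dirty-mask and
// zero-length-vector handling of the removed code are omitted; Vec3 and
// ValidateVectors are illustrative stand-ins.
#include <cmath>
#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };

static const char *ValidateVectors(const std::vector<Vec3> &arr, bool mayBeIdentical) {
    bool anyDifferent = false;
    for (std::size_t i = 0; i < arr.size(); ++i) {
        const Vec3 &v = arr[i];
        if (!std::isfinite(v.x) || !std::isfinite(v.y) || !std::isfinite(v.z)) {
            return "INF/NAN was found in a vector component";
        }
        if (i && (v.x != arr[i - 1].x || v.y != arr[i - 1].y || v.z != arr[i - 1].z)) {
            anyDifferent = true; // at least two entries differ
        }
    }
    if (arr.size() > 1 && !anyDifferent && !mayBeIdentical) {
        return "All vectors are identical";
    }
    return nullptr; // nullptr signals that the array looks valid
}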
------------------------------------------------------------------------------------------------ -// Search an animation for invalid content -void FindInvalidDataProcess::ProcessAnimation (aiAnimation* anim) { - // Process all animation channels - for ( unsigned int a = 0; a < anim->mNumChannels; ++a ) { - ProcessAnimationChannel( anim->mChannels[a]); - } -} - -// ------------------------------------------------------------------------------------------------ -void FindInvalidDataProcess::ProcessAnimationChannel (aiNodeAnim* anim) { - ai_assert( nullptr != anim ); - if (anim->mNumPositionKeys == 0 && anim->mNumRotationKeys == 0 && anim->mNumScalingKeys == 0) { - ai_assert_entry(); - return; - } - - // Check whether all values in a tracks are identical - in this case - // we can remove al keys except one. - // POSITIONS - int i = 0; - if (anim->mNumPositionKeys > 1 && AllIdentical(anim->mPositionKeys,anim->mNumPositionKeys,configEpsilon)) { - aiVectorKey v = anim->mPositionKeys[0]; - - // Reallocate ... we need just ONE element, it makes no sense to reuse the array - delete[] anim->mPositionKeys; - anim->mPositionKeys = new aiVectorKey[anim->mNumPositionKeys = 1]; - anim->mPositionKeys[0] = v; - i = 1; - } - - // ROTATIONS - if (anim->mNumRotationKeys > 1 && AllIdentical(anim->mRotationKeys,anim->mNumRotationKeys,configEpsilon)) { - aiQuatKey v = anim->mRotationKeys[0]; - - // Reallocate ... we need just ONE element, it makes no sense to reuse the array - delete[] anim->mRotationKeys; - anim->mRotationKeys = new aiQuatKey[anim->mNumRotationKeys = 1]; - anim->mRotationKeys[0] = v; - i = 1; - } - - // SCALINGS - if (anim->mNumScalingKeys > 1 && AllIdentical(anim->mScalingKeys,anim->mNumScalingKeys,configEpsilon)) { - aiVectorKey v = anim->mScalingKeys[0]; - - // Reallocate ... we need just ONE element, it makes no sense to reuse the array - delete[] anim->mScalingKeys; - anim->mScalingKeys = new aiVectorKey[anim->mNumScalingKeys = 1]; - anim->mScalingKeys[0] = v; - i = 1; - } - if ( 1 == i ) { - ASSIMP_LOG_WARN("Simplified dummy tracks with just one key"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Search a mesh for invalid contents -int FindInvalidDataProcess::ProcessMesh(aiMesh* pMesh) -{ - bool ret = false; - std::vector<bool> dirtyMask(pMesh->mNumVertices, pMesh->mNumFaces != 0); - - // Ignore elements that are not referenced by vertices. - // (they are, for example, caused by the FindDegenerates step) - for (unsigned int m = 0; m < pMesh->mNumFaces; ++m) { - const aiFace& f = pMesh->mFaces[m]; - - for (unsigned int i = 0; i < f.mNumIndices; ++i) { - dirtyMask[f.mIndices[i]] = false; - } - } - - // Process vertex positions - if (pMesh->mVertices && ProcessArray(pMesh->mVertices, pMesh->mNumVertices, "positions", dirtyMask)) { - ASSIMP_LOG_ERROR("Deleting mesh: Unable to continue without vertex positions"); - - return 2; - } - - // process texture coordinates - if (!mIgnoreTexCoods) { - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS && pMesh->mTextureCoords[i]; ++i) { - if (ProcessArray(pMesh->mTextureCoords[i], pMesh->mNumVertices, "uvcoords", dirtyMask)) { - pMesh->mNumUVComponents[i] = 0; - - // delete all subsequent texture coordinate sets. 
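// A rough sketch of the channel simplification performed above: when all keys
// of a position/rotation/scaling track carry (nearly) the same value, the
// track is collapsed to a single key. The comparison below uses a plain
// per-component tolerance, which simplifies the removed EpsilonCompare logic;
// PositionKey and the helper names are illustrative stand-ins.
#include <cmath>
#include <cstddef>
#include <vector>

struct PositionKey { double time; float x, y, z; };

static bool AllKeysIdentical(const std::vector<PositionKey> &keys, float epsilon) {
    for (std::size_t i = 0; i + 1 < keys.size(); ++i) {
        if (std::fabs(keys[i].x - keys[i + 1].x) > epsilon ||
            std::fabs(keys[i].y - keys[i + 1].y) > epsilon ||
            std::fabs(keys[i].z - keys[i + 1].z) > epsilon) {
            return false;
        }
    }
    return true;
}

static void SimplifyConstantTrack(std::vector<PositionKey> &keys, float epsilon) {
    if (keys.size() > 1 && AllKeysIdentical(keys, epsilon)) {
        keys.resize(1); // keep only the first key of the constant track
    }
}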
- for (unsigned int a = i + 1; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a) { - delete[] pMesh->mTextureCoords[a]; - pMesh->mTextureCoords[a] = NULL; - pMesh->mNumUVComponents[a] = 0; - } - - ret = true; - } - } - } - - // -- we don't validate vertex colors, it's difficult to say whether - // they are invalid or not. - - // Normals and tangents are undefined for point and line faces. - if (pMesh->mNormals || pMesh->mTangents) { - - if (aiPrimitiveType_POINT & pMesh->mPrimitiveTypes || - aiPrimitiveType_LINE & pMesh->mPrimitiveTypes) - { - if (aiPrimitiveType_TRIANGLE & pMesh->mPrimitiveTypes || - aiPrimitiveType_POLYGON & pMesh->mPrimitiveTypes) - { - // We need to update the lookup-table - for (unsigned int m = 0; m < pMesh->mNumFaces;++m) { - const aiFace& f = pMesh->mFaces[ m ]; - - if (f.mNumIndices < 3) { - dirtyMask[f.mIndices[0]] = true; - if (f.mNumIndices == 2) { - dirtyMask[f.mIndices[1]] = true; - } - } - } - } - // Normals, tangents and bitangents are undefined for - // the whole mesh (and should not even be there) - else { - return ret; - } - } - - // Process mesh normals - if (pMesh->mNormals && ProcessArray(pMesh->mNormals,pMesh->mNumVertices, - "normals",dirtyMask,true,false)) - ret = true; - - // Process mesh tangents - if (pMesh->mTangents && ProcessArray(pMesh->mTangents,pMesh->mNumVertices,"tangents",dirtyMask)) { - delete[] pMesh->mBitangents; pMesh->mBitangents = NULL; - ret = true; - } - - // Process mesh bitangents - if (pMesh->mBitangents && ProcessArray(pMesh->mBitangents,pMesh->mNumVertices,"bitangents",dirtyMask)) { - delete[] pMesh->mTangents; pMesh->mTangents = NULL; - ret = true; - } - } - return ret ? 1 : 0; -} - -#endif // !! ASSIMP_BUILD_NO_FINDINVALIDDATA_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.h b/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.h deleted file mode 100644 index ce7375f34f..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FindInvalidDataProcess.h +++ /dev/null @@ -1,106 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
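// A short sketch of the mask built at the top of ProcessMesh() above: every
// vertex starts out flagged, and the flag is cleared as soon as a face
// references it, so vertices left unreferenced (for example by the
// FindDegenerates step) are skipped by the validity checks. The nested-vector
// face representation is an illustrative stand-in for aiFace.
#include <cstddef>
#include <vector>

static std::vector<bool> BuildUnreferencedMask(std::size_t numVertices,
        const std::vector<std::vector<unsigned>> &faces) {
    std::vector<bool> unreferenced(numVertices, !faces.empty());
    for (const std::vector<unsigned> &face : faces) {
        for (unsigned index : face) {
            unreferenced[index] = false; // vertex is used by at least one face
        }
    }
    return unreferenced;
}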
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to search an importer's output - * for data that is obviously invalid - */ -#ifndef AI_FINDINVALIDDATA_H_INC -#define AI_FINDINVALIDDATA_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/types.h> -#include <assimp/anim.h> - -struct aiMesh; - -class FindInvalidDataProcessTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** The FindInvalidData post-processing step. It searches the mesh data - * for parts that are obviously invalid and removes them. - * - * Originally this was a workaround for some models written by Blender - * which have zero normal vectors. */ -class ASSIMP_API FindInvalidDataProcess : public BaseProcess { -public: - FindInvalidDataProcess(); - ~FindInvalidDataProcess(); - - // ------------------------------------------------------------------- - // - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Setup import settings - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - // Run the step - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Executes the post-processing step on the given mesh - * @param pMesh The mesh to process. - * @return 0 - nothing, 1 - removed sth, 2 - please delete me */ - int ProcessMesh( aiMesh* pMesh); - - // ------------------------------------------------------------------- - /** Executes the post-processing step on the given animation - * @param anim The animation to process. */ - void ProcessAnimation (aiAnimation* anim); - - // ------------------------------------------------------------------- - /** Executes the post-processing step on the given anim channel - * @param anim The animation channel to process.*/ - void ProcessAnimationChannel (aiNodeAnim* anim); - -private: - ai_real configEpsilon; - bool mIgnoreTexCoods; -}; - -} // end of namespace Assimp - -#endif // AI_AI_FINDINVALIDDATA_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/FixNormalsStep.cpp b/thirdparty/assimp/code/PostProcessing/FixNormalsStep.cpp deleted file mode 100644 index bbbe6899b4..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FixNormalsStep.cpp +++ /dev/null @@ -1,184 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to invert - * all normals in meshes with infacing normals. - */ - -// internal headers -#include "FixNormalsStep.h" -#include <assimp/StringUtils.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <stdio.h> - - -using namespace Assimp; - - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -FixInfacingNormalsProcess::FixInfacingNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -FixInfacingNormalsProcess::~FixInfacingNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool FixInfacingNormalsProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_FixInfacingNormals) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void FixInfacingNormalsProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("FixInfacingNormalsProcess begin"); - - bool bHas( false ); - for (unsigned int a = 0; a < pScene->mNumMeshes; ++a) { - if (ProcessMesh(pScene->mMeshes[a], a)) { - bHas = true; - } - } - - if (bHas) { - ASSIMP_LOG_DEBUG("FixInfacingNormalsProcess finished. Found issues."); - } else { - ASSIMP_LOG_DEBUG("FixInfacingNormalsProcess finished. No changes to the scene."); - } -} - -// ------------------------------------------------------------------------------------------------ -// Apply the step to the mesh -bool FixInfacingNormalsProcess::ProcessMesh( aiMesh* pcMesh, unsigned int index) -{ - ai_assert(nullptr != pcMesh); - - // Nothing to do if there are no model normals - if (!pcMesh->HasNormals()) { - return false; - } - - // Compute the bounding box of both the model vertices + normals and - // the unmodified model vertices. Then check whether the first BB - // is smaller than the second. 
In this case we can assume that the - // normals need to be flipped, although there are a few special cases .. - // convex, concave, planar models ... - - aiVector3D vMin0 (1e10f,1e10f,1e10f); - aiVector3D vMin1 (1e10f,1e10f,1e10f); - aiVector3D vMax0 (-1e10f,-1e10f,-1e10f); - aiVector3D vMax1 (-1e10f,-1e10f,-1e10f); - - for (unsigned int i = 0; i < pcMesh->mNumVertices;++i) - { - vMin1.x = std::min(vMin1.x,pcMesh->mVertices[i].x); - vMin1.y = std::min(vMin1.y,pcMesh->mVertices[i].y); - vMin1.z = std::min(vMin1.z,pcMesh->mVertices[i].z); - - vMax1.x = std::max(vMax1.x,pcMesh->mVertices[i].x); - vMax1.y = std::max(vMax1.y,pcMesh->mVertices[i].y); - vMax1.z = std::max(vMax1.z,pcMesh->mVertices[i].z); - - const aiVector3D vWithNormal = pcMesh->mVertices[i] + pcMesh->mNormals[i]; - - vMin0.x = std::min(vMin0.x,vWithNormal.x); - vMin0.y = std::min(vMin0.y,vWithNormal.y); - vMin0.z = std::min(vMin0.z,vWithNormal.z); - - vMax0.x = std::max(vMax0.x,vWithNormal.x); - vMax0.y = std::max(vMax0.y,vWithNormal.y); - vMax0.z = std::max(vMax0.z,vWithNormal.z); - } - - const float fDelta0_x = (vMax0.x - vMin0.x); - const float fDelta0_y = (vMax0.y - vMin0.y); - const float fDelta0_z = (vMax0.z - vMin0.z); - - const float fDelta1_x = (vMax1.x - vMin1.x); - const float fDelta1_y = (vMax1.y - vMin1.y); - const float fDelta1_z = (vMax1.z - vMin1.z); - - // Check whether the boxes are overlapping - if ((fDelta0_x > 0.0f) != (fDelta1_x > 0.0f))return false; - if ((fDelta0_y > 0.0f) != (fDelta1_y > 0.0f))return false; - if ((fDelta0_z > 0.0f) != (fDelta1_z > 0.0f))return false; - - // Check whether this is a planar surface - const float fDelta1_yz = fDelta1_y * fDelta1_z; - - if (fDelta1_x < 0.05f * std::sqrt( fDelta1_yz ))return false; - if (fDelta1_y < 0.05f * std::sqrt( fDelta1_z * fDelta1_x ))return false; - if (fDelta1_z < 0.05f * std::sqrt( fDelta1_y * fDelta1_x ))return false; - - // now compare the volumes of the bounding boxes - if (std::fabs(fDelta0_x * fDelta0_y * fDelta0_z) < std::fabs(fDelta1_x * fDelta1_yz)) { - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_INFO_F("Mesh ", index, ": Normals are facing inwards (or the mesh is planar)", index); - } - - // Invert normals - for (unsigned int i = 0; i < pcMesh->mNumVertices;++i) - pcMesh->mNormals[i] *= -1.0f; - - // ... and flip faces - for (unsigned int i = 0; i < pcMesh->mNumFaces;++i) - { - aiFace& face = pcMesh->mFaces[i]; - for( unsigned int b = 0; b < face.mNumIndices / 2; b++) - std::swap( face.mIndices[b], face.mIndices[ face.mNumIndices - 1 - b]); - } - return true; - } - return false; -} diff --git a/thirdparty/assimp/code/PostProcessing/FixNormalsStep.h b/thirdparty/assimp/code/PostProcessing/FixNormalsStep.h deleted file mode 100644 index f60ce596a4..0000000000 --- a/thirdparty/assimp/code/PostProcessing/FixNormalsStep.h +++ /dev/null @@ -1,91 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
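// A compact sketch of the heuristic applied above by the removed
// FixInfacingNormalsProcess: displace each vertex by its normal and compare
// the two bounding-box volumes; a smaller displaced box suggests the normals
// point inwards, in which case the removed code flips them and reverses the
// face winding. The planar-surface guards of the original are omitted; Vec3
// and the helper names are illustrative stand-ins.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };

static float BoxVolume(const std::vector<Vec3> &points) {
    Vec3 lo{1e10f, 1e10f, 1e10f}, hi{-1e10f, -1e10f, -1e10f};
    for (const Vec3 &p : points) {
        lo.x = std::min(lo.x, p.x); lo.y = std::min(lo.y, p.y); lo.z = std::min(lo.z, p.z);
        hi.x = std::max(hi.x, p.x); hi.y = std::max(hi.y, p.y); hi.z = std::max(hi.z, p.z);
    }
    return std::fabs((hi.x - lo.x) * (hi.y - lo.y) * (hi.z - lo.z));
}

static bool NormalsLookInfacing(const std::vector<Vec3> &vertices,
                                const std::vector<Vec3> &normals) {
    std::vector<Vec3> displaced(vertices.size());
    for (std::size_t i = 0; i < vertices.size(); ++i) {
        displaced[i] = {vertices[i].x + normals[i].x,
                        vertices[i].y + normals[i].y,
                        vertices[i].z + normals[i].z};
    }
    return BoxVolume(displaced) < BoxVolume(vertices); // smaller box => infacing
}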
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - - -/** @file Defines a post processing step to fix infacing normals */ -#ifndef AI_FIXNORMALSPROCESS_H_INC -#define AI_FIXNORMALSPROCESS_H_INC - -#include "Common/BaseProcess.h" - -struct aiMesh; - -namespace Assimp -{ - -// --------------------------------------------------------------------------- -/** The FixInfacingNormalsProcess tries to determine whether the normal - * vectors of an object are facing inwards. In this case they will be - * flipped. - */ -class FixInfacingNormalsProcess : public BaseProcess { -public: - FixInfacingNormalsProcess(); - ~FixInfacingNormalsProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - -protected: - - // ------------------------------------------------------------------- - /** Executes the step on the given mesh - * @param pMesh The mesh to process. - */ - bool ProcessMesh( aiMesh* pMesh, unsigned int index); -}; - -} // end of namespace Assimp - -#endif // AI_FIXNORMALSPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.cpp b/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.cpp deleted file mode 100644 index c013454fc3..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.cpp +++ /dev/null @@ -1,115 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -#ifndef ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS - -#include "PostProcessing/GenBoundingBoxesProcess.h" - -#include <assimp/postprocess.h> -#include <assimp/scene.h> - -namespace Assimp { - -GenBoundingBoxesProcess::GenBoundingBoxesProcess() -: BaseProcess() { - -} - -GenBoundingBoxesProcess::~GenBoundingBoxesProcess() { - // empty -} - -bool GenBoundingBoxesProcess::IsActive(unsigned int pFlags) const { - return 0 != ( pFlags & aiProcess_GenBoundingBoxes ); -} - -void checkMesh(aiMesh* mesh, aiVector3D& min, aiVector3D& max) { - ai_assert(nullptr != mesh); - - if (0 == mesh->mNumVertices) { - return; - } - - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - const aiVector3D &pos = mesh->mVertices[i]; - if (pos.x < min.x) { - min.x = pos.x; - } - if (pos.y < min.y) { - min.y = pos.y; - } - if (pos.z < min.z) { - min.z = pos.z; - } - - if (pos.x > max.x) { - max.x = pos.x; - } - if (pos.y > max.y) { - max.y = pos.y; - } - if (pos.z > max.z) { - max.z = pos.z; - } - } -} - -void GenBoundingBoxesProcess::Execute(aiScene* pScene) { - if (nullptr == pScene) { - return; - } - - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - aiMesh* mesh = pScene->mMeshes[i]; - if (nullptr == mesh) { - continue; - } - - aiVector3D min(999999, 999999, 999999), max(-999999, -999999, -999999); - checkMesh(mesh, min, max); - mesh->mAABB.mMin = min; - mesh->mAABB.mMax = max; - } -} - -} // Namespace Assimp - -#endif // ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.h b/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.h deleted file mode 100644 index 4b43c82a42..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenBoundingBoxesProcess.h +++ /dev/null @@ -1,76 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. 
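// A minimal sketch of the per-mesh bounding-box computation in checkMesh()
// above: the axis-aligned box is the running minimum and maximum of all vertex
// positions, starting from the same +/-999999 sentinels the removed code uses.
// Vec3 and AABB are simplified stand-ins for aiVector3D/aiAABB.
#include <algorithm>
#include <vector>

struct Vec3 { float x, y, z; };
struct AABB { Vec3 min, max; };

static AABB ComputeAABB(const std::vector<Vec3> &vertices) {
    AABB box{{999999.f, 999999.f, 999999.f}, {-999999.f, -999999.f, -999999.f}};
    for (const Vec3 &p : vertices) {
        box.min.x = std::min(box.min.x, p.x);
        box.min.y = std::min(box.min.y, p.y);
        box.min.z = std::min(box.min.z, p.z);
        box.max.x = std::max(box.max.x, p.x);
        box.max.y = std::max(box.max.y, p.y);
        box.max.z = std::max(box.max.z, p.z);
    }
    return box;
}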
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Defines a post-processing step to generate Axis-aligned bounding - * volumes for all meshes. - */ - -#pragma once - -#ifndef AI_GENBOUNDINGBOXESPROCESS_H_INC -#define AI_GENBOUNDINGBOXESPROCESS_H_INC - -#ifndef ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS - -#include "Common/BaseProcess.h" - -namespace Assimp { - -/** Post-processing process to find axis-aligned bounding volumes for amm meshes - * used in a scene - */ -class ASSIMP_API GenBoundingBoxesProcess : public BaseProcess { -public: - /// The class constructor. - GenBoundingBoxesProcess(); - /// The class destructor. - ~GenBoundingBoxesProcess(); - /// Will return true, if aiProcess_GenBoundingBoxes is defined. - bool IsActive(unsigned int pFlags) const override; - /// The execution callback. - void Execute(aiScene* pScene) override; -}; - -} // Namespace Assimp - -#endif // #ifndef ASSIMP_BUILD_NO_GENBOUNDINGBOXES_PROCESS - -#endif // AI_GENBOUNDINGBOXESPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.cpp b/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.cpp deleted file mode 100644 index 028334dec7..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to generate face -* normals for all imported faces. -*/ - - -#include "GenFaceNormalsProcess.h" -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/Exceptional.h> -#include <assimp/qnan.h> - - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -GenFaceNormalsProcess::GenFaceNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -GenFaceNormalsProcess::~GenFaceNormalsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool GenFaceNormalsProcess::IsActive( unsigned int pFlags) const { - force_ = (pFlags & aiProcess_ForceGenNormals) != 0; - return (pFlags & aiProcess_GenNormals) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void GenFaceNormalsProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("GenFaceNormalsProcess begin"); - - if (pScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) { - throw DeadlyImportError("Post-processing order mismatch: expecting pseudo-indexed (\"verbose\") vertices here"); - } - - bool bHas = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) { - if(this->GenMeshFaceNormals( pScene->mMeshes[a])) { - bHas = true; - } - } - if (bHas) { - ASSIMP_LOG_INFO("GenFaceNormalsProcess finished. " - "Face normals have been calculated"); - } else { - ASSIMP_LOG_DEBUG("GenFaceNormalsProcess finished. " - "Normals are already there"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-bool GenFaceNormalsProcess::GenMeshFaceNormals (aiMesh* pMesh) -{ - if (NULL != pMesh->mNormals) { - if (force_) delete[] pMesh->mNormals; - else return false; - } - - // If the mesh consists of lines and/or points but not of - // triangles or higher-order polygons the normal vectors - // are undefined. - if (!(pMesh->mPrimitiveTypes & (aiPrimitiveType_TRIANGLE | aiPrimitiveType_POLYGON))) { - ASSIMP_LOG_INFO("Normal vectors are undefined for line and point meshes"); - return false; - } - - // allocate an array to hold the output normals - pMesh->mNormals = new aiVector3D[pMesh->mNumVertices]; - const float qnan = get_qnan(); - - // iterate through all faces and compute per-face normals but store them per-vertex. - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) { - const aiFace& face = pMesh->mFaces[a]; - if (face.mNumIndices < 3) { - // either a point or a line -> no well-defined normal vector - for (unsigned int i = 0;i < face.mNumIndices;++i) { - pMesh->mNormals[face.mIndices[i]] = aiVector3D(qnan); - } - continue; - } - - const aiVector3D* pV1 = &pMesh->mVertices[face.mIndices[0]]; - const aiVector3D* pV2 = &pMesh->mVertices[face.mIndices[1]]; - const aiVector3D* pV3 = &pMesh->mVertices[face.mIndices[face.mNumIndices-1]]; - const aiVector3D vNor = ((*pV2 - *pV1) ^ (*pV3 - *pV1)).NormalizeSafe(); - - for (unsigned int i = 0;i < face.mNumIndices;++i) { - pMesh->mNormals[face.mIndices[i]] = vNor; - } - } - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.h b/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.h deleted file mode 100644 index c641fd6353..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenFaceNormalsProcess.h +++ /dev/null @@ -1,87 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
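The GenMeshFaceNormals loop above reduces to one cross product per face, written to every vertex of that face. Distilled into plain C++ without assimp types (the names here are illustrative):

// Distilled face-normal rule from the deleted step: for a face with first,
// second and last vertices a, b, c, every vertex of the face gets
// normalize((b - a) x (c - a)); degenerate faces keep a zero normal,
// mirroring NormalizeSafe().
#include <cmath>

struct Vec3 { float x, y, z; };

static Vec3 Sub(Vec3 p, Vec3 q) { return {p.x - q.x, p.y - q.y, p.z - q.z}; }
static Vec3 Cross(Vec3 p, Vec3 q) {
    return {p.y * q.z - p.z * q.y, p.z * q.x - p.x * q.z, p.x * q.y - p.y * q.x};
}

static Vec3 FaceNormal(Vec3 a, Vec3 b, Vec3 c) {
    Vec3 n = Cross(Sub(b, a), Sub(c, a));
    const float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    if (len > 0.0f) {
        n.x /= len; n.y /= len; n.z /= len;
    } else {
        n = {0.0f, 0.0f, 0.0f}; // degenerate face
    }
    return n;
}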
- ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to compute face normals for all loaded faces*/ -#ifndef AI_GENFACENORMALPROCESS_H_INC -#define AI_GENFACENORMALPROCESS_H_INC - -#include "Common/BaseProcess.h" -#include <assimp/mesh.h> - -namespace Assimp -{ - -// --------------------------------------------------------------------------- -/** The GenFaceNormalsProcess computes face normals for all faces of all meshes -*/ -class ASSIMP_API_WINONLY GenFaceNormalsProcess : public BaseProcess -{ -public: - - GenFaceNormalsProcess(); - ~GenFaceNormalsProcess(); - -public: - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - -private: - bool GenMeshFaceNormals(aiMesh* pcMesh); - mutable bool force_ = false; -}; - -} // end of namespace Assimp - -#endif // !!AI_GENFACENORMALPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.cpp b/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.cpp deleted file mode 100644 index 3f6c2f86bd..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.cpp +++ /dev/null @@ -1,239 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to generate face -* normals for all imported faces. -*/ - - - -// internal headers -#include "GenVertexNormalsProcess.h" -#include "ProcessHelper.h" -#include <assimp/Exceptional.h> -#include <assimp/qnan.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -GenVertexNormalsProcess::GenVertexNormalsProcess() -: configMaxAngle( AI_DEG_TO_RAD( 175.f ) ) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -GenVertexNormalsProcess::~GenVertexNormalsProcess() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool GenVertexNormalsProcess::IsActive( unsigned int pFlags) const -{ - force_ = (pFlags & aiProcess_ForceGenNormals) != 0; - return (pFlags & aiProcess_GenSmoothNormals) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void GenVertexNormalsProcess::SetupProperties(const Importer* pImp) -{ - // Get the current value of the AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE property - configMaxAngle = pImp->GetPropertyFloat(AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE,(ai_real)175.0); - configMaxAngle = AI_DEG_TO_RAD(std::max(std::min(configMaxAngle,(ai_real)175.0),(ai_real)0.0)); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void GenVertexNormalsProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("GenVertexNormalsProcess begin"); - - if (pScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT) { - throw DeadlyImportError("Post-processing order mismatch: expecting pseudo-indexed (\"verbose\") vertices here"); - } - - bool bHas = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a) { - if(GenMeshVertexNormals( pScene->mMeshes[a],a)) - bHas = true; - } - - if (bHas) { - ASSIMP_LOG_INFO("GenVertexNormalsProcess finished. " - "Vertex normals have been calculated"); - } else { - ASSIMP_LOG_DEBUG("GenVertexNormalsProcess finished. " - "Normals are already there"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
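SetupProperties above pulls the smoothing angle from the importer's property list and clamps it to [0, 175] degrees. On the consumer side the property is set before ReadFile(); a minimal sketch follows, where the 66-degree value is only a common example, not anything mandated by this code.

// Sketch: configuring AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE, which the deleted
// GenVertexNormalsProcess::SetupProperties() reads. 66 degrees is an example.
#include <assimp/Importer.hpp>
#include <assimp/config.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>
#include <cstdio>

void ImportWithSmoothingAngle(const char *path) {
    Assimp::Importer importer;
    importer.SetPropertyFloat(AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE, 66.0f);
    const aiScene *scene = importer.ReadFile(path,
            aiProcess_Triangulate | aiProcess_GenSmoothNormals);
    if (scene) {
        std::printf("imported %u meshes with smoothed normals\n", scene->mNumMeshes);
    }
    // The scene is owned by 'importer' and is freed when it goes out of scope.
}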
-bool GenVertexNormalsProcess::GenMeshVertexNormals (aiMesh* pMesh, unsigned int meshIndex) -{ - if (NULL != pMesh->mNormals) { - if (force_) delete[] pMesh->mNormals; - else return false; - } - - // If the mesh consists of lines and/or points but not of - // triangles or higher-order polygons the normal vectors - // are undefined. - if (!(pMesh->mPrimitiveTypes & (aiPrimitiveType_TRIANGLE | aiPrimitiveType_POLYGON))) - { - ASSIMP_LOG_INFO("Normal vectors are undefined for line and point meshes"); - return false; - } - - // Allocate the array to hold the output normals - const float qnan = std::numeric_limits<ai_real>::quiet_NaN(); - pMesh->mNormals = new aiVector3D[pMesh->mNumVertices]; - - // Compute per-face normals but store them per-vertex - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) - { - const aiFace& face = pMesh->mFaces[a]; - if (face.mNumIndices < 3) - { - // either a point or a line -> no normal vector - for (unsigned int i = 0;i < face.mNumIndices;++i) { - pMesh->mNormals[face.mIndices[i]] = aiVector3D(qnan); - } - - continue; - } - - const aiVector3D* pV1 = &pMesh->mVertices[face.mIndices[0]]; - const aiVector3D* pV2 = &pMesh->mVertices[face.mIndices[1]]; - const aiVector3D* pV3 = &pMesh->mVertices[face.mIndices[face.mNumIndices-1]]; - const aiVector3D vNor = ((*pV2 - *pV1) ^ (*pV3 - *pV1)).NormalizeSafe(); - - for (unsigned int i = 0;i < face.mNumIndices;++i) { - pMesh->mNormals[face.mIndices[i]] = vNor; - } - } - - // Set up a SpatialSort to quickly find all vertices close to a given position - // check whether we can reuse the SpatialSort of a previous step. - SpatialSort* vertexFinder = NULL; - SpatialSort _vertexFinder; - ai_real posEpsilon = ai_real( 1e-5 ); - if (shared) { - std::vector<std::pair<SpatialSort,ai_real> >* avf; - shared->GetProperty(AI_SPP_SPATIAL_SORT,avf); - if (avf) - { - std::pair<SpatialSort,ai_real>& blubb = avf->operator [] (meshIndex); - vertexFinder = &blubb.first; - posEpsilon = blubb.second; - } - } - if (!vertexFinder) { - _vertexFinder.Fill(pMesh->mVertices, pMesh->mNumVertices, sizeof( aiVector3D)); - vertexFinder = &_vertexFinder; - posEpsilon = ComputePositionEpsilon(pMesh); - } - std::vector<unsigned int> verticesFound; - aiVector3D* pcNew = new aiVector3D[pMesh->mNumVertices]; - - if (configMaxAngle >= AI_DEG_TO_RAD( 175.f )) { - // There is no angle limit. Thus all vertices with positions close - // to each other will receive the same vertex normal. This allows us - // to optimize the whole algorithm a little bit ... - std::vector<bool> abHad(pMesh->mNumVertices,false); - for (unsigned int i = 0; i < pMesh->mNumVertices;++i) { - if (abHad[i]) { - continue; - } - - // Get all vertices that share this one ... - vertexFinder->FindPositions( pMesh->mVertices[i], posEpsilon, verticesFound); - - aiVector3D pcNor; - for (unsigned int a = 0; a < verticesFound.size(); ++a) { - const aiVector3D& v = pMesh->mNormals[verticesFound[a]]; - if (is_not_qnan(v.x))pcNor += v; - } - pcNor.NormalizeSafe(); - - // Write the smoothed normal back to all affected normals - for (unsigned int a = 0; a < verticesFound.size(); ++a) - { - unsigned int vidx = verticesFound[a]; - pcNew[vidx] = pcNor; - abHad[vidx] = true; - } - } - } - // Slower code path if a smooth angle is set. There are many ways to achieve - // the effect, this one is the most straightforward one. - else { - const ai_real fLimit = std::cos(configMaxAngle); - for (unsigned int i = 0; i < pMesh->mNumVertices;++i) { - // Get all vertices that share this one ... 
- vertexFinder->FindPositions( pMesh->mVertices[i] , posEpsilon, verticesFound); - - aiVector3D vr = pMesh->mNormals[i]; - - aiVector3D pcNor; - for (unsigned int a = 0; a < verticesFound.size(); ++a) { - aiVector3D v = pMesh->mNormals[verticesFound[a]]; - - // Check whether the angle between the two normals is not too large. - // Skip the angle check on our own normal to avoid false negatives - // (v*v is not guaranteed to be 1.0 for all unit vectors v) - if (is_not_qnan(v.x) && (verticesFound[a] == i || (v * vr >= fLimit))) - pcNor += v; - } - pcNew[i] = pcNor.NormalizeSafe(); - } - } - - delete[] pMesh->mNormals; - pMesh->mNormals = pcNew; - - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.h b/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.h deleted file mode 100644 index 2ceee17e85..0000000000 --- a/thirdparty/assimp/code/PostProcessing/GenVertexNormalsProcess.h +++ /dev/null @@ -1,111 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to compute vertex normals - for all loaded vertizes */ -#ifndef AI_GENVERTEXNORMALPROCESS_H_INC -#define AI_GENVERTEXNORMALPROCESS_H_INC - -#include "Common/assbin_chunks.h" -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> - -// Forward declarations -class GenNormalsTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** The GenFaceNormalsProcess computes vertex normals for all vertices -*/ -class ASSIMP_API GenVertexNormalsProcess : public BaseProcess { -public: - GenVertexNormalsProcess(); - ~GenVertexNormalsProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. 
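The angle-limited path of GenMeshVertexNormals boils down to a single dot-product test per candidate normal: a neighbouring face normal only contributes to the averaged vertex normal if it lies within the configured angle of the vertex's own face normal. A small standalone illustration, assuming both normals are unit length (which NormalizeSafe() guarantees above):

// Smoothing-angle test used by the slower path above: keep a neighbour's
// contribution only if the angle between the two unit normals is below the
// configured maximum, i.e. their dot product is at least cos(max_angle).
#include <cmath>

static bool ContributesToSmoothNormal(float dot_with_own_normal, float max_angle_rad) {
    const float limit = std::cos(max_angle_rad); // same role as fLimit above
    return dot_with_own_normal >= limit;         // smaller angle -> larger dot product
}

// Example: with a 66-degree limit, two face normals 80 degrees apart stay a
// hard edge, since cos(80 deg) ~= 0.17 < cos(66 deg) ~= 0.41.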
- * @param pFlags The processing flags the importer was called with. - * A bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - - // setter for configMaxAngle - inline void SetMaxSmoothAngle(ai_real f) { - configMaxAngle =f; - } - - // ------------------------------------------------------------------- - /** Computes normals for a specific mesh - * @param pcMesh Mesh - * @param meshIndex Index of the mesh - * @return true if vertex normals have been computed - */ - bool GenMeshVertexNormals (aiMesh* pcMesh, unsigned int meshIndex); - -private: - /** Configuration option: maximum smoothing angle, in radians*/ - ai_real configMaxAngle; - mutable bool force_ = false; -}; - -} // end of namespace Assimp - -#endif // !!AI_GENVERTEXNORMALPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.cpp b/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.cpp deleted file mode 100644 index d0a016fa42..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.cpp +++ /dev/null @@ -1,379 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to improve the cache locality of a mesh. - * <br> - * The algorithm is roughly basing on this paper: - * http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf - * .. although overdraw reduction isn't implemented yet ... - */ - -// internal headers -#include "PostProcessing/ImproveCacheLocality.h" -#include "Common/VertexTriangleAdjacency.h" - -#include <assimp/StringUtils.h> -#include <assimp/postprocess.h> -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> -#include <stdio.h> -#include <stack> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -ImproveCacheLocalityProcess::ImproveCacheLocalityProcess() -: mConfigCacheDepth(PP_ICL_PTCACHE_SIZE) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -ImproveCacheLocalityProcess::~ImproveCacheLocalityProcess() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool ImproveCacheLocalityProcess::IsActive( unsigned int pFlags) const { - return (pFlags & aiProcess_ImproveCacheLocality) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Setup configuration -void ImproveCacheLocalityProcess::SetupProperties(const Importer* pImp) { - // AI_CONFIG_PP_ICL_PTCACHE_SIZE controls the target cache size for the optimizer - mConfigCacheDepth = pImp->GetPropertyInteger(AI_CONFIG_PP_ICL_PTCACHE_SIZE,PP_ICL_PTCACHE_SIZE); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void ImproveCacheLocalityProcess::Execute( aiScene* pScene) { - if (!pScene->mNumMeshes) { - ASSIMP_LOG_DEBUG("ImproveCacheLocalityProcess skipped; there are no meshes"); - return; - } - - ASSIMP_LOG_DEBUG("ImproveCacheLocalityProcess begin"); - - float out = 0.f; - unsigned int numf = 0, numm = 0; - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a ){ - const float res = ProcessMesh( pScene->mMeshes[a],a); - if (res) { - numf += pScene->mMeshes[a]->mNumFaces; - out += res; - ++numm; - } - } - if (!DefaultLogger::isNullLogger()) { - if (numf > 0) { - ASSIMP_LOG_INFO_F("Cache relevant are ", numm, " meshes (", numf, " faces). Average output ACMR is ", out / numf); - } - ASSIMP_LOG_DEBUG("ImproveCacheLocalityProcess finished. 
"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Improves the cache coherency of a specific mesh -ai_real ImproveCacheLocalityProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshNum) { - // TODO: rewrite this to use std::vector or boost::shared_array - ai_assert(nullptr != pMesh); - - // Check whether the input data is valid - // - there must be vertices and faces - // - all faces must be triangulated or we can't operate on them - if (!pMesh->HasFaces() || !pMesh->HasPositions()) - return static_cast<ai_real>(0.f); - - if (pMesh->mPrimitiveTypes != aiPrimitiveType_TRIANGLE) { - ASSIMP_LOG_ERROR("This algorithm works on triangle meshes only"); - return static_cast<ai_real>(0.f); - } - - if(pMesh->mNumVertices <= mConfigCacheDepth) { - return static_cast<ai_real>(0.f); - } - - ai_real fACMR = 3.f; - const aiFace* const pcEnd = pMesh->mFaces+pMesh->mNumFaces; - - // Input ACMR is for logging purposes only - if (!DefaultLogger::isNullLogger()) { - - unsigned int* piFIFOStack = new unsigned int[mConfigCacheDepth]; - memset(piFIFOStack,0xff,mConfigCacheDepth*sizeof(unsigned int)); - unsigned int* piCur = piFIFOStack; - const unsigned int* const piCurEnd = piFIFOStack + mConfigCacheDepth; - - // count the number of cache misses - unsigned int iCacheMisses = 0; - for (const aiFace* pcFace = pMesh->mFaces;pcFace != pcEnd;++pcFace) { - for (unsigned int qq = 0; qq < 3;++qq) { - bool bInCache = false; - for (unsigned int* pp = piFIFOStack;pp < piCurEnd;++pp) { - if (*pp == pcFace->mIndices[qq]) { - // the vertex is in cache - bInCache = true; - break; - } - } - if (!bInCache) { - ++iCacheMisses; - if (piCurEnd == piCur) { - piCur = piFIFOStack; - } - *piCur++ = pcFace->mIndices[qq]; - } - } - } - delete[] piFIFOStack; - fACMR = (ai_real) iCacheMisses / pMesh->mNumFaces; - if (3.0 == fACMR) { - char szBuff[128]; // should be sufficiently large in every case - - // the JoinIdenticalVertices process has not been executed on this - // mesh, otherwise this value would normally be at least minimally - // smaller than 3.0 ... - ai_snprintf(szBuff,128,"Mesh %u: Not suitable for vcache optimization",meshNum); - ASSIMP_LOG_WARN(szBuff); - return static_cast<ai_real>(0.f); - } - } - - // first we need to build a vertex-triangle adjacency list - VertexTriangleAdjacency adj(pMesh->mFaces,pMesh->mNumFaces, pMesh->mNumVertices,true); - - // build a list to store per-vertex caching time stamps - unsigned int* const piCachingStamps = new unsigned int[pMesh->mNumVertices]; - memset(piCachingStamps,0x0,pMesh->mNumVertices*sizeof(unsigned int)); - - // allocate an empty output index buffer. We store the output indices in one large array. - // Since the number of triangles won't change the input faces can be reused. 
This is how - // we save thousands of redundant mini allocations for aiFace::mIndices - const unsigned int iIdxCnt = pMesh->mNumFaces*3; - unsigned int* const piIBOutput = new unsigned int[iIdxCnt]; - unsigned int* piCSIter = piIBOutput; - - // allocate the flag array to hold the information - // whether a face has already been emitted or not - std::vector<bool> abEmitted(pMesh->mNumFaces,false); - - // dead-end vertex index stack - std::stack<unsigned int, std::vector<unsigned int> > sDeadEndVStack; - - // create a copy of the piNumTriPtr buffer - unsigned int* const piNumTriPtr = adj.mLiveTriangles; - const std::vector<unsigned int> piNumTriPtrNoModify(piNumTriPtr, piNumTriPtr + pMesh->mNumVertices); - - // get the largest number of referenced triangles and allocate the "candidate buffer" - unsigned int iMaxRefTris = 0; { - const unsigned int* piCur = adj.mLiveTriangles; - const unsigned int* const piCurEnd = adj.mLiveTriangles+pMesh->mNumVertices; - for (;piCur != piCurEnd;++piCur) { - iMaxRefTris = std::max(iMaxRefTris,*piCur); - } - } - ai_assert(iMaxRefTris > 0); - unsigned int* piCandidates = new unsigned int[iMaxRefTris*3]; - unsigned int iCacheMisses = 0; - - // ................................................................................... - /** PSEUDOCODE for the algorithm - - A = Build-Adjacency(I) Vertex-triangle adjacency - L = Get-Triangle-Counts(A) Per-vertex live triangle counts - C = Zero(Vertex-Count(I)) Per-vertex caching time stamps - D = Empty-Stack() Dead-end vertex stack - E = False(Triangle-Count(I)) Per triangle emitted flag - O = Empty-Index-Buffer() Empty output buffer - f = 0 Arbitrary starting vertex - s = k+1, i = 1 Time stamp and cursor - while f >= 0 For all valid fanning vertices - N = Empty-Set() 1-ring of next candidates - for each Triangle t in Neighbors(A, f) - if !Emitted(E,t) - for each Vertex v in t - Append(O,v) Output vertex - Push(D,v) Add to dead-end stack - Insert(N,v) Register as candidate - L[v] = L[v]-1 Decrease live triangle count - if s-C[v] > k If not in cache - C[v] = s Set time stamp - s = s+1 Increment time stamp - E[t] = true Flag triangle as emitted - Select next fanning vertex - f = Get-Next-Vertex(I,i,k,N,C,s,L,D) - return O - */ - // ................................................................................... 
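Both the logging block earlier in this function and the statistics printed at the end rate a triangle order by its ACMR (average cache miss ratio): vertex-cache misses divided by triangle count, simulated against a FIFO cache of mConfigCacheDepth entries. A compact standalone version of that measurement, with illustrative names and plain STL containers:

// Illustrative ACMR measurement: replay an index buffer through a simulated
// FIFO vertex cache and report misses per triangle. 3.0 means no vertex reuse
// at all; cache-friendly orderings come out well below that.
#include <algorithm>
#include <cstdint>
#include <deque>
#include <vector>

static float MeasureAcmr(const std::vector<uint32_t> &indices, size_t cache_size) {
    std::deque<uint32_t> fifo; // oldest cached vertex at the front
    size_t misses = 0;
    for (uint32_t idx : indices) {
        if (std::find(fifo.begin(), fifo.end(), idx) == fifo.end()) {
            ++misses;                       // vertex not in cache
            fifo.push_back(idx);
            if (fifo.size() > cache_size) { // evict the oldest entry
                fifo.pop_front();
            }
        }
    }
    const size_t triangles = indices.size() / 3;
    return triangles ? static_cast<float>(misses) / triangles : 0.0f;
}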
- - int ivdx = 0; - int ics = 1; - int iStampCnt = mConfigCacheDepth+1; - while (ivdx >= 0) { - - unsigned int icnt = piNumTriPtrNoModify[ivdx]; - unsigned int* piList = adj.GetAdjacentTriangles(ivdx); - unsigned int* piCurCandidate = piCandidates; - - // get all triangles in the neighborhood - for (unsigned int tri = 0; tri < icnt;++tri) { - - // if they have not yet been emitted, add them to the output IB - const unsigned int fidx = *piList++; - if (!abEmitted[fidx]) { - - // so iterate through all vertices of the current triangle - const aiFace* pcFace = &pMesh->mFaces[ fidx ]; - unsigned nind = pcFace->mNumIndices; - for (unsigned ind = 0; ind < nind; ind++) { - unsigned dp = pcFace->mIndices[ind]; - - // the current vertex won't have any free triangles after this step - if (ivdx != (int)dp) { - // append the vertex to the dead-end stack - sDeadEndVStack.push(dp); - - // register as candidate for the next step - *piCurCandidate++ = dp; - - // decrease the per-vertex triangle counts - piNumTriPtr[dp]--; - } - - // append the vertex to the output index buffer - *piCSIter++ = dp; - - // if the vertex is not yet in cache, set its cache count - if (iStampCnt-piCachingStamps[dp] > mConfigCacheDepth) { - piCachingStamps[dp] = iStampCnt++; - ++iCacheMisses; - } - } - // flag triangle as emitted - abEmitted[fidx] = true; - } - } - - // the vertex has now no living adjacent triangles anymore - piNumTriPtr[ivdx] = 0; - - // get next fanning vertex - ivdx = -1; - int max_priority = -1; - for (unsigned int* piCur = piCandidates;piCur != piCurCandidate;++piCur) { - const unsigned int dp = *piCur; - - // must have live triangles - if (piNumTriPtr[dp] > 0) { - int priority = 0; - - // will the vertex be in cache, even after fanning occurs? - unsigned int tmp; - if ((tmp = iStampCnt-piCachingStamps[dp]) + 2*piNumTriPtr[dp] <= mConfigCacheDepth) { - priority = tmp; - } - - // keep best candidate - if (priority > max_priority) { - max_priority = priority; - ivdx = dp; - } - } - } - // did we reach a dead end? - if (-1 == ivdx) { - // need to get a non-local vertex for which we have a good chance that it is still - // in the cache ... - while (!sDeadEndVStack.empty()) { - unsigned int iCachedIdx = sDeadEndVStack.top(); - sDeadEndVStack.pop(); - if (piNumTriPtr[ iCachedIdx ] > 0) { - ivdx = iCachedIdx; - break; - } - } - - if (-1 == ivdx) { - // well, there isn't such a vertex. Simply get the next vertex in input order and - // hope it is not too bad ... - while (ics < (int)pMesh->mNumVertices) { - ++ics; - if (piNumTriPtr[ics] > 0) { - ivdx = ics; - break; - } - } - } - } - } - ai_real fACMR2 = 0.0f; - if (!DefaultLogger::isNullLogger()) { - fACMR2 = (float)iCacheMisses / pMesh->mNumFaces; - - // very intense verbose logging ... 
prepare for much text if there are many meshes - if ( DefaultLogger::get()->getLogSeverity() == Logger::VERBOSE) { - ASSIMP_LOG_DEBUG_F("Mesh %u | ACMR in: ", meshNum, " out: ", fACMR, " | ~", fACMR2, ((fACMR - fACMR2) / fACMR) * 100.f); - } - - fACMR2 *= pMesh->mNumFaces; - } - // sort the output index buffer back to the input array - piCSIter = piIBOutput; - for (aiFace* pcFace = pMesh->mFaces; pcFace != pcEnd;++pcFace) { - unsigned nind = pcFace->mNumIndices; - unsigned * ind = pcFace->mIndices; - if (nind > 0) ind[0] = *piCSIter++; - if (nind > 1) ind[1] = *piCSIter++; - if (nind > 2) ind[2] = *piCSIter++; - } - - // delete temporary storage - delete[] piCachingStamps; - delete[] piIBOutput; - delete[] piCandidates; - - return fACMR2; -} diff --git a/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.h b/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.h deleted file mode 100644 index de25ecd9fb..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ImproveCacheLocality.h +++ /dev/null @@ -1,101 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to reorder faces for - better cache locality*/ -#ifndef AI_IMPROVECACHELOCALITY_H_INC -#define AI_IMPROVECACHELOCALITY_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/types.h> - -struct aiMesh; - -namespace Assimp -{ - -// --------------------------------------------------------------------------- -/** The ImproveCacheLocalityProcess reorders all faces for improved vertex - * cache locality. It tries to arrange all faces to fans and to render - * faces which share vertices directly one after the other. - * - * @note This step expects triagulated input data. 
- */ -class ImproveCacheLocalityProcess : public BaseProcess -{ -public: - - ImproveCacheLocalityProcess(); - ~ImproveCacheLocalityProcess(); - -public: - - // ------------------------------------------------------------------- - // Check whether the pp step is active - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Executes the pp step on a given scene - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - // Configures the pp step - void SetupProperties(const Importer* pImp); - -protected: - // ------------------------------------------------------------------- - /** Executes the postprocessing step on the given mesh - * @param pMesh The mesh to process. - * @param meshNum Index of the mesh to process - */ - ai_real ProcessMesh( aiMesh* pMesh, unsigned int meshNum); - -private: - //! Configuration parameter: specifies the size of the cache to - //! optimize the vertex data for. - unsigned int mConfigCacheDepth; -}; - -} // end of namespace Assimp - -#endif // AI_IMPROVECACHELOCALITY_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.cpp b/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.cpp deleted file mode 100644 index f121fc60d3..0000000000 --- a/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.cpp +++ /dev/null @@ -1,438 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the post processing step to join identical vertices - * for all imported meshes - */ - - -#ifndef ASSIMP_BUILD_NO_JOINVERTICES_PROCESS - -#include "JoinVerticesProcess.h" -#include "ProcessHelper.h" -#include <assimp/Vertex.h> -#include <assimp/TinyFormatter.h> -#include <stdio.h> -#include <unordered_set> - -using namespace Assimp; -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -JoinVerticesProcess::JoinVerticesProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -JoinVerticesProcess::~JoinVerticesProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool JoinVerticesProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_JoinIdenticalVertices) != 0; -} -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void JoinVerticesProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("JoinVerticesProcess begin"); - - // get the total number of vertices BEFORE the step is executed - int iNumOldVertices = 0; - if (!DefaultLogger::isNullLogger()) { - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) { - iNumOldVertices += pScene->mMeshes[a]->mNumVertices; - } - } - - // execute the step - int iNumVertices = 0; - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) - iNumVertices += ProcessMesh( pScene->mMeshes[a],a); - - // if logging is active, print detailed statistics - if (!DefaultLogger::isNullLogger()) { - if (iNumOldVertices == iNumVertices) { - ASSIMP_LOG_DEBUG("JoinVerticesProcess finished "); - } else { - ASSIMP_LOG_INFO_F("JoinVerticesProcess finished | Verts in: ", iNumOldVertices, - " out: ", iNumVertices, " | ~", - ((iNumOldVertices - iNumVertices) / (float)iNumOldVertices) * 100.f ); - } - } - - pScene->mFlags |= AI_SCENE_FLAGS_NON_VERBOSE_FORMAT; -} - -namespace { - -bool areVerticesEqual(const Vertex &lhs, const Vertex &rhs, bool complex) -{ - // A little helper to find locally close vertices faster. - // Try to reuse the lookup table from the last step. - const static float epsilon = 1e-5f; - // Squared because we check against squared length of the vector difference - static const float squareEpsilon = epsilon * epsilon; - - // Square compare is useful for animeshes vertices compare - if ((lhs.position - rhs.position).SquareLength() > squareEpsilon) { - return false; - } - - // We just test the other attributes even if they're not present in the mesh. - // In this case they're initialized to 0 so the comparison succeeds. - // By this method the non-present attributes are effectively ignored in the comparison. 
- if ((lhs.normal - rhs.normal).SquareLength() > squareEpsilon) { - return false; - } - - if ((lhs.texcoords[0] - rhs.texcoords[0]).SquareLength() > squareEpsilon) { - return false; - } - - if ((lhs.tangent - rhs.tangent).SquareLength() > squareEpsilon) { - return false; - } - - if ((lhs.bitangent - rhs.bitangent).SquareLength() > squareEpsilon) { - return false; - } - - // Usually we won't have vertex colors or multiple UVs, so we can skip from here - // Actually this increases runtime performance slightly, at least if branch - // prediction is on our side. - if (complex) { - for (int i = 0; i < 8; i++) { - if (i > 0 && (lhs.texcoords[i] - rhs.texcoords[i]).SquareLength() > squareEpsilon) { - return false; - } - if (GetColorDifference(lhs.colors[i], rhs.colors[i]) > squareEpsilon) { - return false; - } - } - } - return true; -} - -template<class XMesh> -void updateXMeshVertices(XMesh *pMesh, std::vector<Vertex> &uniqueVertices) { - // replace vertex data with the unique data sets - pMesh->mNumVertices = (unsigned int)uniqueVertices.size(); - - // ---------------------------------------------------------------------------- - // NOTE - we're *not* calling Vertex::SortBack() because it would check for - // presence of every single vertex component once PER VERTEX. And our CPU - // dislikes branches, even if they're easily predictable. - // ---------------------------------------------------------------------------- - - // Position, if present (check made for aiAnimMesh) - if (pMesh->mVertices) - { - delete [] pMesh->mVertices; - pMesh->mVertices = new aiVector3D[pMesh->mNumVertices]; - for (unsigned int a = 0; a < pMesh->mNumVertices; a++) { - pMesh->mVertices[a] = uniqueVertices[a].position; - } - } - - // Normals, if present - if (pMesh->mNormals) - { - delete [] pMesh->mNormals; - pMesh->mNormals = new aiVector3D[pMesh->mNumVertices]; - for( unsigned int a = 0; a < pMesh->mNumVertices; a++) { - pMesh->mNormals[a] = uniqueVertices[a].normal; - } - } - // Tangents, if present - if (pMesh->mTangents) - { - delete [] pMesh->mTangents; - pMesh->mTangents = new aiVector3D[pMesh->mNumVertices]; - for (unsigned int a = 0; a < pMesh->mNumVertices; a++) { - pMesh->mTangents[a] = uniqueVertices[a].tangent; - } - } - // Bitangents as well - if (pMesh->mBitangents) - { - delete [] pMesh->mBitangents; - pMesh->mBitangents = new aiVector3D[pMesh->mNumVertices]; - for (unsigned int a = 0; a < pMesh->mNumVertices; a++) { - pMesh->mBitangents[a] = uniqueVertices[a].bitangent; - } - } - // Vertex colors - for (unsigned int a = 0; pMesh->HasVertexColors(a); a++) - { - delete [] pMesh->mColors[a]; - pMesh->mColors[a] = new aiColor4D[pMesh->mNumVertices]; - for( unsigned int b = 0; b < pMesh->mNumVertices; b++) { - pMesh->mColors[a][b] = uniqueVertices[b].colors[a]; - } - } - // Texture coords - for (unsigned int a = 0; pMesh->HasTextureCoords(a); a++) - { - delete [] pMesh->mTextureCoords[a]; - pMesh->mTextureCoords[a] = new aiVector3D[pMesh->mNumVertices]; - for (unsigned int b = 0; b < pMesh->mNumVertices; b++) { - pMesh->mTextureCoords[a][b] = uniqueVertices[b].texcoords[a]; - } - } -} -} // namespace - -// ------------------------------------------------------------------------------------------------ -// Unites identical vertices in the given mesh -int JoinVerticesProcess::ProcessMesh( aiMesh* pMesh, unsigned int meshIndex) -{ - static_assert( AI_MAX_NUMBER_OF_COLOR_SETS == 8, "AI_MAX_NUMBER_OF_COLOR_SETS == 8"); - static_assert( AI_MAX_NUMBER_OF_TEXTURECOORDS == 8, "AI_MAX_NUMBER_OF_TEXTURECOORDS == 
8"); - - // Return early if we don't have any positions - if (!pMesh->HasPositions() || !pMesh->HasFaces()) { - return 0; - } - - // We should care only about used vertices, not all of them - // (this can happen due to original file vertices buffer being used by - // multiple meshes) - std::unordered_set<unsigned int> usedVertexIndices; - usedVertexIndices.reserve(pMesh->mNumVertices); - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) - { - aiFace& face = pMesh->mFaces[a]; - for( unsigned int b = 0; b < face.mNumIndices; b++) { - usedVertexIndices.insert(face.mIndices[b]); - } - } - - // We'll never have more vertices afterwards. - std::vector<Vertex> uniqueVertices; - uniqueVertices.reserve( pMesh->mNumVertices); - - // For each vertex the index of the vertex it was replaced by. - // Since the maximal number of vertices is 2^31-1, the most significand bit can be used to mark - // whether a new vertex was created for the index (true) or if it was replaced by an existing - // unique vertex (false). This saves an additional std::vector<bool> and greatly enhances - // branching performance. - static_assert(AI_MAX_VERTICES == 0x7fffffff, "AI_MAX_VERTICES == 0x7fffffff"); - std::vector<unsigned int> replaceIndex( pMesh->mNumVertices, 0xffffffff); - - // float posEpsilonSqr; - SpatialSort* vertexFinder = NULL; - SpatialSort _vertexFinder; - - typedef std::pair<SpatialSort,float> SpatPair; - if (shared) { - std::vector<SpatPair >* avf; - shared->GetProperty(AI_SPP_SPATIAL_SORT,avf); - if (avf) { - SpatPair& blubb = (*avf)[meshIndex]; - vertexFinder = &blubb.first; - // posEpsilonSqr = blubb.second; - } - } - if (!vertexFinder) { - // bad, need to compute it. - _vertexFinder.Fill(pMesh->mVertices, pMesh->mNumVertices, sizeof( aiVector3D)); - vertexFinder = &_vertexFinder; - // posEpsilonSqr = ComputePositionEpsilon(pMesh); - } - - // Again, better waste some bytes than a realloc ... - std::vector<unsigned int> verticesFound; - verticesFound.reserve(10); - - // Run an optimized code path if we don't have multiple UVs or vertex colors. - // This should yield false in more than 99% of all imports ... - const bool complex = ( pMesh->GetNumColorChannels() > 0 || pMesh->GetNumUVChannels() > 1); - const bool hasAnimMeshes = pMesh->mNumAnimMeshes > 0; - - // We'll never have more vertices afterwards. 
- std::vector<std::vector<Vertex>> uniqueAnimatedVertices; - if (hasAnimMeshes) { - uniqueAnimatedVertices.resize(pMesh->mNumAnimMeshes); - for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) { - uniqueAnimatedVertices[animMeshIndex].reserve(pMesh->mNumVertices); - } - } - - // Now check each vertex if it brings something new to the table - for( unsigned int a = 0; a < pMesh->mNumVertices; a++) { - if (usedVertexIndices.find(a) == usedVertexIndices.end()) { - continue; - } - - // collect the vertex data - Vertex v(pMesh,a); - - // collect all vertices that are close enough to the given position - vertexFinder->FindIdenticalPositions( v.position, verticesFound); - unsigned int matchIndex = 0xffffffff; - - // check all unique vertices close to the position if this vertex is already present among them - for( unsigned int b = 0; b < verticesFound.size(); b++) { - const unsigned int vidx = verticesFound[b]; - const unsigned int uidx = replaceIndex[ vidx]; - if( uidx & 0x80000000) - continue; - - const Vertex& uv = uniqueVertices[ uidx]; - - if (!areVerticesEqual(v, uv, complex)) { - continue; - } - - if (hasAnimMeshes) { - // If given vertex is animated, then it has to be preserver 1 to 1 (base mesh and animated mesh require same topology) - // NOTE: not doing this totaly breaks anim meshes as they don't have their own faces (they use pMesh->mFaces) - bool breaksAnimMesh = false; - for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) { - const Vertex& animatedUV = uniqueAnimatedVertices[animMeshIndex][ uidx]; - Vertex aniMeshVertex(pMesh->mAnimMeshes[animMeshIndex], a); - if (!areVerticesEqual(aniMeshVertex, animatedUV, complex)) { - breaksAnimMesh = true; - break; - } - } - if (breaksAnimMesh) { - continue; - } - } - - // we're still here -> this vertex perfectly matches our given vertex - matchIndex = uidx; - break; - } - - // found a replacement vertex among the uniques? - if( matchIndex != 0xffffffff) - { - // store where to found the matching unique vertex - replaceIndex[a] = matchIndex | 0x80000000; - } - else - { - // no unique vertex matches it up to now -> so add it - replaceIndex[a] = (unsigned int)uniqueVertices.size(); - uniqueVertices.push_back( v); - if (hasAnimMeshes) { - for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) { - Vertex aniMeshVertex(pMesh->mAnimMeshes[animMeshIndex], a); - uniqueAnimatedVertices[animMeshIndex].push_back(aniMeshVertex); - } - } - } - } - - if (!DefaultLogger::isNullLogger() && DefaultLogger::get()->getLogSeverity() == Logger::VERBOSE) { - ASSIMP_LOG_DEBUG_F( - "Mesh ",meshIndex, - " (", - (pMesh->mName.length ? pMesh->mName.data : "unnamed"), - ") | Verts in: ",pMesh->mNumVertices, - " out: ", - uniqueVertices.size(), - " | ~", - ((pMesh->mNumVertices - uniqueVertices.size()) / (float)pMesh->mNumVertices) * 100.f, - "%" - ); - } - - updateXMeshVertices(pMesh, uniqueVertices); - if (hasAnimMeshes) { - for (unsigned int animMeshIndex = 0; animMeshIndex < pMesh->mNumAnimMeshes; animMeshIndex++) { - updateXMeshVertices(pMesh->mAnimMeshes[animMeshIndex], uniqueAnimatedVertices[animMeshIndex]); - } - } - - // adjust the indices in all faces - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) - { - aiFace& face = pMesh->mFaces[a]; - for( unsigned int b = 0; b < face.mNumIndices; b++) { - face.mIndices[b] = replaceIndex[face.mIndices[b]] & ~0x80000000; - } - } - - // adjust bone vertex weights. 
- for( int a = 0; a < (int)pMesh->mNumBones; a++) { - aiBone* bone = pMesh->mBones[a]; - std::vector<aiVertexWeight> newWeights; - newWeights.reserve( bone->mNumWeights); - - if ( NULL != bone->mWeights ) { - for ( unsigned int b = 0; b < bone->mNumWeights; b++ ) { - const aiVertexWeight& ow = bone->mWeights[ b ]; - // if the vertex is a unique one, translate it - if ( !( replaceIndex[ ow.mVertexId ] & 0x80000000 ) ) { - aiVertexWeight nw; - nw.mVertexId = replaceIndex[ ow.mVertexId ]; - nw.mWeight = ow.mWeight; - newWeights.push_back( nw ); - } - } - } else { - ASSIMP_LOG_ERROR( "X-Export: aiBone shall contain weights, but pointer to them is NULL." ); - } - - if (newWeights.size() > 0) { - // kill the old and replace them with the translated weights - delete [] bone->mWeights; - bone->mNumWeights = (unsigned int)newWeights.size(); - - bone->mWeights = new aiVertexWeight[bone->mNumWeights]; - memcpy( bone->mWeights, &newWeights[0], bone->mNumWeights * sizeof( aiVertexWeight)); - } - } - return pMesh->mNumVertices; -} - -#endif // !! ASSIMP_BUILD_NO_JOINVERTICES_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.h b/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.h deleted file mode 100644 index e017ae62db..0000000000 --- a/thirdparty/assimp/code/PostProcessing/JoinVerticesProcess.h +++ /dev/null @@ -1,95 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
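Stripped of the spatial sort, the per-attribute epsilon comparisons and the anim-mesh bookkeeping, the ProcessMesh above is the classic vertex-welding pass: collapse equal vertices to one entry and remap the index buffer. The sketch below shows only that core idea with exact position equality; the types and hashing are illustrative and deliberately much cruder than the deleted code.

// Minimal vertex-welding sketch. The real step above also compares normals,
// tangents, UVs and colors within an epsilon and uses a spatial sort rather
// than exact hashing.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct WeldVec3 {
    float x, y, z;
    bool operator==(const WeldVec3 &o) const { return x == o.x && y == o.y && z == o.z; }
};

struct WeldVec3Hash {
    size_t operator()(const WeldVec3 &v) const {
        std::hash<float> h;
        return h(v.x) ^ (h(v.y) << 1) ^ (h(v.z) << 2);
    }
};

static void WeldVertices(std::vector<WeldVec3> &vertices, std::vector<uint32_t> &indices) {
    std::unordered_map<WeldVec3, uint32_t, WeldVec3Hash> first_seen;
    std::vector<WeldVec3> unique;
    std::vector<uint32_t> remap(vertices.size());

    for (uint32_t i = 0; i < vertices.size(); ++i) {
        auto it = first_seen.find(vertices[i]);
        if (it != first_seen.end()) {
            remap[i] = it->second;                     // duplicate -> reuse earlier copy
        } else {
            remap[i] = static_cast<uint32_t>(unique.size());
            first_seen.emplace(vertices[i], remap[i]); // first time this position is seen
            unique.push_back(vertices[i]);
        }
    }
    for (uint32_t &idx : indices) {
        idx = remap[idx]; // rewrite the index buffer against the welded vertices
    }
    vertices.swap(unique);
}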
- ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to join identical vertices - on all imported meshes.*/ -#ifndef AI_JOINVERTICESPROCESS_H_INC -#define AI_JOINVERTICESPROCESS_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/types.h> - -struct aiMesh; - -namespace Assimp -{ - -// --------------------------------------------------------------------------- -/** The JoinVerticesProcess unites identical vertices in all imported meshes. - * By default the importer returns meshes where each face addressed its own - * set of vertices even if that means that identical vertices are stored multiple - * times. The JoinVerticesProcess finds these identical vertices and - * erases all but one of the copies. This usually reduces the number of vertices - * in a mesh by a serious amount and is the standard form to render a mesh. - */ -class ASSIMP_API JoinVerticesProcess : public BaseProcess { -public: - JoinVerticesProcess(); - ~JoinVerticesProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Unites identical vertices in the given mesh. - * @param pMesh The mesh to process. - * @param meshIndex Index of the mesh to process - */ - int ProcessMesh( aiMesh* pMesh, unsigned int meshIndex); -}; - -} // end of namespace Assimp - -#endif // AI_CALCTANGENTSPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.cpp b/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.cpp deleted file mode 100644 index d560f19287..0000000000 --- a/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.cpp +++ /dev/null @@ -1,201 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
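For context, applications normally request this step through the public import flag rather than instantiating the process class directly. A hedged usage sketch, assuming the standard assimp C++ API; the file name is a placeholder:

    #include <assimp/Importer.hpp>
    #include <assimp/postprocess.h>
    #include <assimp/scene.h>

    const aiScene *LoadIndexed(Assimp::Importer &importer) {
        // JoinIdenticalVertices turns the default "verbose" layout back into an indexed mesh
        return importer.ReadFile("model.dae",
                                 aiProcess_Triangulate |
                                 aiProcess_JoinIdenticalVertices);
    }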
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** Implementation of the LimitBoneWeightsProcess post processing step */ - - -#include "LimitBoneWeightsProcess.h" -#include <assimp/StringUtils.h> -#include <assimp/postprocess.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> -#include <stdio.h> - -using namespace Assimp; - - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -LimitBoneWeightsProcess::LimitBoneWeightsProcess() -{ - mMaxWeights = AI_LMW_MAX_WEIGHTS; -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -LimitBoneWeightsProcess::~LimitBoneWeightsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool LimitBoneWeightsProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_LimitBoneWeights) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void LimitBoneWeightsProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("LimitBoneWeightsProcess begin"); - for (unsigned int a = 0; a < pScene->mNumMeshes; ++a ) { - ProcessMesh(pScene->mMeshes[a]); - } - - ASSIMP_LOG_DEBUG("LimitBoneWeightsProcess end"); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void LimitBoneWeightsProcess::SetupProperties(const Importer* pImp) -{ - // get the current value of the property - this->mMaxWeights = pImp->GetPropertyInteger(AI_CONFIG_PP_LBW_MAX_WEIGHTS,AI_LMW_MAX_WEIGHTS); -} - -// ------------------------------------------------------------------------------------------------ -// Unites identical vertices in the given mesh -void LimitBoneWeightsProcess::ProcessMesh( aiMesh* pMesh) -{ - if( !pMesh->HasBones()) - return; - - // collect all bone weights per vertex - typedef std::vector< std::vector< Weight > > WeightsPerVertex; - WeightsPerVertex vertexWeights( pMesh->mNumVertices); - - // collect all weights per vertex - for( unsigned int a = 0; a < pMesh->mNumBones; a++) - { - const aiBone* bone = pMesh->mBones[a]; - for( unsigned int b = 0; b < bone->mNumWeights; b++) - { - const aiVertexWeight& w = bone->mWeights[b]; - vertexWeights[w.mVertexId].push_back( Weight( a, w.mWeight)); - } - } - - unsigned int removed = 0, old_bones = pMesh->mNumBones; - - // now cut the weight count if it exceeds the maximum - bool bChanged = false; - for( WeightsPerVertex::iterator vit = vertexWeights.begin(); vit != vertexWeights.end(); ++vit) - { - if( vit->size() <= mMaxWeights) - continue; - - bChanged = true; - - // more than the defined maximum -> first sort by weight in descending order. That's - // why we defined the < operator in such a weird way. - std::sort( vit->begin(), vit->end()); - - // now kill everything beyond the maximum count - unsigned int m = static_cast<unsigned int>(vit->size()); - vit->erase( vit->begin() + mMaxWeights, vit->end()); - removed += static_cast<unsigned int>(m-vit->size()); - - // and renormalize the weights - float sum = 0.0f; - for( std::vector<Weight>::const_iterator it = vit->begin(); it != vit->end(); ++it ) { - sum += it->mWeight; - } - if( 0.0f != sum ) { - const float invSum = 1.0f / sum; - for( std::vector<Weight>::iterator it = vit->begin(); it != vit->end(); ++it ) { - it->mWeight *= invSum; - } - } - } - - if (bChanged) { - // rebuild the vertex weight array for all bones - typedef std::vector< std::vector< aiVertexWeight > > WeightsPerBone; - WeightsPerBone boneWeights( pMesh->mNumBones); - for( unsigned int a = 0; a < vertexWeights.size(); a++) - { - const std::vector<Weight>& vw = vertexWeights[a]; - for( std::vector<Weight>::const_iterator it = vw.begin(); it != vw.end(); ++it) - boneWeights[it->mBone].push_back( aiVertexWeight( a, it->mWeight)); - } - - // and finally copy the vertex weight list over to the mesh's bones - std::vector<bool> abNoNeed(pMesh->mNumBones,false); - bChanged = false; - - for( unsigned int a = 0; a < pMesh->mNumBones; a++) - { - const std::vector<aiVertexWeight>& bw = boneWeights[a]; - aiBone* bone = pMesh->mBones[a]; - - if ( bw.empty() ) - { - abNoNeed[a] = bChanged = true; - continue; - } - - // copy the weight list. 
should always be less weights than before, so we don't need a new allocation - ai_assert( bw.size() <= bone->mNumWeights); - bone->mNumWeights = static_cast<unsigned int>( bw.size() ); - ::memcpy( bone->mWeights, &bw[0], bw.size() * sizeof( aiVertexWeight)); - } - - if (bChanged) { - // the number of new bones is smaller than before, so we can reuse the old array - aiBone** ppcCur = pMesh->mBones;aiBone** ppcSrc = ppcCur; - - for (std::vector<bool>::const_iterator iter = abNoNeed.begin();iter != abNoNeed.end() ;++iter) { - if (*iter) { - delete *ppcSrc; - --pMesh->mNumBones; - } - else *ppcCur++ = *ppcSrc; - ++ppcSrc; - } - } - - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_INFO_F("Removed ", removed, " weights. Input bones: ", old_bones, ". Output bones: ", pMesh->mNumBones ); - } - } -} diff --git a/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.h b/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.h deleted file mode 100644 index 73c2a68d53..0000000000 --- a/thirdparty/assimp/code/PostProcessing/LimitBoneWeightsProcess.h +++ /dev/null @@ -1,138 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** Defines a post processing step to limit the number of bones affecting a single vertex. 
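The removed ProcessMesh() above collects all influences per vertex, sorts them by descending weight (hence the inverted operator< on Weight in the header below), truncates the list to the configured maximum, and renormalizes the remainder to sum to 1. A standalone sketch of that per-vertex step, not assimp code; names are illustrative:

    // Keep only the strongest maxWeights influences per vertex, then renormalize.
    #include <algorithm>
    #include <vector>

    struct Influence { unsigned int bone; float weight; };

    static void LimitInfluences(std::vector<Influence> &v, size_t maxWeights = 4) {
        if (v.size() <= maxWeights)
            return;
        // strongest weights first, then drop the tail
        std::sort(v.begin(), v.end(),
                  [](const Influence &a, const Influence &b) { return a.weight > b.weight; });
        v.resize(maxWeights);
        float sum = 0.0f;
        for (const Influence &i : v) sum += i.weight;
        if (sum != 0.0f) {
            for (Influence &i : v) i.weight /= sum;   // renormalize to 1
        }
    }

The limit itself comes from AI_CONFIG_PP_LBW_MAX_WEIGHTS (default AI_LMW_MAX_WEIGHTS, i.e. 4), which callers can override via Importer::SetPropertyInteger before reading the file.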
*/ -#ifndef AI_LIMITBONEWEIGHTSPROCESS_H_INC -#define AI_LIMITBONEWEIGHTSPROCESS_H_INC - -#include "Common/BaseProcess.h" - -// Forward declarations -struct aiMesh; - -class LimitBoneWeightsTest; - -namespace Assimp { - -// NOTE: If you change these limits, don't forget to change the -// corresponding values in all Assimp ports - -// ********************************************************** -// Java: ConfigProperty.java, -// ConfigProperty.DEFAULT_BONE_WEIGHT_LIMIT -// ********************************************************** - -#if (!defined AI_LMW_MAX_WEIGHTS) -# define AI_LMW_MAX_WEIGHTS 0x4 -#endif // !! AI_LMW_MAX_WEIGHTS - -// --------------------------------------------------------------------------- -/** This post processing step limits the number of bones affecting a vertex -* to a certain maximum value. If a vertex is affected by more than that number -* of bones, the bone weight with the least influence on this vertex are removed. -* The other weights on this bone are then renormalized to assure the sum weight -* to be 1. -*/ -class ASSIMP_API LimitBoneWeightsProcess : public BaseProcess { -public: - LimitBoneWeightsProcess(); - ~LimitBoneWeightsProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. - * A bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** Limits the bone weight count for all vertices in the given mesh. - * @param pMesh The mesh to process. - */ - void ProcessMesh( aiMesh* pMesh); - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Describes a bone weight on a vertex */ - struct Weight { - unsigned int mBone; ///< Index of the bone - float mWeight; ///< Weight of that bone on this vertex - Weight() AI_NO_EXCEPT - : mBone(0) - , mWeight(0.0f) { - // empty - } - - Weight( unsigned int pBone, float pWeight) - : mBone(pBone) - , mWeight(pWeight) { - // empty - } - - /** Comparison operator to sort bone weights by descending weight */ - bool operator < (const Weight& pWeight) const { - return mWeight > pWeight.mWeight; - } - }; - - /** Maximum number of bones influencing any single vertex. 
*/ - unsigned int mMaxWeights; -}; - -} // end of namespace Assimp - -#endif // AI_LIMITBONEWEIGHTSPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.cpp b/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.cpp deleted file mode 100644 index 41f50a5ba5..0000000000 --- a/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.cpp +++ /dev/null @@ -1,255 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file Implementation of the post processing step "MakeVerboseFormat" -*/ - - -#include "MakeVerboseFormat.h" -#include <assimp/scene.h> -#include <assimp/DefaultLogger.hpp> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -MakeVerboseFormatProcess::MakeVerboseFormatProcess() -{ - // nothing to do here -} -// ------------------------------------------------------------------------------------------------ -MakeVerboseFormatProcess::~MakeVerboseFormatProcess() -{ - // nothing to do here -} -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void MakeVerboseFormatProcess::Execute( aiScene* pScene) -{ - ai_assert(NULL != pScene); - ASSIMP_LOG_DEBUG("MakeVerboseFormatProcess begin"); - - bool bHas = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) - { - if( MakeVerboseFormat( pScene->mMeshes[a])) - bHas = true; - } - if (bHas) { - ASSIMP_LOG_INFO("MakeVerboseFormatProcess finished. There was much work to do ..."); - } else { - ASSIMP_LOG_DEBUG("MakeVerboseFormatProcess. 
There was nothing to do."); - } - - pScene->mFlags &= ~AI_SCENE_FLAGS_NON_VERBOSE_FORMAT; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -bool MakeVerboseFormatProcess::MakeVerboseFormat(aiMesh* pcMesh) -{ - ai_assert(NULL != pcMesh); - - unsigned int iOldNumVertices = pcMesh->mNumVertices; - const unsigned int iNumVerts = pcMesh->mNumFaces*3; - - aiVector3D* pvPositions = new aiVector3D[ iNumVerts ]; - - aiVector3D* pvNormals = NULL; - if (pcMesh->HasNormals()) - { - pvNormals = new aiVector3D[iNumVerts]; - } - aiVector3D* pvTangents = NULL, *pvBitangents = NULL; - if (pcMesh->HasTangentsAndBitangents()) - { - pvTangents = new aiVector3D[iNumVerts]; - pvBitangents = new aiVector3D[iNumVerts]; - } - - aiVector3D* apvTextureCoords[AI_MAX_NUMBER_OF_TEXTURECOORDS] = {0}; - aiColor4D* apvColorSets[AI_MAX_NUMBER_OF_COLOR_SETS] = {0}; - - unsigned int p = 0; - while (pcMesh->HasTextureCoords(p)) - apvTextureCoords[p++] = new aiVector3D[iNumVerts]; - - p = 0; - while (pcMesh->HasVertexColors(p)) - apvColorSets[p++] = new aiColor4D[iNumVerts]; - - // allocate enough memory to hold output bones and vertex weights ... - std::vector<aiVertexWeight>* newWeights = new std::vector<aiVertexWeight>[pcMesh->mNumBones]; - for (unsigned int i = 0;i < pcMesh->mNumBones;++i) { - newWeights[i].reserve(pcMesh->mBones[i]->mNumWeights*3); - } - - // iterate through all faces and build a clean list - unsigned int iIndex = 0; - for (unsigned int a = 0; a< pcMesh->mNumFaces;++a) - { - aiFace* pcFace = &pcMesh->mFaces[a]; - for (unsigned int q = 0; q < pcFace->mNumIndices;++q,++iIndex) - { - // need to build a clean list of bones, too - for (unsigned int i = 0;i < pcMesh->mNumBones;++i) - { - for (unsigned int a = 0; a < pcMesh->mBones[i]->mNumWeights;a++) - { - const aiVertexWeight& w = pcMesh->mBones[i]->mWeights[a]; - if(pcFace->mIndices[q] == w.mVertexId) - { - aiVertexWeight wNew; - wNew.mVertexId = iIndex; - wNew.mWeight = w.mWeight; - newWeights[i].push_back(wNew); - } - } - } - - pvPositions[iIndex] = pcMesh->mVertices[pcFace->mIndices[q]]; - - if (pcMesh->HasNormals()) - { - pvNormals[iIndex] = pcMesh->mNormals[pcFace->mIndices[q]]; - } - if (pcMesh->HasTangentsAndBitangents()) - { - pvTangents[iIndex] = pcMesh->mTangents[pcFace->mIndices[q]]; - pvBitangents[iIndex] = pcMesh->mBitangents[pcFace->mIndices[q]]; - } - - unsigned int p = 0; - while (pcMesh->HasTextureCoords(p)) - { - apvTextureCoords[p][iIndex] = pcMesh->mTextureCoords[p][pcFace->mIndices[q]]; - ++p; - } - p = 0; - while (pcMesh->HasVertexColors(p)) - { - apvColorSets[p][iIndex] = pcMesh->mColors[p][pcFace->mIndices[q]]; - ++p; - } - pcFace->mIndices[q] = iIndex; - } - } - - - - // build output vertex weights - for (unsigned int i = 0;i < pcMesh->mNumBones;++i) - { - delete [] pcMesh->mBones[i]->mWeights; - if (!newWeights[i].empty()) { - pcMesh->mBones[i]->mWeights = new aiVertexWeight[newWeights[i].size()]; - aiVertexWeight *weightToCopy = &( newWeights[i][0] ); - memcpy(pcMesh->mBones[i]->mWeights, weightToCopy, - sizeof(aiVertexWeight) * newWeights[i].size()); - } else { - pcMesh->mBones[i]->mWeights = NULL; - } - } - delete[] newWeights; - - // delete the old members - delete[] pcMesh->mVertices; - pcMesh->mVertices = pvPositions; - - p = 0; - while (pcMesh->HasTextureCoords(p)) - { - delete[] pcMesh->mTextureCoords[p]; - pcMesh->mTextureCoords[p] = apvTextureCoords[p]; - ++p; - } - p = 0; - while 
(pcMesh->HasVertexColors(p)) - { - delete[] pcMesh->mColors[p]; - pcMesh->mColors[p] = apvColorSets[p]; - ++p; - } - pcMesh->mNumVertices = iNumVerts; - - if (pcMesh->HasNormals()) - { - delete[] pcMesh->mNormals; - pcMesh->mNormals = pvNormals; - } - if (pcMesh->HasTangentsAndBitangents()) - { - delete[] pcMesh->mTangents; - pcMesh->mTangents = pvTangents; - delete[] pcMesh->mBitangents; - pcMesh->mBitangents = pvBitangents; - } - return (pcMesh->mNumVertices != iOldNumVertices); -} - - -// ------------------------------------------------------------------------------------------------ -bool IsMeshInVerboseFormat(const aiMesh* mesh) { - // avoid slow vector<bool> specialization - std::vector<unsigned int> seen(mesh->mNumVertices,0); - for(unsigned int i = 0; i < mesh->mNumFaces; ++i) { - const aiFace& f = mesh->mFaces[i]; - for(unsigned int j = 0; j < f.mNumIndices; ++j) { - if(++seen[f.mIndices[j]] == 2) { - // found a duplicate index - return false; - } - } - } - - return true; -} - -// ------------------------------------------------------------------------------------------------ -bool MakeVerboseFormatProcess::IsVerboseFormat(const aiScene* pScene) { - for(unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - if(!IsMeshInVerboseFormat(pScene->mMeshes[i])) { - return false; - } - } - - return true; -} diff --git a/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.h b/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.h deleted file mode 100644 index 8565d5933a..0000000000 --- a/thirdparty/assimp/code/PostProcessing/MakeVerboseFormat.h +++ /dev/null @@ -1,113 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to bring a given scene - into the verbose format that is expected by most postprocess steps. - This is the inverse of the "JoinIdenticalVertices" step. 
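The removed MakeVerboseFormat() above gives every face corner its own copy of each referenced vertex, so no index is shared between faces; IsVerboseFormat() checks exactly that by counting how often each index is referenced. A minimal sketch of the expansion for a plain position/index pair, not assimp code:

    // Give every face corner its own vertex so no index is referenced twice.
    #include <cstdint>
    #include <vector>

    struct Vec3 { float x, y, z; };

    static void MakeVerbose(std::vector<Vec3> &verts, std::vector<uint32_t> &indices) {
        std::vector<Vec3> expanded;
        expanded.reserve(indices.size());
        for (uint32_t i = 0; i < indices.size(); ++i) {
            expanded.push_back(verts[indices[i]]); // duplicate the referenced vertex
            indices[i] = i;                        // each corner now owns index i
        }
        verts.swap(expanded);
    }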
*/ -#ifndef AI_MAKEVERBOSEFORMAT_H_INC -#define AI_MAKEVERBOSEFORMAT_H_INC - -#include "Common/BaseProcess.h" - -struct aiMesh; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** MakeVerboseFormatProcess: Class to convert an asset to the verbose - * format which is expected by most postprocess steps. - * - * This is the inverse of what the "JoinIdenticalVertices" step is doing. - * This step has no official flag (since it wouldn't make sense to run it - * during import). It is intended for applications intending to modify the - * returned aiScene. After this step has been executed, they can execute - * other postprocess steps on the data. The code might also be useful to - * quickly adapt code that doesn't result in a verbose representation of - * the scene data. - * The step has been added because it was required by the viewer, however - * it has been moved to the main library since others might find it - * useful, too. */ -class ASSIMP_API_WINONLY MakeVerboseFormatProcess : public BaseProcess -{ -public: - - - MakeVerboseFormatProcess(); - ~MakeVerboseFormatProcess(); - -public: - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not */ - bool IsActive( unsigned int /*pFlags*/ ) const - { - // NOTE: There is no direct flag that corresponds to - // this postprocess step. - return false; - } - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. */ - void Execute( aiScene* pScene); - -public: - - // ------------------------------------------------------------------- - /** Checks whether the scene is already in verbose format. - * @param pScene The data to check. - * @return true if the scene is already in verbose format. */ - static bool IsVerboseFormat(const aiScene* pScene); - -private: - - //! Apply the postprocess step to a given submesh - bool MakeVerboseFormat (aiMesh* pcMesh); -}; - -} // end of namespace Assimp - -#endif // !!AI_KILLNORMALPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/OptimizeGraph.cpp b/thirdparty/assimp/code/PostProcessing/OptimizeGraph.cpp deleted file mode 100644 index 5db51f58b6..0000000000 --- a/thirdparty/assimp/code/PostProcessing/OptimizeGraph.cpp +++ /dev/null @@ -1,351 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file OptimizeGraph.cpp - * @brief Implementation of the aiProcess_OptimizGraph step - */ - - -#ifndef ASSIMP_BUILD_NO_OPTIMIZEGRAPH_PROCESS - -#include "OptimizeGraph.h" -#include "ProcessHelper.h" -#include <assimp/SceneCombiner.h> -#include <assimp/Exceptional.h> -#include <stdio.h> - -using namespace Assimp; - -#define AI_RESERVED_NODE_NAME "$Reserved_And_Evil" - -/* AI_OG_USE_HASHING enables the use of hashing to speed-up std::set lookups. - * The unhashed variant should be faster, except for *very* large data sets - */ -#ifdef AI_OG_USE_HASHING - // Use our standard hashing function to compute the hash -# define AI_OG_GETKEY(str) SuperFastHash(str.data,str.length) -#else - // Otherwise hope that std::string will utilize a static buffer - // for shorter node names. This would avoid endless heap copying. -# define AI_OG_GETKEY(str) std::string(str.data) -#endif - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -OptimizeGraphProcess::OptimizeGraphProcess() -: mScene() -, nodes_in() -, nodes_out() -, count_merged() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -OptimizeGraphProcess::~OptimizeGraphProcess() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool OptimizeGraphProcess::IsActive( unsigned int pFlags) const { - return (0 != (pFlags & aiProcess_OptimizeGraph)); -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties for the post-processing step -void OptimizeGraphProcess::SetupProperties(const Importer* pImp) { - // Get value of AI_CONFIG_PP_OG_EXCLUDE_LIST - std::string tmp = pImp->GetPropertyString(AI_CONFIG_PP_OG_EXCLUDE_LIST,""); - AddLockedNodeList(tmp); -} - -// ------------------------------------------------------------------------------------------------ -// Collect new children -void OptimizeGraphProcess::CollectNewChildren(aiNode* nd, std::list<aiNode*>& nodes) { - nodes_in += nd->mNumChildren; - - // Process children - std::list<aiNode*> child_nodes; - for (unsigned int i = 0; i < nd->mNumChildren; ++i) { - CollectNewChildren(nd->mChildren[i],child_nodes); - nd->mChildren[i] = nullptr; - } - - // Check whether we need this node; if not we can replace it by our own children (warn, danger of incest). - if (locked.find(AI_OG_GETKEY(nd->mName)) == locked.end() ) { - for (std::list<aiNode*>::iterator it = child_nodes.begin(); it != child_nodes.end();) { - - if (locked.find(AI_OG_GETKEY((*it)->mName)) == locked.end()) { - (*it)->mTransformation = nd->mTransformation * (*it)->mTransformation; - nodes.push_back(*it); - - it = child_nodes.erase(it); - continue; - } - ++it; - } - - if (nd->mNumMeshes || !child_nodes.empty()) { - nodes.push_back(nd); - } else { - delete nd; /* bye, node */ - return; - } - } else { - - // Retain our current position in the hierarchy - nodes.push_back(nd); - - // Now check for possible optimizations in our list of child nodes. join as many as possible - aiNode* join_master = NULL; - aiMatrix4x4 inv; - - const LockedSetType::const_iterator end = locked.end(); - - std::list<aiNode*> join; - for (std::list<aiNode*>::iterator it = child_nodes.begin(); it != child_nodes.end();) { - aiNode* child = *it; - if (child->mNumChildren == 0 && locked.find(AI_OG_GETKEY(child->mName)) == end) { - - // There may be no instanced meshes - unsigned int n = 0; - for (; n < child->mNumMeshes;++n) { - if (meshes[child->mMeshes[n]] > 1) { - break; - } - } - if (n == child->mNumMeshes) { - if (!join_master) { - join_master = child; - inv = join_master->mTransformation; - inv.Inverse(); - } else { - child->mTransformation = inv * child->mTransformation ; - - join.push_back(child); - it = child_nodes.erase(it); - continue; - } - } - } - ++it; - } - if (join_master && !join.empty()) { - join_master->mName.length = ::ai_snprintf(join_master->mName.data, MAXLEN, "$MergedNode_%i",count_merged++); - - unsigned int out_meshes = 0; - for (std::list<aiNode*>::iterator it = join.begin(); it != join.end(); ++it) { - out_meshes += (*it)->mNumMeshes; - } - - // copy all mesh references in one array - if (out_meshes) { - unsigned int* meshes = new unsigned int[out_meshes+join_master->mNumMeshes], *tmp = meshes; - for (unsigned int n = 0; n < join_master->mNumMeshes;++n) { - *tmp++ = join_master->mMeshes[n]; - } - - for (std::list<aiNode*>::iterator it = join.begin(); it != join.end(); ++it) { - for (unsigned int n = 0; n < (*it)->mNumMeshes; ++n) { - - *tmp = (*it)->mMeshes[n]; - aiMesh* mesh = mScene->mMeshes[*tmp++]; - - // manually move the mesh into the right coordinate system - const aiMatrix3x3 IT = aiMatrix3x3( (*it)->mTransformation ).Inverse().Transpose(); - for (unsigned int a = 0; a < mesh->mNumVertices; ++a) { - - mesh->mVertices[a] *= 
(*it)->mTransformation; - - if (mesh->HasNormals()) - mesh->mNormals[a] *= IT; - - if (mesh->HasTangentsAndBitangents()) { - mesh->mTangents[a] *= IT; - mesh->mBitangents[a] *= IT; - } - } - } - delete *it; // bye, node - } - delete[] join_master->mMeshes; - join_master->mMeshes = meshes; - join_master->mNumMeshes += out_meshes; - } - } - } - // reassign children if something changed - if (child_nodes.empty() || child_nodes.size() > nd->mNumChildren) { - - delete[] nd->mChildren; - - if (!child_nodes.empty()) { - nd->mChildren = new aiNode*[child_nodes.size()]; - } - else nd->mChildren = nullptr; - } - - nd->mNumChildren = static_cast<unsigned int>(child_nodes.size()); - - if (nd->mChildren) { - aiNode** tmp = nd->mChildren; - for (std::list<aiNode*>::iterator it = child_nodes.begin(); it != child_nodes.end(); ++it) { - aiNode* node = *tmp++ = *it; - node->mParent = nd; - } - } - - nodes_out += static_cast<unsigned int>(child_nodes.size()); -} - -// ------------------------------------------------------------------------------------------------ -// Execute the post-processing step on the given scene -void OptimizeGraphProcess::Execute( aiScene* pScene) { - ASSIMP_LOG_DEBUG("OptimizeGraphProcess begin"); - nodes_in = nodes_out = count_merged = 0; - mScene = pScene; - - meshes.resize(pScene->mNumMeshes,0); - FindInstancedMeshes(pScene->mRootNode); - - // build a blacklist of identifiers. If the name of a node matches one of these, we won't touch it - locked.clear(); - for (std::list<std::string>::const_iterator it = locked_nodes.begin(); it != locked_nodes.end(); ++it) { -#ifdef AI_OG_USE_HASHING - locked.insert(SuperFastHash((*it).c_str())); -#else - locked.insert(*it); -#endif - } - - for (unsigned int i = 0; i < pScene->mNumAnimations; ++i) { - for (unsigned int a = 0; a < pScene->mAnimations[i]->mNumChannels; ++a) { - aiNodeAnim* anim = pScene->mAnimations[i]->mChannels[a]; - locked.insert(AI_OG_GETKEY(anim->mNodeName)); - } - } - - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - for (unsigned int a = 0; a < pScene->mMeshes[i]->mNumBones; ++a) { - - aiBone* bone = pScene->mMeshes[i]->mBones[a]; - locked.insert(AI_OG_GETKEY(bone->mName)); - - // HACK: Meshes referencing bones may not be transformed; we need to look them. - // The easiest way to do this is to increase their reference counters ... - meshes[i] += 2; - } - } - - for (unsigned int i = 0; i < pScene->mNumCameras; ++i) { - aiCamera* cam = pScene->mCameras[i]; - locked.insert(AI_OG_GETKEY(cam->mName)); - } - - for (unsigned int i = 0; i < pScene->mNumLights; ++i) { - aiLight* lgh = pScene->mLights[i]; - locked.insert(AI_OG_GETKEY(lgh->mName)); - } - - // Insert a dummy master node and make it read-only - aiNode* dummy_root = new aiNode(AI_RESERVED_NODE_NAME); - locked.insert(AI_OG_GETKEY(dummy_root->mName)); - - const aiString prev = pScene->mRootNode->mName; - pScene->mRootNode->mParent = dummy_root; - - dummy_root->mChildren = new aiNode*[dummy_root->mNumChildren = 1]; - dummy_root->mChildren[0] = pScene->mRootNode; - - // Do our recursive processing of scenegraph nodes. For each node collect - // a fully new list of children and allow their children to place themselves - // on the same hierarchy layer as their parents. 
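When a child node is folded into its parent or into the join master above, its transform has to be baked into the mesh data: positions go through the full 4x4 matrix, normals and (bi)tangents through the inverse-transpose of the upper 3x3, exactly as in the merge loop. A reduced sketch of that baking step, using only assimp types and operators that appear in the removed code; the function name is invented:

    #include <assimp/mesh.h>
    #include <assimp/matrix3x3.h>
    #include <assimp/matrix4x4.h>

    // Bake a node's transform into a mesh so the node itself can be collapsed.
    static void BakeTransform(aiMesh *mesh, const aiMatrix4x4 &m) {
        // positions use the full 4x4, normals the inverse-transpose 3x3
        const aiMatrix3x3 it = aiMatrix3x3(m).Inverse().Transpose();
        for (unsigned int i = 0; i < mesh->mNumVertices; ++i) {
            mesh->mVertices[i] *= m;
            if (mesh->HasNormals())
                mesh->mNormals[i] *= it;
        }
    }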
- std::list<aiNode*> nodes; - CollectNewChildren (dummy_root,nodes); - - ai_assert(nodes.size() == 1); - - if (dummy_root->mNumChildren == 0) { - pScene->mRootNode = NULL; - throw DeadlyImportError("After optimizing the scene graph, no data remains"); - } - - if (dummy_root->mNumChildren > 1) { - pScene->mRootNode = dummy_root; - - // Keep the dummy node but assign the name of the old root node to it - pScene->mRootNode->mName = prev; - } - else { - - // Remove the dummy root node again. - pScene->mRootNode = dummy_root->mChildren[0]; - - dummy_root->mChildren[0] = NULL; - delete dummy_root; - } - - pScene->mRootNode->mParent = NULL; - if (!DefaultLogger::isNullLogger()) { - if ( nodes_in != nodes_out) { - ASSIMP_LOG_INFO_F("OptimizeGraphProcess finished; Input nodes: ", nodes_in, ", Output nodes: ", nodes_out); - } else { - ASSIMP_LOG_DEBUG("OptimizeGraphProcess finished"); - } - } - meshes.clear(); - locked.clear(); -} - -// ------------------------------------------------------------------------------------------------ -// Build a LUT of all instanced meshes -void OptimizeGraphProcess::FindInstancedMeshes (aiNode* pNode) -{ - for (unsigned int i = 0; i < pNode->mNumMeshes;++i) { - ++meshes[pNode->mMeshes[i]]; - } - - for (unsigned int i = 0; i < pNode->mNumChildren; ++i) - FindInstancedMeshes(pNode->mChildren[i]); -} - -#endif // !! ASSIMP_BUILD_NO_OPTIMIZEGRAPH_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/OptimizeGraph.h b/thirdparty/assimp/code/PostProcessing/OptimizeGraph.h deleted file mode 100644 index 82cc5db3fe..0000000000 --- a/thirdparty/assimp/code/PostProcessing/OptimizeGraph.h +++ /dev/null @@ -1,140 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file OptimizeGraph.h - * @brief Declares a post processing step to optimize the scenegraph - */ -#ifndef AI_OPTIMIZEGRAPHPROCESS_H_INC -#define AI_OPTIMIZEGRAPHPROCESS_H_INC - -#include "Common/BaseProcess.h" -#include "PostProcessing/ProcessHelper.h" - -#include <assimp/types.h> - -#include <set> - -// Forward declarations -struct aiMesh; - -class OptimizeGraphProcessTest; - -namespace Assimp { - -// ----------------------------------------------------------------------------- -/** @brief Postprocessing step to optimize the scenegraph - * - * The implementation tries to merge nodes, even if they use different - * transformations. Animations are preserved. - * - * @see aiProcess_OptimizeGraph for a detailed description of the - * algorithm being applied. - */ -class OptimizeGraphProcess : public BaseProcess { -public: - OptimizeGraphProcess(); - ~OptimizeGraphProcess(); - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** @brief Add a list of node names to be locked and not modified. - * @param in List of nodes. See #AI_CONFIG_PP_OG_EXCLUDE_LIST for - * format explanations. - */ - inline void AddLockedNodeList(std::string& in) { - ConvertListToStrings (in,locked_nodes); - } - - // ------------------------------------------------------------------- - /** @brief Add another node to be locked and not modified. - * @param name Name to be locked - */ - inline void AddLockedNode(std::string& name) { - locked_nodes.push_back(name); - } - - // ------------------------------------------------------------------- - /** @brief Remove a node from the list of locked nodes. - * @param name Name to be unlocked - */ - inline void RemoveLockedNode(std::string& name) { - locked_nodes.remove(name); - } - -protected: - void CollectNewChildren(aiNode* nd, std::list<aiNode*>& nodes); - void FindInstancedMeshes (aiNode* pNode); - -private: -#ifdef AI_OG_USE_HASHING - typedef std::set<unsigned int> LockedSetType; -#else - typedef std::set<std::string> LockedSetType; -#endif - - //! Scene we're working with - aiScene* mScene; - - //! List of locked names. Stored is the hash of the name - LockedSetType locked; - - //! List of nodes to be locked in addition to those with animations, lights or cameras assigned. - std::list<std::string> locked_nodes; - - //! Node counters for logging purposes - unsigned int nodes_in,nodes_out, count_merged; - - //! Reference counters for meshes - std::vector<unsigned int> meshes; -}; - -} // end of namespace Assimp - -#endif // AI_OPTIMIZEGRAPHPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.cpp b/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.cpp deleted file mode 100644 index 3f6765f6ca..0000000000 --- a/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.cpp +++ /dev/null @@ -1,256 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
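Since the class above merges any node that is not locked, callers that rely on specific node names (attachment points, sockets, and the like) are expected to list them in AI_CONFIG_PP_OG_EXCLUDE_LIST before importing. A hedged usage sketch; the node name and file path are placeholders, and the exact list format is the one documented for that key in config.h:

    #include <assimp/Importer.hpp>
    #include <assimp/config.h>
    #include <assimp/postprocess.h>
    #include <assimp/scene.h>

    const aiScene *LoadOptimized(Assimp::Importer &importer) {
        // node names to protect from merging; format per AI_CONFIG_PP_OG_EXCLUDE_LIST docs
        importer.SetPropertyString(AI_CONFIG_PP_OG_EXCLUDE_LIST, "MountPoint");
        return importer.ReadFile("level.fbx", aiProcess_OptimizeGraph);
    }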
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file OptimizeMeshes.cpp - * @brief Implementation of the aiProcess_OptimizeMeshes step - */ - - -#ifndef ASSIMP_BUILD_NO_OPTIMIZEMESHES_PROCESS - - -#include "OptimizeMeshes.h" -#include "ProcessHelper.h" -#include <assimp/SceneCombiner.h> -#include <assimp/Exceptional.h> - -using namespace Assimp; - -static const unsigned int NotSet = 0xffffffff; -static const unsigned int DeadBeef = 0xdeadbeef; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -OptimizeMeshesProcess::OptimizeMeshesProcess() - : mScene() - , pts(false) - , max_verts( NotSet ) - , max_faces( NotSet ) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -OptimizeMeshesProcess::~OptimizeMeshesProcess() { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool OptimizeMeshesProcess::IsActive( unsigned int pFlags) const -{ - // Our behaviour needs to be different if the SortByPType or SplitLargeMeshes - // steps are active. Thus we need to query their flags here and store the - // information, although we're breaking const-correctness. - // That's a serious design flaw, consider redesign. - if( 0 != (pFlags & aiProcess_OptimizeMeshes) ) { - pts = (0 != (pFlags & aiProcess_SortByPType)); - max_verts = ( 0 != ( pFlags & aiProcess_SplitLargeMeshes ) ) ? 
DeadBeef : max_verts; - return true; - } - return false; -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties for the post-processing step -void OptimizeMeshesProcess::SetupProperties(const Importer* pImp) -{ - if( max_verts == DeadBeef /* magic hack */ ) { - max_faces = pImp->GetPropertyInteger(AI_CONFIG_PP_SLM_TRIANGLE_LIMIT,AI_SLM_DEFAULT_MAX_TRIANGLES); - max_verts = pImp->GetPropertyInteger(AI_CONFIG_PP_SLM_VERTEX_LIMIT,AI_SLM_DEFAULT_MAX_VERTICES); - } -} - -// ------------------------------------------------------------------------------------------------ -// Execute step -void OptimizeMeshesProcess::Execute( aiScene* pScene) -{ - const unsigned int num_old = pScene->mNumMeshes; - if (num_old <= 1) { - ASSIMP_LOG_DEBUG("Skipping OptimizeMeshesProcess"); - return; - } - - ASSIMP_LOG_DEBUG("OptimizeMeshesProcess begin"); - mScene = pScene; - - // need to clear persistent members from previous runs - merge_list.resize( 0 ); - output.resize( 0 ); - - // ensure we have the right sizes - merge_list.reserve(pScene->mNumMeshes); - output.reserve(pScene->mNumMeshes); - - // Prepare lookup tables - meshes.resize(pScene->mNumMeshes); - FindInstancedMeshes(pScene->mRootNode); - if( max_verts == DeadBeef ) /* undo the magic hack */ - max_verts = NotSet; - - // ... instanced meshes are immediately processed and added to the output list - for (unsigned int i = 0, n = 0; i < pScene->mNumMeshes;++i) { - meshes[i].vertex_format = GetMeshVFormatUnique(pScene->mMeshes[i]); - - if (meshes[i].instance_cnt > 1 && meshes[i].output_id == NotSet ) { - meshes[i].output_id = n++; - output.push_back(mScene->mMeshes[i]); - } - } - - // and process all nodes in the scenegraph recursively - ProcessNode(pScene->mRootNode); - if (!output.size()) { - throw DeadlyImportError("OptimizeMeshes: No meshes remaining; there's definitely something wrong"); - } - - meshes.resize( 0 ); - ai_assert(output.size() <= num_old); - - mScene->mNumMeshes = static_cast<unsigned int>(output.size()); - std::copy(output.begin(),output.end(),mScene->mMeshes); - - if (output.size() != num_old) { - ASSIMP_LOG_DEBUG_F("OptimizeMeshesProcess finished. 
Input meshes: ", num_old, ", Output meshes: ", pScene->mNumMeshes); - } else { - ASSIMP_LOG_DEBUG( "OptimizeMeshesProcess finished" ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Process meshes for a single node -void OptimizeMeshesProcess::ProcessNode( aiNode* pNode) -{ - for (unsigned int i = 0; i < pNode->mNumMeshes;++i) { - unsigned int& im = pNode->mMeshes[i]; - - if (meshes[im].instance_cnt > 1) { - im = meshes[im].output_id; - } - else { - merge_list.resize( 0 ); - unsigned int verts = 0, faces = 0; - - // Find meshes to merge with us - for (unsigned int a = i+1; a < pNode->mNumMeshes;++a) { - unsigned int am = pNode->mMeshes[a]; - if (meshes[am].instance_cnt == 1 && CanJoin(im,am,verts,faces)) { - - merge_list.push_back(mScene->mMeshes[am]); - verts += mScene->mMeshes[am]->mNumVertices; - faces += mScene->mMeshes[am]->mNumFaces; - - pNode->mMeshes[a] = pNode->mMeshes[pNode->mNumMeshes - 1]; - --pNode->mNumMeshes; - --a; - } - } - - // and merge all meshes which we found, replace the old ones - if (!merge_list.empty()) { - merge_list.push_back(mScene->mMeshes[im]); - - aiMesh* out; - SceneCombiner::MergeMeshes(&out,0,merge_list.begin(),merge_list.end()); - output.push_back(out); - } else { - output.push_back(mScene->mMeshes[im]); - } - im = static_cast<unsigned int>(output.size()-1); - } - } - - - for( unsigned int i = 0; i < pNode->mNumChildren; ++i ) { - ProcessNode( pNode->mChildren[ i ] ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Check whether two meshes can be joined -bool OptimizeMeshesProcess::CanJoin ( unsigned int a, unsigned int b, unsigned int verts, unsigned int faces ) -{ - if (meshes[a].vertex_format != meshes[b].vertex_format) - return false; - - aiMesh* ma = mScene->mMeshes[a], *mb = mScene->mMeshes[b]; - - if ((NotSet != max_verts && verts+mb->mNumVertices > max_verts) || - (NotSet != max_faces && faces+mb->mNumFaces > max_faces)) { - return false; - } - - // Never merge unskinned meshes with skinned meshes - if (ma->mMaterialIndex != mb->mMaterialIndex || ma->HasBones() != mb->HasBones()) - return false; - - // Never merge meshes with different kinds of primitives if SortByPType did already - // do its work. We would destroy everything again ... - if (pts && ma->mPrimitiveTypes != mb->mPrimitiveTypes) - return false; - - // If both meshes are skinned, check whether we have many bones defined in both meshes. - // If yes, we can join them. - if (ma->HasBones()) { - // TODO - return false; - } - return true; -} - -// ------------------------------------------------------------------------------------------------ -// Build a LUT of all instanced meshes -void OptimizeMeshesProcess::FindInstancedMeshes (aiNode* pNode) -{ - for( unsigned int i = 0; i < pNode->mNumMeshes; ++i ) { - ++meshes[ pNode->mMeshes[ i ] ].instance_cnt; - } - - for( unsigned int i = 0; i < pNode->mNumChildren; ++i ) { - FindInstancedMeshes( pNode->mChildren[ i ] ); - } -} - -// ------------------------------------------------------------------------------------------------ - -#endif // !! 
ASSIMP_BUILD_NO_OPTIMIZEMESHES_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.h b/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.h deleted file mode 100644 index dec4ab52de..0000000000 --- a/thirdparty/assimp/code/PostProcessing/OptimizeMeshes.h +++ /dev/null @@ -1,186 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file OptimizeMeshes.h - * @brief Declares a post processing step to join meshes, if possible - */ -#ifndef AI_OPTIMIZEMESHESPROCESS_H_INC -#define AI_OPTIMIZEMESHESPROCESS_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/types.h> - -#include <vector> - -struct aiMesh; -struct aiNode; -class OptimizeMeshesProcessTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** @brief Postprocessing step to optimize mesh usage - * - * The implementation looks for meshes that could be joined and joins them. - * Usually this will reduce the number of drawcalls. - * - * @note Instanced meshes are currently not processed. - */ -class OptimizeMeshesProcess : public BaseProcess { -public: - /// @brief The class constructor. - OptimizeMeshesProcess(); - - /// @brief The class destructor. - ~OptimizeMeshesProcess(); - - /** @brief Internal utility to store additional mesh info - */ - struct MeshInfo { - MeshInfo() AI_NO_EXCEPT - : instance_cnt(0) - , vertex_format(0) - , output_id(0xffffffff) { - // empty - } - - //! Number of times this mesh is referenced - unsigned int instance_cnt; - - //! Vertex format id - unsigned int vertex_format; - - //! 
Output ID - unsigned int output_id; - }; - -public: - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - void SetupProperties(const Importer* pImp); - - - // ------------------------------------------------------------------- - /** @brief Specify whether you want meshes with different - * primitive types to be merged as well. - * - * IsActive() sets this property automatically to true if the - * aiProcess_SortByPType flag is found. - */ - void EnablePrimitiveTypeSorting(bool enable) { - pts = enable; - } - - // Getter - bool IsPrimitiveTypeSortingEnabled () const { - return pts; - } - - - // ------------------------------------------------------------------- - /** @brief Specify a maximum size of a single output mesh. - * - * If a single input mesh already exceeds this limit, it won't - * be split. - * @param verts Maximum number of vertices per mesh - * @param faces Maximum number of faces per mesh - */ - void SetPreferredMeshSizeLimit (unsigned int verts, unsigned int faces) - { - max_verts = verts; - max_faces = faces; - } - - -protected: - - // ------------------------------------------------------------------- - /** @brief Do the actual optimization on all meshes of this node - * @param pNode Node we're working with - */ - void ProcessNode( aiNode* pNode); - - // ------------------------------------------------------------------- - /** @brief Returns true if b can be joined with a - * - * @param verts Number of output verts up to now - * @param faces Number of output faces up to now - */ - bool CanJoin ( unsigned int a, unsigned int b, - unsigned int verts, unsigned int faces ); - - // ------------------------------------------------------------------- - /** @brief Find instanced meshes, for the moment we're excluding - * them from all optimizations - */ - void FindInstancedMeshes (aiNode* pNode); - -private: - - //! Scene we're working with - aiScene* mScene; - - //! Per mesh info - std::vector<MeshInfo> meshes; - - //! Output meshes - std::vector<aiMesh*> output; - - //! @see EnablePrimitiveTypeSorting - mutable bool pts; - - //! @see SetPreferredMeshSizeLimit - mutable unsigned int max_verts,max_faces; - - //! Temporary storage - std::vector<aiMesh*> merge_list; -}; - -} // end of namespace Assimp - -#endif // AI_CALCTANGENTSPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/PretransformVertices.cpp b/thirdparty/assimp/code/PostProcessing/PretransformVertices.cpp deleted file mode 100644 index 52001a0578..0000000000 --- a/thirdparty/assimp/code/PostProcessing/PretransformVertices.cpp +++ /dev/null @@ -1,728 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
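The merge decision made by CanJoin() in the removed .cpp boils down to: identical vertex format, identical material, compatible skinning state and primitive types, and a combined size below the optional vertex/face limits. A simplified standalone predicate capturing those checks, not assimp code (the real step only enforces the primitive-type and size checks when SortByPType / SplitLargeMeshes are active):

    // Two mesh descriptions may be merged only if their layouts and materials
    // agree and the combined size stays under the configured limits.
    struct MeshDesc {
        unsigned int vertexFormat;   // bitmask describing which attributes exist
        unsigned int materialIndex;
        unsigned int primitiveTypes;
        unsigned int numVertices;
        unsigned int numFaces;
        bool hasBones;
    };

    static bool CanMerge(const MeshDesc &a, const MeshDesc &b,
                         unsigned int vertsSoFar, unsigned int facesSoFar,
                         unsigned int maxVerts, unsigned int maxFaces) {
        if (a.vertexFormat != b.vertexFormat) return false;   // same attribute layout
        if (a.materialIndex != b.materialIndex) return false; // same material
        if (a.hasBones || b.hasBones) return false;           // skinned meshes are left alone here
        if (a.primitiveTypes != b.primitiveTypes) return false;
        if (vertsSoFar + b.numVertices > maxVerts) return false;
        if (facesSoFar + b.numFaces > maxFaces) return false;
        return true;
    }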
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file PretransformVertices.cpp - * @brief Implementation of the "PretransformVertices" post processing step -*/ - - -#include "PretransformVertices.h" -#include "ProcessHelper.h" -#include <assimp/SceneCombiner.h> -#include <assimp/Exceptional.h> - -using namespace Assimp; - -// some array offsets -#define AI_PTVS_VERTEX 0x0 -#define AI_PTVS_FACE 0x1 - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -PretransformVertices::PretransformVertices() -: configKeepHierarchy (false) -, configNormalize(false) -, configTransform(false) -, configTransformation() -, mConfigPointCloud( false ) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -PretransformVertices::~PretransformVertices() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool PretransformVertices::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_PreTransformVertices) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Setup import configuration -void PretransformVertices::SetupProperties(const Importer* pImp) -{ - // Get the current value of AI_CONFIG_PP_PTV_KEEP_HIERARCHY, AI_CONFIG_PP_PTV_NORMALIZE, - // AI_CONFIG_PP_PTV_ADD_ROOT_TRANSFORMATION and AI_CONFIG_PP_PTV_ROOT_TRANSFORMATION - configKeepHierarchy = (0 != pImp->GetPropertyInteger(AI_CONFIG_PP_PTV_KEEP_HIERARCHY,0)); - configNormalize = (0 != pImp->GetPropertyInteger(AI_CONFIG_PP_PTV_NORMALIZE,0)); - configTransform = (0 != pImp->GetPropertyInteger(AI_CONFIG_PP_PTV_ADD_ROOT_TRANSFORMATION,0)); - - configTransformation = pImp->GetPropertyMatrix(AI_CONFIG_PP_PTV_ROOT_TRANSFORMATION, aiMatrix4x4()); - - mConfigPointCloud = pImp->GetPropertyBool(AI_CONFIG_EXPORT_POINT_CLOUDS); -} - -// ------------------------------------------------------------------------------------------------ -// Count the number of nodes -unsigned int PretransformVertices::CountNodes( aiNode* pcNode ) -{ - unsigned int iRet = 1; - for (unsigned int i = 0;i < pcNode->mNumChildren;++i) - { - iRet += CountNodes(pcNode->mChildren[i]); - } - return iRet; -} - -// ------------------------------------------------------------------------------------------------ -// Get a bitwise combination identifying the vertex format of a mesh -unsigned int PretransformVertices::GetMeshVFormat( aiMesh* pcMesh ) -{ - // the vertex format is stored in aiMesh::mBones for later retrieval. - // there isn't a good reason to compute it a few hundred times - // from scratch. The pointer is unused as animations are lost - // during PretransformVertices. 
- if (pcMesh->mBones) - return (unsigned int)(uint64_t)pcMesh->mBones; - - - const unsigned int iRet = GetMeshVFormatUnique(pcMesh); - - // store the value for later use - pcMesh->mBones = (aiBone**)(uint64_t)iRet; - return iRet; -} - -// ------------------------------------------------------------------------------------------------ -// Count the number of vertices in the whole scene and a given -// material index -void PretransformVertices::CountVerticesAndFaces( aiScene* pcScene, aiNode* pcNode, unsigned int iMat, - unsigned int iVFormat, unsigned int* piFaces, unsigned int* piVertices) -{ - for (unsigned int i = 0; i < pcNode->mNumMeshes;++i) - { - aiMesh* pcMesh = pcScene->mMeshes[ pcNode->mMeshes[i] ]; - if (iMat == pcMesh->mMaterialIndex && iVFormat == GetMeshVFormat(pcMesh)) - { - *piVertices += pcMesh->mNumVertices; - *piFaces += pcMesh->mNumFaces; - } - } - for (unsigned int i = 0;i < pcNode->mNumChildren;++i) - { - CountVerticesAndFaces(pcScene,pcNode->mChildren[i],iMat, - iVFormat,piFaces,piVertices); - } -} - -// ------------------------------------------------------------------------------------------------ -// Collect vertex/face data -void PretransformVertices::CollectData( aiScene* pcScene, aiNode* pcNode, unsigned int iMat, - unsigned int iVFormat, aiMesh* pcMeshOut, - unsigned int aiCurrent[2], unsigned int* num_refs) -{ - // No need to multiply if there's no transformation - const bool identity = pcNode->mTransformation.IsIdentity(); - for (unsigned int i = 0; i < pcNode->mNumMeshes;++i) - { - aiMesh* pcMesh = pcScene->mMeshes[ pcNode->mMeshes[i] ]; - if (iMat == pcMesh->mMaterialIndex && iVFormat == GetMeshVFormat(pcMesh)) - { - // Decrement mesh reference counter - unsigned int& num_ref = num_refs[pcNode->mMeshes[i]]; - ai_assert(0 != num_ref); - --num_ref; - // Save the name of the last mesh - if (num_ref==0) - { - pcMeshOut->mName = pcMesh->mName; - } - - if (identity) { - // copy positions without modifying them - ::memcpy(pcMeshOut->mVertices + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mVertices, - pcMesh->mNumVertices * sizeof(aiVector3D)); - - if (iVFormat & 0x2) { - // copy normals without modifying them - ::memcpy(pcMeshOut->mNormals + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mNormals, - pcMesh->mNumVertices * sizeof(aiVector3D)); - } - if (iVFormat & 0x4) - { - // copy tangents without modifying them - ::memcpy(pcMeshOut->mTangents + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mTangents, - pcMesh->mNumVertices * sizeof(aiVector3D)); - // copy bitangents without modifying them - ::memcpy(pcMeshOut->mBitangents + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mBitangents, - pcMesh->mNumVertices * sizeof(aiVector3D)); - } - } - else - { - // copy positions, transform them to worldspace - for (unsigned int n = 0; n < pcMesh->mNumVertices;++n) { - pcMeshOut->mVertices[aiCurrent[AI_PTVS_VERTEX]+n] = pcNode->mTransformation * pcMesh->mVertices[n]; - } - aiMatrix4x4 mWorldIT = pcNode->mTransformation; - mWorldIT.Inverse().Transpose(); - - // TODO: implement Inverse() for aiMatrix3x3 - aiMatrix3x3 m = aiMatrix3x3(mWorldIT); - - if (iVFormat & 0x2) - { - // copy normals, transform them to worldspace - for (unsigned int n = 0; n < pcMesh->mNumVertices;++n) { - pcMeshOut->mNormals[aiCurrent[AI_PTVS_VERTEX]+n] = - (m * pcMesh->mNormals[n]).Normalize(); - } - } - if (iVFormat & 0x4) - { - // copy tangents and bitangents, transform them to worldspace - for (unsigned int n = 0; n < pcMesh->mNumVertices;++n) { - pcMeshOut->mTangents [aiCurrent[AI_PTVS_VERTEX]+n] = (m * 
pcMesh->mTangents[n]).Normalize(); - pcMeshOut->mBitangents[aiCurrent[AI_PTVS_VERTEX]+n] = (m * pcMesh->mBitangents[n]).Normalize(); - } - } - } - unsigned int p = 0; - while (iVFormat & (0x100 << p)) - { - // copy texture coordinates - memcpy(pcMeshOut->mTextureCoords[p] + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mTextureCoords[p], - pcMesh->mNumVertices * sizeof(aiVector3D)); - ++p; - } - p = 0; - while (iVFormat & (0x1000000 << p)) - { - // copy vertex colors - memcpy(pcMeshOut->mColors[p] + aiCurrent[AI_PTVS_VERTEX], - pcMesh->mColors[p], - pcMesh->mNumVertices * sizeof(aiColor4D)); - ++p; - } - // now we need to copy all faces. since we will delete the source mesh afterwards, - // we don't need to reallocate the array of indices except if this mesh is - // referenced multiple times. - for (unsigned int planck = 0;planck < pcMesh->mNumFaces;++planck) - { - aiFace& f_src = pcMesh->mFaces[planck]; - aiFace& f_dst = pcMeshOut->mFaces[aiCurrent[AI_PTVS_FACE]+planck]; - - const unsigned int num_idx = f_src.mNumIndices; - - f_dst.mNumIndices = num_idx; - - unsigned int* pi; - if (!num_ref) { /* if last time the mesh is referenced -> no reallocation */ - pi = f_dst.mIndices = f_src.mIndices; - - // offset all vertex indices - for (unsigned int hahn = 0; hahn < num_idx;++hahn){ - pi[hahn] += aiCurrent[AI_PTVS_VERTEX]; - } - } - else { - pi = f_dst.mIndices = new unsigned int[num_idx]; - - // copy and offset all vertex indices - for (unsigned int hahn = 0; hahn < num_idx;++hahn){ - pi[hahn] = f_src.mIndices[hahn] + aiCurrent[AI_PTVS_VERTEX]; - } - } - - // Update the mPrimitiveTypes member of the mesh - switch (pcMesh->mFaces[planck].mNumIndices) - { - case 0x1: - pcMeshOut->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 0x2: - pcMeshOut->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 0x3: - pcMeshOut->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - pcMeshOut->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - break; - }; - } - aiCurrent[AI_PTVS_VERTEX] += pcMesh->mNumVertices; - aiCurrent[AI_PTVS_FACE] += pcMesh->mNumFaces; - } - } - - // append all children of us - for (unsigned int i = 0;i < pcNode->mNumChildren;++i) { - CollectData(pcScene,pcNode->mChildren[i],iMat, - iVFormat,pcMeshOut,aiCurrent,num_refs); - } -} - -// ------------------------------------------------------------------------------------------------ -// Get a list of all vertex formats that occur for a given material index -// The output list contains duplicate elements -void PretransformVertices::GetVFormatList( aiScene* pcScene, unsigned int iMat, - std::list<unsigned int>& aiOut) -{ - for (unsigned int i = 0; i < pcScene->mNumMeshes;++i) - { - aiMesh* pcMesh = pcScene->mMeshes[ i ]; - if (iMat == pcMesh->mMaterialIndex) { - aiOut.push_back(GetMeshVFormat(pcMesh)); - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Compute the absolute transformation matrices of each node -void PretransformVertices::ComputeAbsoluteTransform( aiNode* pcNode ) -{ - if (pcNode->mParent) { - pcNode->mTransformation = pcNode->mParent->mTransformation*pcNode->mTransformation; - } - - for (unsigned int i = 0;i < pcNode->mNumChildren;++i) { - ComputeAbsoluteTransform(pcNode->mChildren[i]); - } -} - -// ------------------------------------------------------------------------------------------------ -// Apply the node transformation to a mesh -void PretransformVertices::ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat) -{ - // Check whether we need 
to transform the coordinates at all - if (!mat.IsIdentity()) { - - if (mesh->HasPositions()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mVertices[i] = mat * mesh->mVertices[i]; - } - } - if (mesh->HasNormals() || mesh->HasTangentsAndBitangents()) { - aiMatrix4x4 mWorldIT = mat; - mWorldIT.Inverse().Transpose(); - - // TODO: implement Inverse() for aiMatrix3x3 - aiMatrix3x3 m = aiMatrix3x3(mWorldIT); - - if (mesh->HasNormals()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mNormals[i] = (m * mesh->mNormals[i]).Normalize(); - } - } - if (mesh->HasTangentsAndBitangents()) { - for (unsigned int i = 0; i < mesh->mNumVertices; ++i) { - mesh->mTangents[i] = (m * mesh->mTangents[i]).Normalize(); - mesh->mBitangents[i] = (m * mesh->mBitangents[i]).Normalize(); - } - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Simple routine to build meshes in worldspace, no further optimization -void PretransformVertices::BuildWCSMeshes(std::vector<aiMesh*>& out, aiMesh** in, - unsigned int numIn, aiNode* node) -{ - // NOTE: - // aiMesh::mNumBones store original source mesh, or UINT_MAX if not a copy - // aiMesh::mBones store reference to abs. transform we multiplied with - - // process meshes - for (unsigned int i = 0; i < node->mNumMeshes;++i) { - aiMesh* mesh = in[node->mMeshes[i]]; - - // check whether we can operate on this mesh - if (!mesh->mBones || *reinterpret_cast<aiMatrix4x4*>(mesh->mBones) == node->mTransformation) { - // yes, we can. - mesh->mBones = reinterpret_cast<aiBone**> (&node->mTransformation); - mesh->mNumBones = UINT_MAX; - } - else { - - // try to find us in the list of newly created meshes - for (unsigned int n = 0; n < out.size(); ++n) { - aiMesh* ctz = out[n]; - if (ctz->mNumBones == node->mMeshes[i] && *reinterpret_cast<aiMatrix4x4*>(ctz->mBones) == node->mTransformation) { - - // ok, use this one. Update node mesh index - node->mMeshes[i] = numIn + n; - } - } - if (node->mMeshes[i] < numIn) { - // Worst case. 
Need to operate on a full copy of the mesh - ASSIMP_LOG_INFO("PretransformVertices: Copying mesh due to mismatching transforms"); - aiMesh* ntz; - - const unsigned int tmp = mesh->mNumBones; // - mesh->mNumBones = 0; - SceneCombiner::Copy(&ntz,mesh); - mesh->mNumBones = tmp; - - ntz->mNumBones = node->mMeshes[i]; - ntz->mBones = reinterpret_cast<aiBone**> (&node->mTransformation); - - out.push_back(ntz); - - node->mMeshes[i] = static_cast<unsigned int>(numIn + out.size() - 1); - } - } - } - - // call children - for (unsigned int i = 0; i < node->mNumChildren;++i) - BuildWCSMeshes(out,in,numIn,node->mChildren[i]); -} - -// ------------------------------------------------------------------------------------------------ -// Reset transformation matrices to identity -void PretransformVertices::MakeIdentityTransform(aiNode* nd) -{ - nd->mTransformation = aiMatrix4x4(); - - // call children - for (unsigned int i = 0; i < nd->mNumChildren;++i) - MakeIdentityTransform(nd->mChildren[i]); -} - -// ------------------------------------------------------------------------------------------------ -// Build reference counters for all meshes -void PretransformVertices::BuildMeshRefCountArray(aiNode* nd, unsigned int * refs) -{ - for (unsigned int i = 0; i< nd->mNumMeshes;++i) - refs[nd->mMeshes[i]]++; - - // call children - for (unsigned int i = 0; i < nd->mNumChildren;++i) - BuildMeshRefCountArray(nd->mChildren[i],refs); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void PretransformVertices::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("PretransformVerticesProcess begin"); - - // Return immediately if we have no meshes - if (!pScene->mNumMeshes) - return; - - const unsigned int iOldMeshes = pScene->mNumMeshes; - const unsigned int iOldAnimationChannels = pScene->mNumAnimations; - const unsigned int iOldNodes = CountNodes(pScene->mRootNode); - - if(configTransform) { - pScene->mRootNode->mTransformation = configTransformation; - } - - // first compute absolute transformation matrices for all nodes - ComputeAbsoluteTransform(pScene->mRootNode); - - // Delete aiMesh::mBones for all meshes. The bones are - // removed during this step and we need the pointer as - // temporary storage - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) { - aiMesh* mesh = pScene->mMeshes[i]; - - for (unsigned int a = 0; a < mesh->mNumBones;++a) - delete mesh->mBones[a]; - - delete[] mesh->mBones; - mesh->mBones = NULL; - } - - // now build a list of output meshes - std::vector<aiMesh*> apcOutMeshes; - - // Keep scene hierarchy? It's an easy job in this case ... - // we go on and transform all meshes, if one is referenced by nodes - // with different absolute transformations a depth copy of the mesh - // is required. - if( configKeepHierarchy ) { - - // Hack: store the matrix we're transforming a mesh with in aiMesh::mBones - BuildWCSMeshes(apcOutMeshes,pScene->mMeshes,pScene->mNumMeshes, pScene->mRootNode); - - // ... 
if new meshes have been generated, append them to the end of the scene - if (apcOutMeshes.size() > 0) { - aiMesh** npp = new aiMesh*[pScene->mNumMeshes + apcOutMeshes.size()]; - - memcpy(npp,pScene->mMeshes,sizeof(aiMesh*)*pScene->mNumMeshes); - memcpy(npp+pScene->mNumMeshes,&apcOutMeshes[0],sizeof(aiMesh*)*apcOutMeshes.size()); - - pScene->mNumMeshes += static_cast<unsigned int>(apcOutMeshes.size()); - delete[] pScene->mMeshes; pScene->mMeshes = npp; - } - - // now iterate through all meshes and transform them to worldspace - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - ApplyTransform(pScene->mMeshes[i],*reinterpret_cast<aiMatrix4x4*>( pScene->mMeshes[i]->mBones )); - - // prevent improper destruction - pScene->mMeshes[i]->mBones = NULL; - pScene->mMeshes[i]->mNumBones = 0; - } - } else { - apcOutMeshes.reserve(pScene->mNumMaterials<<1u); - std::list<unsigned int> aiVFormats; - - std::vector<unsigned int> s(pScene->mNumMeshes,0); - BuildMeshRefCountArray(pScene->mRootNode,&s[0]); - - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) { - // get the list of all vertex formats for this material - aiVFormats.clear(); - GetVFormatList(pScene,i,aiVFormats); - aiVFormats.sort(); - aiVFormats.unique(); - for (std::list<unsigned int>::const_iterator j = aiVFormats.begin();j != aiVFormats.end();++j) { - unsigned int iVertices = 0; - unsigned int iFaces = 0; - CountVerticesAndFaces(pScene,pScene->mRootNode,i,*j,&iFaces,&iVertices); - if (0 != iFaces && 0 != iVertices) - { - apcOutMeshes.push_back(new aiMesh()); - aiMesh* pcMesh = apcOutMeshes.back(); - pcMesh->mNumFaces = iFaces; - pcMesh->mNumVertices = iVertices; - pcMesh->mFaces = new aiFace[iFaces]; - pcMesh->mVertices = new aiVector3D[iVertices]; - pcMesh->mMaterialIndex = i; - if ((*j) & 0x2)pcMesh->mNormals = new aiVector3D[iVertices]; - if ((*j) & 0x4) - { - pcMesh->mTangents = new aiVector3D[iVertices]; - pcMesh->mBitangents = new aiVector3D[iVertices]; - } - iFaces = 0; - while ((*j) & (0x100 << iFaces)) - { - pcMesh->mTextureCoords[iFaces] = new aiVector3D[iVertices]; - if ((*j) & (0x10000 << iFaces))pcMesh->mNumUVComponents[iFaces] = 3; - else pcMesh->mNumUVComponents[iFaces] = 2; - iFaces++; - } - iFaces = 0; - while ((*j) & (0x1000000 << iFaces)) - pcMesh->mColors[iFaces++] = new aiColor4D[iVertices]; - - // fill the mesh ... - unsigned int aiTemp[2] = {0,0}; - CollectData(pScene,pScene->mRootNode,i,*j,pcMesh,aiTemp,&s[0]); - } - } - } - - // If no meshes are referenced in the node graph it is possible that we get no output meshes. - if (apcOutMeshes.empty()) { - - throw DeadlyImportError("No output meshes: all meshes are orphaned and are not referenced by any nodes"); - } - else - { - // now delete all meshes in the scene and build a new mesh list - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) - { - aiMesh* mesh = pScene->mMeshes[i]; - mesh->mNumBones = 0; - mesh->mBones = NULL; - - // we're reusing the face index arrays. avoid destruction - for (unsigned int a = 0; a < mesh->mNumFaces; ++a) { - mesh->mFaces[a].mNumIndices = 0; - mesh->mFaces[a].mIndices = NULL; - } - - delete mesh; - - // Invalidate the contents of the old mesh array. We will most - // likely have less output meshes now, so the last entries of - // the mesh array are not overridden. We set them to NULL to - // make sure the developer gets notified when his application - // attempts to access these fields ... 
- mesh = NULL; - } - - // It is impossible that we have more output meshes than - // input meshes, so we can easily reuse the old mesh array - pScene->mNumMeshes = (unsigned int)apcOutMeshes.size(); - for (unsigned int i = 0; i < pScene->mNumMeshes;++i) { - pScene->mMeshes[i] = apcOutMeshes[i]; - } - } - } - - // remove all animations from the scene - for (unsigned int i = 0; i < pScene->mNumAnimations;++i) - delete pScene->mAnimations[i]; - delete[] pScene->mAnimations; - - pScene->mAnimations = NULL; - pScene->mNumAnimations = 0; - - // --- we need to keep all cameras and lights - for (unsigned int i = 0; i < pScene->mNumCameras;++i) - { - aiCamera* cam = pScene->mCameras[i]; - const aiNode* nd = pScene->mRootNode->FindNode(cam->mName); - ai_assert(NULL != nd); - - // multiply all properties of the camera with the absolute - // transformation of the corresponding node - cam->mPosition = nd->mTransformation * cam->mPosition; - cam->mLookAt = aiMatrix3x3( nd->mTransformation ) * cam->mLookAt; - cam->mUp = aiMatrix3x3( nd->mTransformation ) * cam->mUp; - } - - for (unsigned int i = 0; i < pScene->mNumLights;++i) - { - aiLight* l = pScene->mLights[i]; - const aiNode* nd = pScene->mRootNode->FindNode(l->mName); - ai_assert(NULL != nd); - - // multiply all properties of the camera with the absolute - // transformation of the corresponding node - l->mPosition = nd->mTransformation * l->mPosition; - l->mDirection = aiMatrix3x3( nd->mTransformation ) * l->mDirection; - l->mUp = aiMatrix3x3( nd->mTransformation ) * l->mUp; - } - - if( !configKeepHierarchy ) { - - // now delete all nodes in the scene and build a new - // flat node graph with a root node and some level 1 children - aiNode* newRoot = new aiNode(); - newRoot->mName = pScene->mRootNode->mName; - delete pScene->mRootNode; - pScene->mRootNode = newRoot; - - if (1 == pScene->mNumMeshes && !pScene->mNumLights && !pScene->mNumCameras) - { - pScene->mRootNode->mNumMeshes = 1; - pScene->mRootNode->mMeshes = new unsigned int[1]; - pScene->mRootNode->mMeshes[0] = 0; - } - else - { - pScene->mRootNode->mNumChildren = pScene->mNumMeshes+pScene->mNumLights+pScene->mNumCameras; - aiNode** nodes = pScene->mRootNode->mChildren = new aiNode*[pScene->mRootNode->mNumChildren]; - - // generate mesh nodes - for (unsigned int i = 0; i < pScene->mNumMeshes;++i,++nodes) - { - aiNode* pcNode = new aiNode(); - *nodes = pcNode; - pcNode->mParent = pScene->mRootNode; - pcNode->mName = pScene->mMeshes[i]->mName; - - // setup mesh indices - pcNode->mNumMeshes = 1; - pcNode->mMeshes = new unsigned int[1]; - pcNode->mMeshes[0] = i; - } - // generate light nodes - for (unsigned int i = 0; i < pScene->mNumLights;++i,++nodes) - { - aiNode* pcNode = new aiNode(); - *nodes = pcNode; - pcNode->mParent = pScene->mRootNode; - pcNode->mName.length = ai_snprintf(pcNode->mName.data, MAXLEN, "light_%u",i); - pScene->mLights[i]->mName = pcNode->mName; - } - // generate camera nodes - for (unsigned int i = 0; i < pScene->mNumCameras;++i,++nodes) - { - aiNode* pcNode = new aiNode(); - *nodes = pcNode; - pcNode->mParent = pScene->mRootNode; - pcNode->mName.length = ::ai_snprintf(pcNode->mName.data,MAXLEN,"cam_%u",i); - pScene->mCameras[i]->mName = pcNode->mName; - } - } - } - else { - // ... 
and finally set the transformation matrix of all nodes to identity - MakeIdentityTransform(pScene->mRootNode); - } - - if (configNormalize) { - // compute the boundary of all meshes - aiVector3D min,max; - MinMaxChooser<aiVector3D> ()(min,max); - - for (unsigned int a = 0; a < pScene->mNumMeshes; ++a) { - aiMesh* m = pScene->mMeshes[a]; - for (unsigned int i = 0; i < m->mNumVertices;++i) { - min = std::min(m->mVertices[i],min); - max = std::max(m->mVertices[i],max); - } - } - - // find the dominant axis - aiVector3D d = max-min; - const ai_real div = std::max(d.x,std::max(d.y,d.z))*ai_real( 0.5); - - d = min + d * (ai_real)0.5; - for (unsigned int a = 0; a < pScene->mNumMeshes; ++a) { - aiMesh* m = pScene->mMeshes[a]; - for (unsigned int i = 0; i < m->mNumVertices;++i) { - m->mVertices[i] = (m->mVertices[i]-d)/div; - } - } - } - - // print statistics - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_DEBUG("PretransformVerticesProcess finished"); - - ASSIMP_LOG_INFO_F("Removed ", iOldNodes, " nodes and ", iOldAnimationChannels, " animation channels (", - CountNodes(pScene->mRootNode) ," output nodes)" ); - ASSIMP_LOG_INFO_F("Kept ", pScene->mNumLights, " lights and ", pScene->mNumCameras, " cameras." ); - ASSIMP_LOG_INFO_F("Moved ", iOldMeshes, " meshes to WCS (number of output meshes: ", pScene->mNumMeshes, ")"); - } -} diff --git a/thirdparty/assimp/code/PostProcessing/PretransformVertices.h b/thirdparty/assimp/code/PostProcessing/PretransformVertices.h deleted file mode 100644 index b2982951e0..0000000000 --- a/thirdparty/assimp/code/PostProcessing/PretransformVertices.h +++ /dev/null @@ -1,166 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file PretransformVertices.h - * @brief Defines a post processing step to pretransform all - * vertices in the scenegraph - */ -#ifndef AI_PRETRANSFORMVERTICES_H_INC -#define AI_PRETRANSFORMVERTICES_H_INC - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> - -#include <list> -#include <vector> - -// Forward declarations -struct aiNode; - -class PretransformVerticesTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** The PretransformVertices pre-transforms all vertices in the node tree - * and removes the whole graph. The output is a list of meshes, one for - * each material. -*/ -class ASSIMP_API PretransformVertices : public BaseProcess { -public: - PretransformVertices (); - ~PretransformVertices (); - - // ------------------------------------------------------------------- - // Check whether step is active - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Execute step on a given scene - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - // Setup import settings - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** @brief Toggle the 'keep hierarchy' option - * @param keep true for keep configuration. - */ - void KeepHierarchy(bool keep) { - configKeepHierarchy = keep; - } - - // ------------------------------------------------------------------- - /** @brief Check whether 'keep hierarchy' is currently enabled. - * @return ... - */ - bool IsHierarchyKept() const { - return configKeepHierarchy; - } - -private: - // ------------------------------------------------------------------- - // Count the number of nodes - unsigned int CountNodes( aiNode* pcNode ); - - // ------------------------------------------------------------------- - // Get a bitwise combination identifying the vertex format of a mesh - unsigned int GetMeshVFormat(aiMesh* pcMesh); - - // ------------------------------------------------------------------- - // Count the number of vertices in the whole scene and a given - // material index - void CountVerticesAndFaces( aiScene* pcScene, aiNode* pcNode, - unsigned int iMat, - unsigned int iVFormat, - unsigned int* piFaces, - unsigned int* piVertices); - - // ------------------------------------------------------------------- - // Collect vertex/face data - void CollectData( aiScene* pcScene, aiNode* pcNode, - unsigned int iMat, - unsigned int iVFormat, - aiMesh* pcMeshOut, - unsigned int aiCurrent[2], - unsigned int* num_refs); - - // ------------------------------------------------------------------- - // Get a list of all vertex formats that occur for a given material - // The output list contains duplicate elements - void GetVFormatList( aiScene* pcScene, unsigned int iMat, - std::list<unsigned int>& aiOut); - - // ------------------------------------------------------------------- - // Compute the absolute transformation matrices of each node - void ComputeAbsoluteTransform( aiNode* pcNode ); - - // ------------------------------------------------------------------- - // Simple routine to build meshes in worldspace, no further optimization - void BuildWCSMeshes(std::vector<aiMesh*>& out, aiMesh** in, - unsigned int numIn, aiNode* node); - - // ------------------------------------------------------------------- - // 
Apply the node transformation to a mesh - void ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat); - - // ------------------------------------------------------------------- - // Reset transformation matrices to identity - void MakeIdentityTransform(aiNode* nd); - - // ------------------------------------------------------------------- - // Build reference counters for all meshes - void BuildMeshRefCountArray(aiNode* nd, unsigned int * refs); - - //! Configuration option: keep scene hierarchy as long as possible - bool configKeepHierarchy; - bool configNormalize; - bool configTransform; - aiMatrix4x4 configTransformation; - bool mConfigPointCloud; -}; - -} // end of namespace Assimp - -#endif // !!AI_GENFACENORMALPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ProcessHelper.cpp b/thirdparty/assimp/code/PostProcessing/ProcessHelper.cpp deleted file mode 100644 index 59869fdff7..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ProcessHelper.cpp +++ /dev/null @@ -1,443 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/// @file ProcessHelper.cpp -/** Implement shared utility functions for postprocessing steps */ - - -#include "ProcessHelper.h" - - -#include <limits> - -namespace Assimp { - -// ------------------------------------------------------------------------------- -void ConvertListToStrings(const std::string& in, std::list<std::string>& out) -{ - const char* s = in.c_str(); - while (*s) { - SkipSpacesAndLineEnd(&s); - if (*s == '\'') { - const char* base = ++s; - while (*s != '\'') { - ++s; - if (*s == '\0') { - ASSIMP_LOG_ERROR("ConvertListToString: String list is ill-formatted"); - return; - } - } - out.push_back(std::string(base,(size_t)(s-base))); - ++s; - } - else { - out.push_back(GetNextToken(s)); - } - } -} - -// ------------------------------------------------------------------------------- -void FindAABBTransformed (const aiMesh* mesh, aiVector3D& min, aiVector3D& max, - const aiMatrix4x4& m) -{ - min = aiVector3D ( ai_real( 10e10 ), ai_real( 10e10 ), ai_real( 10e10 ) ); - max = aiVector3D ( ai_real( -10e10 ), ai_real( -10e10 ), ai_real( -10e10 ) ); - for (unsigned int i = 0;i < mesh->mNumVertices;++i) - { - const aiVector3D v = m * mesh->mVertices[i]; - min = std::min(v,min); - max = std::max(v,max); - } -} - -// ------------------------------------------------------------------------------- -void FindMeshCenter (aiMesh* mesh, aiVector3D& out, aiVector3D& min, aiVector3D& max) -{ - ArrayBounds(mesh->mVertices,mesh->mNumVertices, min,max); - out = min + (max-min)*(ai_real)0.5; -} - -// ------------------------------------------------------------------------------- -void FindSceneCenter (aiScene* scene, aiVector3D& out, aiVector3D& min, aiVector3D& max) { - if ( NULL == scene ) { - return; - } - - if ( 0 == scene->mNumMeshes ) { - return; - } - FindMeshCenter(scene->mMeshes[0], out, min, max); - for (unsigned int i = 1; i < scene->mNumMeshes; ++i) { - aiVector3D tout, tmin, tmax; - FindMeshCenter(scene->mMeshes[i], tout, tmin, tmax); - if (min[0] > tmin[0]) min[0] = tmin[0]; - if (min[1] > tmin[1]) min[1] = tmin[1]; - if (min[2] > tmin[2]) min[2] = tmin[2]; - if (max[0] < tmax[0]) max[0] = tmax[0]; - if (max[1] < tmax[1]) max[1] = tmax[1]; - if (max[2] < tmax[2]) max[2] = tmax[2]; - } - out = min + (max-min)*(ai_real)0.5; -} - - -// ------------------------------------------------------------------------------- -void FindMeshCenterTransformed (aiMesh* mesh, aiVector3D& out, aiVector3D& min, - aiVector3D& max, const aiMatrix4x4& m) -{ - FindAABBTransformed(mesh,min,max,m); - out = min + (max-min)*(ai_real)0.5; -} - -// ------------------------------------------------------------------------------- -void FindMeshCenter (aiMesh* mesh, aiVector3D& out) -{ - aiVector3D min,max; - FindMeshCenter(mesh,out,min,max); -} - -// ------------------------------------------------------------------------------- -void FindMeshCenterTransformed (aiMesh* mesh, aiVector3D& out, - const aiMatrix4x4& m) -{ - aiVector3D min,max; - FindMeshCenterTransformed(mesh,out,min,max,m); -} - -// ------------------------------------------------------------------------------- -ai_real ComputePositionEpsilon(const aiMesh* pMesh) -{ - const ai_real epsilon = ai_real( 1e-4 ); - - // calculate the position bounds so we have a reliable epsilon to check position differences against - aiVector3D minVec, maxVec; - ArrayBounds(pMesh->mVertices,pMesh->mNumVertices,minVec,maxVec); - return (maxVec - minVec).Length() * epsilon; -} - -// 
------------------------------------------------------------------------------- -ai_real ComputePositionEpsilon(const aiMesh* const* pMeshes, size_t num) -{ - ai_assert( NULL != pMeshes ); - - const ai_real epsilon = ai_real( 1e-4 ); - - // calculate the position bounds so we have a reliable epsilon to check position differences against - aiVector3D minVec, maxVec, mi, ma; - MinMaxChooser<aiVector3D>()(minVec,maxVec); - - for (size_t a = 0; a < num; ++a) { - const aiMesh* pMesh = pMeshes[a]; - ArrayBounds(pMesh->mVertices,pMesh->mNumVertices,mi,ma); - - minVec = std::min(minVec,mi); - maxVec = std::max(maxVec,ma); - } - return (maxVec - minVec).Length() * epsilon; -} - - -// ------------------------------------------------------------------------------- -unsigned int GetMeshVFormatUnique(const aiMesh* pcMesh) -{ - ai_assert(NULL != pcMesh); - - // FIX: the hash may never be 0. Otherwise a comparison against - // nullptr could be successful - unsigned int iRet = 1; - - // normals - if (pcMesh->HasNormals())iRet |= 0x2; - // tangents and bitangents - if (pcMesh->HasTangentsAndBitangents())iRet |= 0x4; - -#ifdef BOOST_STATIC_ASSERT - BOOST_STATIC_ASSERT(8 >= AI_MAX_NUMBER_OF_COLOR_SETS); - BOOST_STATIC_ASSERT(8 >= AI_MAX_NUMBER_OF_TEXTURECOORDS); -#endif - - // texture coordinates - unsigned int p = 0; - while (pcMesh->HasTextureCoords(p)) - { - iRet |= (0x100 << p); - if (3 == pcMesh->mNumUVComponents[p]) - iRet |= (0x10000 << p); - - ++p; - } - // vertex colors - p = 0; - while (pcMesh->HasVertexColors(p))iRet |= (0x1000000 << p++); - return iRet; -} - -// ------------------------------------------------------------------------------- -VertexWeightTable* ComputeVertexBoneWeightTable(const aiMesh* pMesh) -{ - if (!pMesh || !pMesh->mNumVertices || !pMesh->mNumBones) { - return NULL; - } - - VertexWeightTable* avPerVertexWeights = new VertexWeightTable[pMesh->mNumVertices]; - for (unsigned int i = 0; i < pMesh->mNumBones;++i) { - - aiBone* bone = pMesh->mBones[i]; - for (unsigned int a = 0; a < bone->mNumWeights;++a) { - const aiVertexWeight& weight = bone->mWeights[a]; - avPerVertexWeights[weight.mVertexId].push_back( std::pair<unsigned int,float>(i,weight.mWeight) ); - } - } - return avPerVertexWeights; -} - - -// ------------------------------------------------------------------------------- -const char* TextureTypeToString(aiTextureType in) -{ - switch (in) - { - case aiTextureType_NONE: - return "n/a"; - case aiTextureType_DIFFUSE: - return "Diffuse"; - case aiTextureType_SPECULAR: - return "Specular"; - case aiTextureType_AMBIENT: - return "Ambient"; - case aiTextureType_EMISSIVE: - return "Emissive"; - case aiTextureType_OPACITY: - return "Opacity"; - case aiTextureType_NORMALS: - return "Normals"; - case aiTextureType_HEIGHT: - return "Height"; - case aiTextureType_SHININESS: - return "Shininess"; - case aiTextureType_DISPLACEMENT: - return "Displacement"; - case aiTextureType_LIGHTMAP: - return "Lightmap"; - case aiTextureType_REFLECTION: - return "Reflection"; - case aiTextureType_UNKNOWN: - return "Unknown"; - default: - break; - } - - ai_assert(false); - return "BUG"; -} - -// ------------------------------------------------------------------------------- -const char* MappingTypeToString(aiTextureMapping in) -{ - switch (in) - { - case aiTextureMapping_UV: - return "UV"; - case aiTextureMapping_BOX: - return "Box"; - case aiTextureMapping_SPHERE: - return "Sphere"; - case aiTextureMapping_CYLINDER: - return "Cylinder"; - case aiTextureMapping_PLANE: - return "Plane"; - case 
aiTextureMapping_OTHER: - return "Other"; - default: - break; - } - - ai_assert(false); - return "BUG"; -} - - -// ------------------------------------------------------------------------------- -aiMesh* MakeSubmesh(const aiMesh *pMesh, const std::vector<unsigned int> &subMeshFaces, unsigned int subFlags) -{ - aiMesh *oMesh = new aiMesh(); - std::vector<unsigned int> vMap(pMesh->mNumVertices,UINT_MAX); - - size_t numSubVerts = 0; - size_t numSubFaces = subMeshFaces.size(); - - for(unsigned int i=0;i<numSubFaces;i++) { - const aiFace &f = pMesh->mFaces[subMeshFaces[i]]; - - for(unsigned int j=0;j<f.mNumIndices;j++) { - if(vMap[f.mIndices[j]]==UINT_MAX) { - vMap[f.mIndices[j]] = static_cast<unsigned int>(numSubVerts++); - } - } - } - - oMesh->mName = pMesh->mName; - - oMesh->mMaterialIndex = pMesh->mMaterialIndex; - oMesh->mPrimitiveTypes = pMesh->mPrimitiveTypes; - - // create all the arrays for this mesh if the old mesh contained them - - oMesh->mNumFaces = static_cast<unsigned int>(subMeshFaces.size()); - oMesh->mNumVertices = static_cast<unsigned int>(numSubVerts); - oMesh->mVertices = new aiVector3D[numSubVerts]; - if( pMesh->HasNormals() ) { - oMesh->mNormals = new aiVector3D[numSubVerts]; - } - - if( pMesh->HasTangentsAndBitangents() ) { - oMesh->mTangents = new aiVector3D[numSubVerts]; - oMesh->mBitangents = new aiVector3D[numSubVerts]; - } - - for( size_t a = 0; pMesh->HasTextureCoords(static_cast<unsigned int>(a)) ; ++a ) { - oMesh->mTextureCoords[a] = new aiVector3D[numSubVerts]; - oMesh->mNumUVComponents[a] = pMesh->mNumUVComponents[a]; - } - - for( size_t a = 0; pMesh->HasVertexColors( static_cast<unsigned int>(a)); ++a ) { - oMesh->mColors[a] = new aiColor4D[numSubVerts]; - } - - // and copy over the data, generating faces with linear indices along the way - oMesh->mFaces = new aiFace[numSubFaces]; - - for(unsigned int a = 0; a < numSubFaces; ++a ) { - - const aiFace& srcFace = pMesh->mFaces[subMeshFaces[a]]; - aiFace& dstFace = oMesh->mFaces[a]; - dstFace.mNumIndices = srcFace.mNumIndices; - dstFace.mIndices = new unsigned int[dstFace.mNumIndices]; - - // accumulate linearly all the vertices of the source face - for( size_t b = 0; b < dstFace.mNumIndices; ++b ) { - dstFace.mIndices[b] = vMap[srcFace.mIndices[b]]; - } - } - - for(unsigned int srcIndex = 0; srcIndex < pMesh->mNumVertices; ++srcIndex ) { - unsigned int nvi = vMap[srcIndex]; - if(nvi==UINT_MAX) { - continue; - } - - oMesh->mVertices[nvi] = pMesh->mVertices[srcIndex]; - if( pMesh->HasNormals() ) { - oMesh->mNormals[nvi] = pMesh->mNormals[srcIndex]; - } - - if( pMesh->HasTangentsAndBitangents() ) { - oMesh->mTangents[nvi] = pMesh->mTangents[srcIndex]; - oMesh->mBitangents[nvi] = pMesh->mBitangents[srcIndex]; - } - for( size_t c = 0, cc = pMesh->GetNumUVChannels(); c < cc; ++c ) { - oMesh->mTextureCoords[c][nvi] = pMesh->mTextureCoords[c][srcIndex]; - } - for( size_t c = 0, cc = pMesh->GetNumColorChannels(); c < cc; ++c ) { - oMesh->mColors[c][nvi] = pMesh->mColors[c][srcIndex]; - } - } - - if(~subFlags&AI_SUBMESH_FLAGS_SANS_BONES) { - std::vector<unsigned int> subBones(pMesh->mNumBones,0); - - for(unsigned int a=0;a<pMesh->mNumBones;++a) { - const aiBone* bone = pMesh->mBones[a]; - - for(unsigned int b=0;b<bone->mNumWeights;b++) { - unsigned int v = vMap[bone->mWeights[b].mVertexId]; - - if(v!=UINT_MAX) { - subBones[a]++; - } - } - } - - for(unsigned int a=0;a<pMesh->mNumBones;++a) { - if(subBones[a]>0) { - oMesh->mNumBones++; - } - } - - if(oMesh->mNumBones) { - oMesh->mBones = new aiBone*[oMesh->mNumBones](); - 
unsigned int nbParanoia = oMesh->mNumBones; - - oMesh->mNumBones = 0; //rewind - - for(unsigned int a=0;a<pMesh->mNumBones;++a) { - if(subBones[a]==0) { - continue; - } - aiBone *newBone = new aiBone; - oMesh->mBones[oMesh->mNumBones++] = newBone; - - const aiBone* bone = pMesh->mBones[a]; - - newBone->mName = bone->mName; - newBone->mOffsetMatrix = bone->mOffsetMatrix; - newBone->mWeights = new aiVertexWeight[subBones[a]]; - - for(unsigned int b=0;b<bone->mNumWeights;b++) { - const unsigned int v = vMap[bone->mWeights[b].mVertexId]; - - if(v!=UINT_MAX) { - aiVertexWeight w(v,bone->mWeights[b].mWeight); - newBone->mWeights[newBone->mNumWeights++] = w; - } - } - } - - ai_assert(nbParanoia==oMesh->mNumBones); - (void)nbParanoia; // remove compiler warning on release build - } - } - - return oMesh; -} - -} // namespace Assimp diff --git a/thirdparty/assimp/code/PostProcessing/ProcessHelper.h b/thirdparty/assimp/code/PostProcessing/ProcessHelper.h deleted file mode 100644 index 0afcc41420..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ProcessHelper.h +++ /dev/null @@ -1,386 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -#ifndef AI_PROCESS_HELPER_H_INCLUDED -#define AI_PROCESS_HELPER_H_INCLUDED - -#include <assimp/postprocess.h> -#include <assimp/anim.h> -#include <assimp/mesh.h> -#include <assimp/material.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> - -#include <assimp/SpatialSort.h> -#include "Common/BaseProcess.h" -#include <assimp/ParsingUtils.h> - -#include <list> - -// ------------------------------------------------------------------------------- -// Some extensions to std namespace. Mainly std::min and std::max for all -// flat data types in the aiScene. They're used to quickly determine the -// min/max bounds of data arrays. 
-#ifdef __cplusplus -namespace std { - - // std::min for aiVector3D - template <typename TReal> - inline ::aiVector3t<TReal> min (const ::aiVector3t<TReal>& a, const ::aiVector3t<TReal>& b) { - return ::aiVector3t<TReal> (min(a.x,b.x),min(a.y,b.y),min(a.z,b.z)); - } - - // std::max for aiVector3t<TReal> - template <typename TReal> - inline ::aiVector3t<TReal> max (const ::aiVector3t<TReal>& a, const ::aiVector3t<TReal>& b) { - return ::aiVector3t<TReal> (max(a.x,b.x),max(a.y,b.y),max(a.z,b.z)); - } - - // std::min for aiVector2t<TReal> - template <typename TReal> - inline ::aiVector2t<TReal> min (const ::aiVector2t<TReal>& a, const ::aiVector2t<TReal>& b) { - return ::aiVector2t<TReal> (min(a.x,b.x),min(a.y,b.y)); - } - - // std::max for aiVector2t<TReal> - template <typename TReal> - inline ::aiVector2t<TReal> max (const ::aiVector2t<TReal>& a, const ::aiVector2t<TReal>& b) { - return ::aiVector2t<TReal> (max(a.x,b.x),max(a.y,b.y)); - } - - // std::min for aiColor4D - template <typename TReal> - inline ::aiColor4t<TReal> min (const ::aiColor4t<TReal>& a, const ::aiColor4t<TReal>& b) { - return ::aiColor4t<TReal> (min(a.r,b.r),min(a.g,b.g),min(a.b,b.b),min(a.a,b.a)); - } - - // std::max for aiColor4D - template <typename TReal> - inline ::aiColor4t<TReal> max (const ::aiColor4t<TReal>& a, const ::aiColor4t<TReal>& b) { - return ::aiColor4t<TReal> (max(a.r,b.r),max(a.g,b.g),max(a.b,b.b),max(a.a,b.a)); - } - - - // std::min for aiQuaterniont<TReal> - template <typename TReal> - inline ::aiQuaterniont<TReal> min (const ::aiQuaterniont<TReal>& a, const ::aiQuaterniont<TReal>& b) { - return ::aiQuaterniont<TReal> (min(a.w,b.w),min(a.x,b.x),min(a.y,b.y),min(a.z,b.z)); - } - - // std::max for aiQuaterniont<TReal> - template <typename TReal> - inline ::aiQuaterniont<TReal> max (const ::aiQuaterniont<TReal>& a, const ::aiQuaterniont<TReal>& b) { - return ::aiQuaterniont<TReal> (max(a.w,b.w),max(a.x,b.x),max(a.y,b.y),max(a.z,b.z)); - } - - - - // std::min for aiVectorKey - inline ::aiVectorKey min (const ::aiVectorKey& a, const ::aiVectorKey& b) { - return ::aiVectorKey (min(a.mTime,b.mTime),min(a.mValue,b.mValue)); - } - - // std::max for aiVectorKey - inline ::aiVectorKey max (const ::aiVectorKey& a, const ::aiVectorKey& b) { - return ::aiVectorKey (max(a.mTime,b.mTime),max(a.mValue,b.mValue)); - } - - // std::min for aiQuatKey - inline ::aiQuatKey min (const ::aiQuatKey& a, const ::aiQuatKey& b) { - return ::aiQuatKey (min(a.mTime,b.mTime),min(a.mValue,b.mValue)); - } - - // std::max for aiQuatKey - inline ::aiQuatKey max (const ::aiQuatKey& a, const ::aiQuatKey& b) { - return ::aiQuatKey (max(a.mTime,b.mTime),max(a.mValue,b.mValue)); - } - - // std::min for aiVertexWeight - inline ::aiVertexWeight min (const ::aiVertexWeight& a, const ::aiVertexWeight& b) { - return ::aiVertexWeight (min(a.mVertexId,b.mVertexId),min(a.mWeight,b.mWeight)); - } - - // std::max for aiVertexWeight - inline ::aiVertexWeight max (const ::aiVertexWeight& a, const ::aiVertexWeight& b) { - return ::aiVertexWeight (max(a.mVertexId,b.mVertexId),max(a.mWeight,b.mWeight)); - } - -} // end namespace std -#endif // !! 
C++ - -namespace Assimp { - -// ------------------------------------------------------------------------------- -// Start points for ArrayBounds<T> for all supported Ts -template <typename T> -struct MinMaxChooser; - -template <> struct MinMaxChooser<float> { - void operator ()(float& min,float& max) { - max = -1e10f; - min = 1e10f; -}}; -template <> struct MinMaxChooser<double> { - void operator ()(double& min,double& max) { - max = -1e10; - min = 1e10; -}}; -template <> struct MinMaxChooser<unsigned int> { - void operator ()(unsigned int& min,unsigned int& max) { - max = 0; - min = (1u<<(sizeof(unsigned int)*8-1)); -}}; - -template <typename T> struct MinMaxChooser< aiVector3t<T> > { - void operator ()(aiVector3t<T>& min,aiVector3t<T>& max) { - max = aiVector3t<T>(-1e10f,-1e10f,-1e10f); - min = aiVector3t<T>( 1e10f, 1e10f, 1e10f); -}}; -template <typename T> struct MinMaxChooser< aiVector2t<T> > { - void operator ()(aiVector2t<T>& min,aiVector2t<T>& max) { - max = aiVector2t<T>(-1e10f,-1e10f); - min = aiVector2t<T>( 1e10f, 1e10f); - }}; -template <typename T> struct MinMaxChooser< aiColor4t<T> > { - void operator ()(aiColor4t<T>& min,aiColor4t<T>& max) { - max = aiColor4t<T>(-1e10f,-1e10f,-1e10f,-1e10f); - min = aiColor4t<T>( 1e10f, 1e10f, 1e10f, 1e10f); -}}; - -template <typename T> struct MinMaxChooser< aiQuaterniont<T> > { - void operator ()(aiQuaterniont<T>& min,aiQuaterniont<T>& max) { - max = aiQuaterniont<T>(-1e10f,-1e10f,-1e10f,-1e10f); - min = aiQuaterniont<T>( 1e10f, 1e10f, 1e10f, 1e10f); -}}; - -template <> struct MinMaxChooser<aiVectorKey> { - void operator ()(aiVectorKey& min,aiVectorKey& max) { - MinMaxChooser<double>()(min.mTime,max.mTime); - MinMaxChooser<aiVector3D>()(min.mValue,max.mValue); -}}; -template <> struct MinMaxChooser<aiQuatKey> { - void operator ()(aiQuatKey& min,aiQuatKey& max) { - MinMaxChooser<double>()(min.mTime,max.mTime); - MinMaxChooser<aiQuaternion>()(min.mValue,max.mValue); -}}; - -template <> struct MinMaxChooser<aiVertexWeight> { - void operator ()(aiVertexWeight& min,aiVertexWeight& max) { - MinMaxChooser<unsigned int>()(min.mVertexId,max.mVertexId); - MinMaxChooser<float>()(min.mWeight,max.mWeight); -}}; - -// ------------------------------------------------------------------------------- -/** @brief Find the min/max values of an array of Ts - * @param in Input array - * @param size Number of elements to process - * @param[out] min minimum value - * @param[out] max maximum value - */ -template <typename T> -inline void ArrayBounds(const T* in, unsigned int size, T& min, T& max) -{ - MinMaxChooser<T> ()(min,max); - for (unsigned int i = 0; i < size;++i) { - min = std::min(in[i],min); - max = std::max(in[i],max); - } -} - - -// ------------------------------------------------------------------------------- -/** Little helper function to calculate the quadratic difference - * of two colours. - * @param pColor1 First color - * @param pColor2 second color - * @return Quadratic color difference */ -inline ai_real GetColorDifference( const aiColor4D& pColor1, const aiColor4D& pColor2) -{ - const aiColor4D c (pColor1.r - pColor2.r, pColor1.g - pColor2.g, pColor1.b - pColor2.b, pColor1.a - pColor2.a); - return c.r*c.r + c.g*c.g + c.b*c.b + c.a*c.a; -} - - -// ------------------------------------------------------------------------------- -/** @brief Extract single strings from a list of identifiers - * @param in Input string list. 
- * @param out Receives a list of clean output strings - * @sdee #AI_CONFIG_PP_OG_EXCLUDE_LIST */ -void ConvertListToStrings(const std::string& in, std::list<std::string>& out); - - -// ------------------------------------------------------------------------------- -/** @brief Compute the AABB of a mesh after applying a given transform - * @param mesh Input mesh - * @param[out] min Receives minimum transformed vertex - * @param[out] max Receives maximum transformed vertex - * @param m Transformation matrix to be applied */ -void FindAABBTransformed (const aiMesh* mesh, aiVector3D& min, aiVector3D& max, const aiMatrix4x4& m); - - -// ------------------------------------------------------------------------------- -/** @brief Helper function to determine the 'real' center of a mesh - * - * That is the center of its axis-aligned bounding box. - * @param mesh Input mesh - * @param[out] min Minimum vertex of the mesh - * @param[out] max maximum vertex of the mesh - * @param[out] out Center point */ -void FindMeshCenter (aiMesh* mesh, aiVector3D& out, aiVector3D& min, aiVector3D& max); - -// ------------------------------------------------------------------------------- -/** @brief Helper function to determine the 'real' center of a scene - * - * That is the center of its axis-aligned bounding box. - * @param scene Input scene - * @param[out] min Minimum vertex of the scene - * @param[out] max maximum vertex of the scene - * @param[out] out Center point */ -void FindSceneCenter (aiScene* scene, aiVector3D& out, aiVector3D& min, aiVector3D& max); - - -// ------------------------------------------------------------------------------- -// Helper function to determine the 'real' center of a mesh after applying a given transform -void FindMeshCenterTransformed (aiMesh* mesh, aiVector3D& out, aiVector3D& min,aiVector3D& max, const aiMatrix4x4& m); - - -// ------------------------------------------------------------------------------- -// Helper function to determine the 'real' center of a mesh -void FindMeshCenter (aiMesh* mesh, aiVector3D& out); - - -// ------------------------------------------------------------------------------- -// Helper function to determine the 'real' center of a mesh after applying a given transform -void FindMeshCenterTransformed (aiMesh* mesh, aiVector3D& out,const aiMatrix4x4& m); - - -// ------------------------------------------------------------------------------- -// Compute a good epsilon value for position comparisons on a mesh -ai_real ComputePositionEpsilon(const aiMesh* pMesh); - - -// ------------------------------------------------------------------------------- -// Compute a good epsilon value for position comparisons on a array of meshes -ai_real ComputePositionEpsilon(const aiMesh* const* pMeshes, size_t num); - - -// ------------------------------------------------------------------------------- -// Compute an unique value for the vertex format of a mesh -unsigned int GetMeshVFormatUnique(const aiMesh* pcMesh); - - -// defs for ComputeVertexBoneWeightTable() -typedef std::pair <unsigned int,float> PerVertexWeight; -typedef std::vector <PerVertexWeight> VertexWeightTable; - -// ------------------------------------------------------------------------------- -// Compute a per-vertex bone weight table -VertexWeightTable* ComputeVertexBoneWeightTable(const aiMesh* pMesh); - - -// ------------------------------------------------------------------------------- -// Get a string for a given aiTextureType -const char* TextureTypeToString(aiTextureType in); - - -// 
------------------------------------------------------------------------------- -// Get a string for a given aiTextureMapping -const char* MappingTypeToString(aiTextureMapping in); - - -// flags for MakeSubmesh() -#define AI_SUBMESH_FLAGS_SANS_BONES 0x1 - -// ------------------------------------------------------------------------------- -// Split a mesh given a list of faces to be contained in the sub mesh -aiMesh* MakeSubmesh(const aiMesh *superMesh, const std::vector<unsigned int> &subMeshFaces, unsigned int subFlags); - -// ------------------------------------------------------------------------------- -// Utility postprocess step to share the spatial sort tree between -// all steps which use it to speedup its computations. -class ComputeSpatialSortProcess : public BaseProcess -{ - bool IsActive( unsigned int pFlags) const - { - return NULL != shared && 0 != (pFlags & (aiProcess_CalcTangentSpace | - aiProcess_GenNormals | aiProcess_JoinIdenticalVertices)); - } - - void Execute( aiScene* pScene) - { - typedef std::pair<SpatialSort, ai_real> _Type; - ASSIMP_LOG_DEBUG("Generate spatially-sorted vertex cache"); - - std::vector<_Type>* p = new std::vector<_Type>(pScene->mNumMeshes); - std::vector<_Type>::iterator it = p->begin(); - - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i, ++it) { - aiMesh* mesh = pScene->mMeshes[i]; - _Type& blubb = *it; - blubb.first.Fill(mesh->mVertices,mesh->mNumVertices,sizeof(aiVector3D)); - blubb.second = ComputePositionEpsilon(mesh); - } - - shared->AddProperty(AI_SPP_SPATIAL_SORT,p); - } -}; - -// ------------------------------------------------------------------------------- -// ... and the same again to cleanup the whole stuff -class DestroySpatialSortProcess : public BaseProcess -{ - bool IsActive( unsigned int pFlags) const - { - return NULL != shared && 0 != (pFlags & (aiProcess_CalcTangentSpace | - aiProcess_GenNormals | aiProcess_JoinIdenticalVertices)); - } - - void Execute( aiScene* /*pScene*/) - { - shared->RemoveProperty(AI_SPP_SPATIAL_SORT); - } -}; - - - -} // ! namespace Assimp -#endif // !! AI_PROCESS_HELPER_H_INCLUDED diff --git a/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.cpp b/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.cpp deleted file mode 100644 index 49ec8f5c47..0000000000 --- a/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file RemoveRedundantMaterials.cpp - * @brief Implementation of the "RemoveRedundantMaterials" post processing step -*/ - -// internal headers - -#include "RemoveRedundantMaterials.h" -#include <assimp/ParsingUtils.h> -#include "ProcessHelper.h" -#include "Material/MaterialSystem.h" -#include <stdio.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -RemoveRedundantMatsProcess::RemoveRedundantMatsProcess() -: mConfigFixedMaterials() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -RemoveRedundantMatsProcess::~RemoveRedundantMatsProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool RemoveRedundantMatsProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_RemoveRedundantMaterials) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Setup import properties -void RemoveRedundantMatsProcess::SetupProperties(const Importer* pImp) -{ - // Get value of AI_CONFIG_PP_RRM_EXCLUDE_LIST - mConfigFixedMaterials = pImp->GetPropertyString(AI_CONFIG_PP_RRM_EXCLUDE_LIST,""); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void RemoveRedundantMatsProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("RemoveRedundantMatsProcess begin"); - - unsigned int redundantRemoved = 0, unreferencedRemoved = 0; - if (pScene->mNumMaterials) - { - // Find out which materials are referenced by meshes - std::vector<bool> abReferenced(pScene->mNumMaterials,false); - for (unsigned int i = 0;i < pScene->mNumMeshes;++i) - abReferenced[pScene->mMeshes[i]->mMaterialIndex] = true; - - // If a list of materials to be excluded was given, match the list with - // our imported materials and 'salt' all positive matches to ensure that - // we get unique hashes later. 
- if (mConfigFixedMaterials.length()) { - - std::list<std::string> strings; - ConvertListToStrings(mConfigFixedMaterials,strings); - - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) { - aiMaterial* mat = pScene->mMaterials[i]; - - aiString name; - mat->Get(AI_MATKEY_NAME,name); - - if (name.length) { - std::list<std::string>::const_iterator it = std::find(strings.begin(), strings.end(), name.data); - if (it != strings.end()) { - - // Our brilliant 'salt': A single material property with ~ as first - // character to mark it as internal and temporary. - const int dummy = 1; - ((aiMaterial*)mat)->AddProperty(&dummy,1,"~RRM.UniqueMaterial",0,0); - - // Keep this material even if no mesh references it - abReferenced[i] = true; - ASSIMP_LOG_DEBUG_F( "Found positive match in exclusion list: \'", name.data, "\'"); - } - } - } - } - - // TODO: re-implement this algorithm to work in-place - unsigned int *aiMappingTable = new unsigned int[pScene->mNumMaterials]; - for ( unsigned int i=0; i<pScene->mNumMaterials; i++ ) { - aiMappingTable[ i ] = 0; - } - unsigned int iNewNum = 0; - - // Iterate through all materials and calculate a hash for them - // store all hashes in a list and so a quick search whether - // we do already have a specific hash. This allows us to - // determine which materials are identical. - uint32_t *aiHashes = new uint32_t[ pScene->mNumMaterials ];; - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) - { - // No mesh is referencing this material, remove it. - if (!abReferenced[i]) { - ++unreferencedRemoved; - delete pScene->mMaterials[i]; - pScene->mMaterials[i] = nullptr; - continue; - } - - // Check all previously mapped materials for a matching hash. - // On a match we can delete this material and just make it ref to the same index. - uint32_t me = aiHashes[i] = ComputeMaterialHash(pScene->mMaterials[i]); - for (unsigned int a = 0; a < i;++a) - { - if (abReferenced[a] && me == aiHashes[a]) { - ++redundantRemoved; - me = 0; - aiMappingTable[i] = aiMappingTable[a]; - delete pScene->mMaterials[i]; - pScene->mMaterials[i] = nullptr; - break; - } - } - // This is a new material that is referenced, add to the map. - if (me) { - aiMappingTable[i] = iNewNum++; - } - } - // If the new material count differs from the original, - // we need to rebuild the material list and remap mesh material indexes. - if (iNewNum != pScene->mNumMaterials) { - ai_assert(iNewNum > 0); - aiMaterial** ppcMaterials = new aiMaterial*[iNewNum]; - ::memset(ppcMaterials,0,sizeof(void*)*iNewNum); - for (unsigned int p = 0; p < pScene->mNumMaterials;++p) - { - // if the material is not referenced ... 
remove it - if (!abReferenced[p]) { - continue; - } - - // generate new names for modified materials that had no names - const unsigned int idx = aiMappingTable[p]; - if (ppcMaterials[idx]) { - aiString sz; - if( ppcMaterials[idx]->Get(AI_MATKEY_NAME, sz) != AI_SUCCESS ) { - sz.length = ::ai_snprintf(sz.data,MAXLEN,"JoinedMaterial_#%u",p); - ((aiMaterial*)ppcMaterials[idx])->AddProperty(&sz,AI_MATKEY_NAME); - } - } else { - ppcMaterials[idx] = pScene->mMaterials[p]; - } - } - // update all material indices - for (unsigned int p = 0; p < pScene->mNumMeshes;++p) { - aiMesh* mesh = pScene->mMeshes[p]; - ai_assert( NULL!=mesh ); - mesh->mMaterialIndex = aiMappingTable[mesh->mMaterialIndex]; - } - // delete the old material list - delete[] pScene->mMaterials; - pScene->mMaterials = ppcMaterials; - pScene->mNumMaterials = iNewNum; - } - // delete temporary storage - delete[] aiHashes; - delete[] aiMappingTable; - } - if (redundantRemoved == 0 && unreferencedRemoved == 0) - { - ASSIMP_LOG_DEBUG("RemoveRedundantMatsProcess finished "); - } - else - { - ASSIMP_LOG_INFO_F("RemoveRedundantMatsProcess finished. Removed ", redundantRemoved, " redundant and ", - unreferencedRemoved, " unused materials."); - } -} diff --git a/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.h b/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.h deleted file mode 100644 index 1f32a0abfb..0000000000 --- a/thirdparty/assimp/code/PostProcessing/RemoveRedundantMaterials.h +++ /dev/null @@ -1,103 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
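The removed RemoveRedundantMatsProcess boils down to: hash every material, map each repeated hash back to the first index that produced it, then rewrite every mesh's material index through that map. A minimal standalone sketch of that hash-and-remap idea follows; it uses plain std::string stand-ins and std::hash rather than assimp's ComputeMaterialHash, skips the unreferenced-material and exclusion-list handling shown above, and every name in it is illustrative rather than assimp API.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Deduplicate "materials" by hash and build an old-index -> new-index map,
// mirroring the remapping table the removed post-process step maintains.
static std::vector<std::size_t> Deduplicate(std::vector<std::string> &materials) {
    std::unordered_map<std::size_t, std::size_t> firstIndexForHash; // hash -> new index
    std::vector<std::string> unique;
    std::vector<std::size_t> remap(materials.size());

    for (std::size_t i = 0; i < materials.size(); ++i) {
        const std::size_t h = std::hash<std::string>{}(materials[i]);
        auto it = firstIndexForHash.find(h);
        if (it != firstIndexForHash.end()) {
            remap[i] = it->second;             // redundant entry: reuse the earlier slot
        } else {
            remap[i] = unique.size();          // first time we see this hash
            firstIndexForHash.emplace(h, remap[i]);
            unique.push_back(materials[i]);
        }
    }
    materials.swap(unique);                    // shrink the material list in place
    return remap;                              // callers rewrite their indices with this
}

int main() {
    std::vector<std::string> mats = {"red", "blue", "red", "green", "blue"};
    std::vector<std::size_t> meshMaterial = {0, 1, 2, 3, 4};

    const std::vector<std::size_t> remap = Deduplicate(mats);
    for (std::size_t &idx : meshMaterial) {
        idx = remap[idx];                      // same remap step the meshes get after dedup
    }
    for (std::size_t idx : meshMaterial) {
        std::cout << idx << ' ' << mats[idx] << '\n';
    }
}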
- ----------------------------------------------------------------------- -*/ - -/** @file RemoveRedundantMaterials.h - * @brief Defines a post processing step to remove redundant materials - */ -#ifndef AI_REMOVEREDUNDANTMATERIALS_H_INC -#define AI_REMOVEREDUNDANTMATERIALS_H_INC - -#include "Common/BaseProcess.h" -#include <assimp/mesh.h> - -class RemoveRedundantMatsTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** RemoveRedundantMatsProcess: Post-processing step to remove redundant - * materials from the imported scene. - */ -class ASSIMP_API RemoveRedundantMatsProcess : public BaseProcess { -public: - /// The default class constructor. - RemoveRedundantMatsProcess(); - - /// The class destructor. - ~RemoveRedundantMatsProcess(); - - // ------------------------------------------------------------------- - // Check whether step is active - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - // Execute step on a given scene - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - // Setup import settings - void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** @brief Set list of fixed (inmutable) materials - * @param fixed See #AI_CONFIG_PP_RRM_EXCLUDE_LIST - */ - void SetFixedMaterialsString(const std::string& fixed = "") { - mConfigFixedMaterials = fixed; - } - - // ------------------------------------------------------------------- - /** @brief Get list of fixed (inmutable) materials - * @return See #AI_CONFIG_PP_RRM_EXCLUDE_LIST - */ - const std::string& GetFixedMaterialsString() const { - return mConfigFixedMaterials; - } - -private: - //! Configuration option: list of all fixed materials - std::string mConfigFixedMaterials; -}; - -} // end of namespace Assimp - -#endif // !!AI_REMOVEREDUNDANTMATERIALS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.cpp b/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.cpp deleted file mode 100644 index 99fd47a3aa..0000000000 --- a/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.cpp +++ /dev/null @@ -1,337 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file Implementation of the post processing step to remove - * any parts of the mesh structure from the imported data. -*/ - - -#include "RemoveVCProcess.h" -#include <assimp/postprocess.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -RemoveVCProcess::RemoveVCProcess() : - configDeleteFlags() - , mScene() -{} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -RemoveVCProcess::~RemoveVCProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool RemoveVCProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_RemoveComponent) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Small helper function to delete all elements in a T** aray using delete -template <typename T> -inline void ArrayDelete(T**& in, unsigned int& num) -{ - for (unsigned int i = 0; i < num; ++i) - delete in[i]; - - delete[] in; - in = NULL; - num = 0; -} - -#if 0 -// ------------------------------------------------------------------------------------------------ -// Updates the node graph - removes all nodes which have the "remove" flag set and the -// "don't remove" flag not set. Nodes with meshes are never deleted. -bool UpdateNodeGraph(aiNode* node,std::list<aiNode*>& childsOfParent,bool root) -{ - bool b = false; - - std::list<aiNode*> mine; - for (unsigned int i = 0; i < node->mNumChildren;++i) - { - if(UpdateNodeGraph(node->mChildren[i],mine,false)) - b = true; - } - - // somewhat tricky ... 
mNumMeshes must be originally 0 and MSB2 may not be set, - // so we can do a simple comparison against MSB here - if (!root && AI_RC_UINT_MSB == node->mNumMeshes ) - { - // this node needs to be removed - if(node->mNumChildren) - { - childsOfParent.insert(childsOfParent.end(),mine.begin(),mine.end()); - - // set all children to NULL to make sure they are not deleted when we delete ourself - for (unsigned int i = 0; i < node->mNumChildren;++i) - node->mChildren[i] = NULL; - } - b = true; - delete node; - } - else - { - AI_RC_UNMASK(node->mNumMeshes); - childsOfParent.push_back(node); - - if (b) - { - // reallocate the array of our children here - node->mNumChildren = (unsigned int)mine.size(); - aiNode** const children = new aiNode*[mine.size()]; - aiNode** ptr = children; - - for (std::list<aiNode*>::iterator it = mine.begin(), end = mine.end(); - it != end; ++it) - { - *ptr++ = *it; - } - delete[] node->mChildren; - node->mChildren = children; - return false; - } - } - return b; -} -#endif - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void RemoveVCProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("RemoveVCProcess begin"); - bool bHas = false; //,bMasked = false; - - mScene = pScene; - - // handle animations - if ( configDeleteFlags & aiComponent_ANIMATIONS) - { - - bHas = true; - ArrayDelete(pScene->mAnimations,pScene->mNumAnimations); - } - - // handle textures - if ( configDeleteFlags & aiComponent_TEXTURES) - { - bHas = true; - ArrayDelete(pScene->mTextures,pScene->mNumTextures); - } - - // handle materials - if ( configDeleteFlags & aiComponent_MATERIALS && pScene->mNumMaterials) - { - bHas = true; - for (unsigned int i = 1;i < pScene->mNumMaterials;++i) - delete pScene->mMaterials[i]; - - pScene->mNumMaterials = 1; - aiMaterial* helper = (aiMaterial*) pScene->mMaterials[0]; - ai_assert(NULL != helper); - helper->Clear(); - - // gray - aiColor3D clr(0.6f,0.6f,0.6f); - helper->AddProperty(&clr,1,AI_MATKEY_COLOR_DIFFUSE); - - // add a small ambient color value - clr = aiColor3D(0.05f,0.05f,0.05f); - helper->AddProperty(&clr,1,AI_MATKEY_COLOR_AMBIENT); - - aiString s; - s.Set("Dummy_MaterialsRemoved"); - helper->AddProperty(&s,AI_MATKEY_NAME); - } - - // handle light sources - if ( configDeleteFlags & aiComponent_LIGHTS) - { - bHas = true; - ArrayDelete(pScene->mLights,pScene->mNumLights); - } - - // handle camneras - if ( configDeleteFlags & aiComponent_CAMERAS) - { - bHas = true; - ArrayDelete(pScene->mCameras,pScene->mNumCameras); - } - - // handle meshes - if (configDeleteFlags & aiComponent_MESHES) - { - bHas = true; - ArrayDelete(pScene->mMeshes,pScene->mNumMeshes); - } - else - { - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) - { - if( ProcessMesh( pScene->mMeshes[a])) - bHas = true; - } - } - - - // now check whether the result is still a full scene - if (!pScene->mNumMeshes || !pScene->mNumMaterials) - { - pScene->mFlags |= AI_SCENE_FLAGS_INCOMPLETE; - ASSIMP_LOG_DEBUG("Setting AI_SCENE_FLAGS_INCOMPLETE flag"); - - // If we have no meshes anymore we should also clear another flag ... - if (!pScene->mNumMeshes) - pScene->mFlags &= ~AI_SCENE_FLAGS_NON_VERBOSE_FORMAT; - } - - if (bHas) { - ASSIMP_LOG_INFO("RemoveVCProcess finished. Data structure cleanup has been done."); - } else { - ASSIMP_LOG_DEBUG("RemoveVCProcess finished. 
Nothing to be done ..."); - } -} - -// ------------------------------------------------------------------------------------------------ -// Setup configuration properties for the step -void RemoveVCProcess::SetupProperties(const Importer* pImp) -{ - configDeleteFlags = pImp->GetPropertyInteger(AI_CONFIG_PP_RVC_FLAGS,0x0); - if (!configDeleteFlags) - { - ASSIMP_LOG_WARN("RemoveVCProcess: AI_CONFIG_PP_RVC_FLAGS is zero."); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -bool RemoveVCProcess::ProcessMesh(aiMesh* pMesh) -{ - bool ret = false; - - // if all materials have been deleted let the material - // index of the mesh point to the created default material - if ( configDeleteFlags & aiComponent_MATERIALS) - pMesh->mMaterialIndex = 0; - - // handle normals - if (configDeleteFlags & aiComponent_NORMALS && pMesh->mNormals) - { - delete[] pMesh->mNormals; - pMesh->mNormals = NULL; - ret = true; - } - - // handle tangents and bitangents - if (configDeleteFlags & aiComponent_TANGENTS_AND_BITANGENTS && pMesh->mTangents) - { - delete[] pMesh->mTangents; - pMesh->mTangents = NULL; - - delete[] pMesh->mBitangents; - pMesh->mBitangents = NULL; - ret = true; - } - - // handle texture coordinates - bool b = (0 != (configDeleteFlags & aiComponent_TEXCOORDS)); - for (unsigned int i = 0, real = 0; real < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++real) - { - if (!pMesh->mTextureCoords[i])break; - if (configDeleteFlags & aiComponent_TEXCOORDSn(real) || b) - { - delete [] pMesh->mTextureCoords[i]; - pMesh->mTextureCoords[i] = NULL; - ret = true; - - if (!b) - { - // collapse the rest of the array - for (unsigned int a = i+1; a < AI_MAX_NUMBER_OF_TEXTURECOORDS;++a) - pMesh->mTextureCoords[a-1] = pMesh->mTextureCoords[a]; - - pMesh->mTextureCoords[AI_MAX_NUMBER_OF_TEXTURECOORDS-1] = NULL; - continue; - } - } - ++i; - } - - // handle vertex colors - b = (0 != (configDeleteFlags & aiComponent_COLORS)); - for (unsigned int i = 0, real = 0; real < AI_MAX_NUMBER_OF_COLOR_SETS; ++real) - { - if (!pMesh->mColors[i])break; - if (configDeleteFlags & aiComponent_COLORSn(i) || b) - { - delete [] pMesh->mColors[i]; - pMesh->mColors[i] = NULL; - ret = true; - - if (!b) - { - // collapse the rest of the array - for (unsigned int a = i+1; a < AI_MAX_NUMBER_OF_COLOR_SETS;++a) - pMesh->mColors[a-1] = pMesh->mColors[a]; - - pMesh->mColors[AI_MAX_NUMBER_OF_COLOR_SETS-1] = NULL; - continue; - } - } - ++i; - } - - // handle bones - if (configDeleteFlags & aiComponent_BONEWEIGHTS && pMesh->mBones) - { - ArrayDelete(pMesh->mBones,pMesh->mNumBones); - ret = true; - } - return ret; -} diff --git a/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.h b/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.h deleted file mode 100644 index 7bb21a8330..0000000000 --- a/thirdparty/assimp/code/PostProcessing/RemoveVCProcess.h +++ /dev/null @@ -1,124 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to remove specific parts of the scene */ -#ifndef AI_REMOVEVCPROCESS_H_INCLUDED -#define AI_REMOVEVCPROCESS_H_INCLUDED - -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> - -class RemoveVCProcessTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** RemoveVCProcess: Class to exclude specific parts of the data structure - * from further processing by removing them, -*/ -class ASSIMP_API RemoveVCProcess : public BaseProcess { -public: - /// The default class constructor. - RemoveVCProcess(); - - /// The class destructor. - ~RemoveVCProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - virtual void SetupProperties(const Importer* pImp); - - // ------------------------------------------------------------------- - /** Manually setup the configuration flags for the step - * - * @param Bitwise combination of the #aiComponent enumerated values. - */ - void SetDeleteFlags(unsigned int f) - { - configDeleteFlags = f; - } - - // ------------------------------------------------------------------- - /** Query the current configuration. 
- */ - unsigned int GetDeleteFlags() const - { - return configDeleteFlags; - } - -private: - - bool ProcessMesh (aiMesh* pcMesh); - - /** Configuration flag - */ - unsigned int configDeleteFlags; - - /** The scene we're working with - */ - aiScene* mScene; -}; - -// --------------------------------------------------------------------------- - -} // end of namespace Assimp - -#endif // !!AI_REMOVEVCPROCESS_H_INCLUDED diff --git a/thirdparty/assimp/code/PostProcessing/ScaleProcess.cpp b/thirdparty/assimp/code/PostProcessing/ScaleProcess.cpp deleted file mode 100644 index ac770c41f2..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ScaleProcess.cpp +++ /dev/null @@ -1,208 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#include "ScaleProcess.h" - -#include <assimp/scene.h> -#include <assimp/postprocess.h> -#include <assimp/BaseImporter.h> - -namespace Assimp { - -ScaleProcess::ScaleProcess() -: BaseProcess() -, mScale( AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT ) { -} - -ScaleProcess::~ScaleProcess() { - // empty -} - -void ScaleProcess::setScale( ai_real scale ) { - mScale = scale; -} - -ai_real ScaleProcess::getScale() const { - return mScale; -} - -bool ScaleProcess::IsActive( unsigned int pFlags ) const { - return ( pFlags & aiProcess_GlobalScale ) != 0; -} - -void ScaleProcess::SetupProperties( const Importer* pImp ) { - // User scaling - mScale = pImp->GetPropertyFloat( AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY, 1.0f ); - - // File scaling * Application Scaling - float importerScale = pImp->GetPropertyFloat( AI_CONFIG_APP_SCALE_KEY, 1.0f ); - - // apply scale to the scale - // helps prevent bugs with backward compatibility for anyone using normal scaling. 
- mScale *= importerScale; -} - -void ScaleProcess::Execute( aiScene* pScene ) { - if(mScale == 1.0f) { - return; // nothing to scale - } - - ai_assert( mScale != 0 ); - ai_assert( nullptr != pScene ); - ai_assert( nullptr != pScene->mRootNode ); - - if ( nullptr == pScene ) { - return; - } - - if ( nullptr == pScene->mRootNode ) { - return; - } - - // Process animations and update position transform to new unit system - for( unsigned int animationID = 0; animationID < pScene->mNumAnimations; animationID++ ) - { - aiAnimation* animation = pScene->mAnimations[animationID]; - - for( unsigned int animationChannel = 0; animationChannel < animation->mNumChannels; animationChannel++) - { - aiNodeAnim* anim = animation->mChannels[animationChannel]; - - for( unsigned int posKey = 0; posKey < anim->mNumPositionKeys; posKey++) - { - aiVectorKey& vectorKey = anim->mPositionKeys[posKey]; - vectorKey.mValue *= mScale; - } - } - } - - for( unsigned int meshID = 0; meshID < pScene->mNumMeshes; meshID++) - { - aiMesh *mesh = pScene->mMeshes[meshID]; - - // Reconstruct mesh vertexes to the new unit system - for( unsigned int vertexID = 0; vertexID < mesh->mNumVertices; vertexID++) - { - aiVector3D& vertex = mesh->mVertices[vertexID]; - vertex *= mScale; - } - - - // bone placement / scaling - for( unsigned int boneID = 0; boneID < mesh->mNumBones; boneID++) - { - // Reconstruct matrix by transform rather than by scale - // This prevent scale values being changed which can - // be meaningful in some cases - // like when you want the modeller to see 1:1 compatibility. - aiBone* bone = mesh->mBones[boneID]; - - aiVector3D pos, scale; - aiQuaternion rotation; - - bone->mOffsetMatrix.Decompose( scale, rotation, pos); - - aiMatrix4x4 translation; - aiMatrix4x4::Translation( pos * mScale, translation ); - - aiMatrix4x4 scaling; - aiMatrix4x4::Scaling( aiVector3D(scale), scaling ); - - aiMatrix4x4 RotMatrix = aiMatrix4x4 (rotation.GetMatrix()); - - bone->mOffsetMatrix = translation * RotMatrix * scaling; - } - - - // animation mesh processing - // convert by position rather than scale. - for( unsigned int animMeshID = 0; animMeshID < mesh->mNumAnimMeshes; animMeshID++) - { - aiAnimMesh * animMesh = mesh->mAnimMeshes[animMeshID]; - - for( unsigned int vertexID = 0; vertexID < animMesh->mNumVertices; vertexID++) - { - aiVector3D& vertex = animMesh->mVertices[vertexID]; - vertex *= mScale; - } - } - } - - traverseNodes( pScene->mRootNode ); -} - -void ScaleProcess::traverseNodes( aiNode *node, unsigned int nested_node_id ) { - applyScaling( node ); - - for( size_t i = 0; i < node->mNumChildren; i++) - { - // recurse into the tree until we are done! - traverseNodes( node->mChildren[i], nested_node_id+1 ); - } -} - -void ScaleProcess::applyScaling( aiNode *currentNode ) { - if ( nullptr != currentNode ) { - // Reconstruct matrix by transform rather than by scale - // This prevent scale values being changed which can - // be meaningful in some cases - // like when you want the modeller to - // see 1:1 compatibility. - - aiVector3D pos, scale; - aiQuaternion rotation; - currentNode->mTransformation.Decompose( scale, rotation, pos); - - aiMatrix4x4 translation; - aiMatrix4x4::Translation( pos * mScale, translation ); - - aiMatrix4x4 scaling; - - // note: we do not use mScale here, this is on purpose. 
- aiMatrix4x4::Scaling( scale, scaling ); - - aiMatrix4x4 RotMatrix = aiMatrix4x4 (rotation.GetMatrix()); - - currentNode->mTransformation = translation * RotMatrix * scaling; - } -} - -} // Namespace Assimp diff --git a/thirdparty/assimp/code/PostProcessing/ScaleProcess.h b/thirdparty/assimp/code/PostProcessing/ScaleProcess.h deleted file mode 100644 index 468a216736..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ScaleProcess.h +++ /dev/null @@ -1,97 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#ifndef SCALE_PROCESS_H_ -#define SCALE_PROCESS_H_ - -#include "Common/BaseProcess.h" - -struct aiNode; - -#if (!defined AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT) -# define AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** ScaleProcess: Class to rescale the whole model. - * Now rescales animations, bones, and blend shapes properly. - * Please note this will not write to 'scale' transform it will rewrite mesh - * and matrixes so that your scale values - * from your model package are preserved, so this is completely intentional - * bugs should be reported as soon as they are found. -*/ -class ASSIMP_API ScaleProcess : public BaseProcess { -public: - /// The default class constructor. - ScaleProcess(); - - /// The class destructor. - virtual ~ScaleProcess(); - - /// Will set the scale manually. - void setScale( ai_real scale ); - - /// Returns the current scaling value. 
- ai_real getScale() const; - - /// Overwritten, @see BaseProcess - virtual bool IsActive( unsigned int pFlags ) const; - - /// Overwritten, @see BaseProcess - virtual void SetupProperties( const Importer* pImp ); - - /// Overwritten, @see BaseProcess - virtual void Execute( aiScene* pScene ); - -private: - void traverseNodes( aiNode *currentNode, unsigned int nested_node_id = 0 ); - void applyScaling( aiNode *currentNode ); - -private: - ai_real mScale; -}; - -} // Namespace Assimp - - -#endif // SCALE_PROCESS_H_
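As the ScaleProcess comments above describe, the removed step rescales vertex data, animation position keys and node/bone translations while deliberately leaving the authored scale component of each decomposed transform untouched, so "1:1" scale values from the modelling package survive the import. A tiny standalone sketch of that policy is below; it works on an already-decomposed transform with invented Vec3/Trs stand-ins, rather than going through aiMatrix4x4::Decompose as the real code does.

#include <iostream>

// Plain stand-ins for a node's decomposed transform; not assimp types.
struct Vec3 { float x, y, z; };

struct Trs {
    Vec3 translation;
    Vec3 scale; // authored scale, e.g. from the DCC tool
};

// Unit conversion in the spirit of the removed ScaleProcess: only the
// translation is multiplied by the global factor, the authored scale
// component is left as-is.
static void ApplyGlobalScale(Trs &node, float factor) {
    node.translation.x *= factor;
    node.translation.y *= factor;
    node.translation.z *= factor;
    // node.scale is intentionally not modified.
}

int main() {
    Trs node{{10.0f, 0.0f, 2.0f}, {2.0f, 2.0f, 2.0f}};
    ApplyGlobalScale(node, 0.01f); // e.g. centimetres -> metres
    std::cout << node.translation.x << ' ' << node.scale.x << '\n'; // prints: 0.1 2
}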
\ No newline at end of file diff --git a/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.cpp b/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.cpp deleted file mode 100644 index be8405a17b..0000000000 --- a/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.cpp +++ /dev/null @@ -1,403 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Implementation of the DeterminePTypeHelperProcess and - * SortByPTypeProcess post-process steps. -*/ - - - -// internal headers -#include "ProcessHelper.h" -#include "SortByPTypeProcess.h" -#include <assimp/Exceptional.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -SortByPTypeProcess::SortByPTypeProcess() -: mConfigRemoveMeshes( 0 ) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -SortByPTypeProcess::~SortByPTypeProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool SortByPTypeProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_SortByPType) != 0; -} - -// ------------------------------------------------------------------------------------------------ -void SortByPTypeProcess::SetupProperties(const Importer* pImp) -{ - mConfigRemoveMeshes = pImp->GetPropertyInteger(AI_CONFIG_PP_SBP_REMOVE,0); -} - -// ------------------------------------------------------------------------------------------------ -// Update changed meshes in all nodes -void UpdateNodes(const std::vector<unsigned int>& replaceMeshIndex, aiNode* node) -{ - if (node->mNumMeshes) - { - unsigned int newSize = 0; - for (unsigned int m = 0; m< node->mNumMeshes; ++m) - { - unsigned int add = node->mMeshes[m]<<2; - for (unsigned int i = 0; i < 4;++i) - { - if (UINT_MAX != replaceMeshIndex[add+i])++newSize; - } - } - if (!newSize) - { - delete[] node->mMeshes; - node->mNumMeshes = 0; - node->mMeshes = NULL; - } - else - { - // Try to reuse the old array if possible - unsigned int* newMeshes = (newSize > node->mNumMeshes - ? new unsigned int[newSize] : node->mMeshes); - - for (unsigned int m = 0; m< node->mNumMeshes; ++m) - { - unsigned int add = node->mMeshes[m]<<2; - for (unsigned int i = 0; i < 4;++i) - { - if (UINT_MAX != replaceMeshIndex[add+i]) - *newMeshes++ = replaceMeshIndex[add+i]; - } - } - if (newSize > node->mNumMeshes) - delete[] node->mMeshes; - - node->mMeshes = newMeshes-(node->mNumMeshes = newSize); - } - } - - // call all subnodes recursively - for (unsigned int m = 0; m < node->mNumChildren; ++m) - UpdateNodes(replaceMeshIndex,node->mChildren[m]); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void SortByPTypeProcess::Execute( aiScene* pScene) { - if ( 0 == pScene->mNumMeshes) { - ASSIMP_LOG_DEBUG("SortByPTypeProcess skipped, there are no meshes"); - return; - } - - ASSIMP_LOG_DEBUG("SortByPTypeProcess begin"); - - unsigned int aiNumMeshesPerPType[4] = {0,0,0,0}; - - std::vector<aiMesh*> outMeshes; - outMeshes.reserve(pScene->mNumMeshes<<1u); - - bool bAnyChanges = false; - - std::vector<unsigned int> replaceMeshIndex(pScene->mNumMeshes*4,UINT_MAX); - std::vector<unsigned int>::iterator meshIdx = replaceMeshIndex.begin(); - for (unsigned int i = 0; i < pScene->mNumMeshes; ++i) { - aiMesh* const mesh = pScene->mMeshes[i]; - ai_assert(0 != mesh->mPrimitiveTypes); - - // if there's just one primitive type in the mesh there's nothing to do for us - unsigned int num = 0; - if (mesh->mPrimitiveTypes & aiPrimitiveType_POINT) { - ++aiNumMeshesPerPType[0]; - ++num; - } - if (mesh->mPrimitiveTypes & aiPrimitiveType_LINE) { - ++aiNumMeshesPerPType[1]; - ++num; - } - if (mesh->mPrimitiveTypes & aiPrimitiveType_TRIANGLE) { - ++aiNumMeshesPerPType[2]; - ++num; - } - if (mesh->mPrimitiveTypes & aiPrimitiveType_POLYGON) { - ++aiNumMeshesPerPType[3]; - ++num; - } - - if (1 == num) { - if (!(mConfigRemoveMeshes & mesh->mPrimitiveTypes)) { - *meshIdx = static_cast<unsigned int>( outMeshes.size() ); - outMeshes.push_back(mesh); - } else { - delete mesh; - pScene->mMeshes[ i ] = nullptr; - bAnyChanges = true; - } - - meshIdx += 4; - continue; - } - bAnyChanges = true; - - // reuse our current mesh arrays for the submesh - // with the largest number of primitives - unsigned int aiNumPerPType[4] = {0,0,0,0}; - aiFace* pFirstFace = mesh->mFaces; - aiFace* const pLastFace = pFirstFace + mesh->mNumFaces; - - unsigned int numPolyVerts = 0; - for (;pFirstFace != pLastFace; ++pFirstFace) { - if (pFirstFace->mNumIndices <= 3) - ++aiNumPerPType[pFirstFace->mNumIndices-1]; - else - { - ++aiNumPerPType[3]; - numPolyVerts += pFirstFace-> mNumIndices; - } - } - - VertexWeightTable* avw = ComputeVertexBoneWeightTable(mesh); - for (unsigned int real = 0; real < 4; ++real,++meshIdx) - { - if ( !aiNumPerPType[real] || mConfigRemoveMeshes & (1u << real)) - { - continue; - } - - *meshIdx = (unsigned int) outMeshes.size(); - outMeshes.push_back(new aiMesh()); - aiMesh* out = outMeshes.back(); - - // the name carries the adjacency information between the meshes - out->mName = mesh->mName; - - // copy data members - out->mPrimitiveTypes = 1u << real; - out->mMaterialIndex = mesh->mMaterialIndex; - - // allocate output storage - out->mNumFaces = aiNumPerPType[real]; - aiFace* outFaces = out->mFaces = new aiFace[out->mNumFaces]; - - out->mNumVertices = (3 == real ? 
numPolyVerts : out->mNumFaces * (real+1)); - - aiVector3D *vert(nullptr), *nor(nullptr), *tan(nullptr), *bit(nullptr); - aiVector3D *uv [AI_MAX_NUMBER_OF_TEXTURECOORDS]; - aiColor4D *cols [AI_MAX_NUMBER_OF_COLOR_SETS]; - - if (mesh->mVertices) { - vert = out->mVertices = new aiVector3D[out->mNumVertices]; - } - - if (mesh->mNormals) { - nor = out->mNormals = new aiVector3D[out->mNumVertices]; - } - - if (mesh->mTangents) { - tan = out->mTangents = new aiVector3D[out->mNumVertices]; - bit = out->mBitangents = new aiVector3D[out->mNumVertices]; - } - - for (unsigned int j = 0; j < AI_MAX_NUMBER_OF_TEXTURECOORDS;++j) { - uv[j] = nullptr; - if (mesh->mTextureCoords[j]) { - uv[j] = out->mTextureCoords[j] = new aiVector3D[out->mNumVertices]; - } - - out->mNumUVComponents[j] = mesh->mNumUVComponents[j]; - } - - for (unsigned int j = 0; j < AI_MAX_NUMBER_OF_COLOR_SETS;++j) { - cols[j] = nullptr; - if (mesh->mColors[j]) { - cols[j] = out->mColors[j] = new aiColor4D[out->mNumVertices]; - } - } - - typedef std::vector< aiVertexWeight > TempBoneInfo; - std::vector< TempBoneInfo > tempBones(mesh->mNumBones); - - // try to guess how much storage we'll need - for (unsigned int q = 0; q < mesh->mNumBones;++q) - { - tempBones[q].reserve(mesh->mBones[q]->mNumWeights / (num-1)); - } - - unsigned int outIdx = 0; - for (unsigned int m = 0; m < mesh->mNumFaces; ++m) - { - aiFace& in = mesh->mFaces[m]; - if ((real == 3 && in.mNumIndices <= 3) || (real != 3 && in.mNumIndices != real+1)) - { - continue; - } - - outFaces->mNumIndices = in.mNumIndices; - outFaces->mIndices = in.mIndices; - - for (unsigned int q = 0; q < in.mNumIndices; ++q) - { - unsigned int idx = in.mIndices[q]; - - // process all bones of this index - if (avw) - { - VertexWeightTable& tbl = avw[idx]; - for (VertexWeightTable::const_iterator it = tbl.begin(), end = tbl.end(); - it != end; ++it) - { - tempBones[ (*it).first ].push_back( aiVertexWeight(outIdx, (*it).second) ); - } - } - - if (vert) - { - *vert++ = mesh->mVertices[idx]; - //mesh->mVertices[idx].x = get_qnan(); - } - if (nor )*nor++ = mesh->mNormals[idx]; - if (tan ) - { - *tan++ = mesh->mTangents[idx]; - *bit++ = mesh->mBitangents[idx]; - } - - for (unsigned int pp = 0; pp < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++pp) - { - if (!uv[pp])break; - *uv[pp]++ = mesh->mTextureCoords[pp][idx]; - } - - for (unsigned int pp = 0; pp < AI_MAX_NUMBER_OF_COLOR_SETS; ++pp) - { - if (!cols[pp])break; - *cols[pp]++ = mesh->mColors[pp][idx]; - } - - in.mIndices[q] = outIdx++; - } - - in.mIndices = nullptr; - ++outFaces; - } - ai_assert(outFaces == out->mFaces + out->mNumFaces); - - // now generate output bones - for (unsigned int q = 0; q < mesh->mNumBones;++q) - if (!tempBones[q].empty())++out->mNumBones; - - if (out->mNumBones) - { - out->mBones = new aiBone*[out->mNumBones]; - for (unsigned int q = 0, real = 0; q < mesh->mNumBones;++q) - { - TempBoneInfo& in = tempBones[q]; - if (in.empty())continue; - - aiBone* srcBone = mesh->mBones[q]; - aiBone* bone = out->mBones[real] = new aiBone(); - - bone->mName = srcBone->mName; - bone->mOffsetMatrix = srcBone->mOffsetMatrix; - - bone->mNumWeights = (unsigned int)in.size(); - bone->mWeights = new aiVertexWeight[bone->mNumWeights]; - - ::memcpy(bone->mWeights,&in[0],bone->mNumWeights*sizeof(aiVertexWeight)); - - ++real; - } - } - } - - // delete the per-vertex bone weights table - delete[] avw; - - // delete the input mesh - delete mesh; - - // avoid invalid pointer - pScene->mMeshes[i] = NULL; - } - - if (outMeshes.empty()) - { - // This should not occur - 
throw DeadlyImportError("No meshes remaining"); - } - - // If we added at least one mesh process all nodes in the node - // graph and update their respective mesh indices. - if (bAnyChanges) - { - UpdateNodes(replaceMeshIndex,pScene->mRootNode); - } - - if (outMeshes.size() != pScene->mNumMeshes) - { - delete[] pScene->mMeshes; - pScene->mNumMeshes = (unsigned int)outMeshes.size(); - pScene->mMeshes = new aiMesh*[pScene->mNumMeshes]; - } - ::memcpy(pScene->mMeshes,&outMeshes[0],pScene->mNumMeshes*sizeof(void*)); - - if (!DefaultLogger::isNullLogger()) - { - char buffer[1024]; - ::ai_snprintf(buffer,1024,"Points: %u%s, Lines: %u%s, Triangles: %u%s, Polygons: %u%s (Meshes, X = removed)", - aiNumMeshesPerPType[0], ((mConfigRemoveMeshes & aiPrimitiveType_POINT) ? "X" : ""), - aiNumMeshesPerPType[1], ((mConfigRemoveMeshes & aiPrimitiveType_LINE) ? "X" : ""), - aiNumMeshesPerPType[2], ((mConfigRemoveMeshes & aiPrimitiveType_TRIANGLE) ? "X" : ""), - aiNumMeshesPerPType[3], ((mConfigRemoveMeshes & aiPrimitiveType_POLYGON) ? "X" : "")); - ASSIMP_LOG_INFO(buffer); - ASSIMP_LOG_DEBUG("SortByPTypeProcess finished"); - } -} - diff --git a/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.h b/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.h deleted file mode 100644 index 1d7ccfc152..0000000000 --- a/thirdparty/assimp/code/PostProcessing/SortByPTypeProcess.h +++ /dev/null @@ -1,82 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to sort meshes by the types - of primitives they contain */ -#ifndef AI_SORTBYPTYPEPROCESS_H_INC -#define AI_SORTBYPTYPEPROCESS_H_INC - -#include "Common/BaseProcess.h" -#include <assimp/mesh.h> - -class SortByPTypeProcessTest; - -namespace Assimp { - - -// --------------------------------------------------------------------------- -/** SortByPTypeProcess: Sorts meshes by the types of primitives they contain. - * A mesh with 5 lines, 3 points and 145 triangles would be split in 3 - * submeshes. -*/ -class ASSIMP_API SortByPTypeProcess : public BaseProcess { -public: - SortByPTypeProcess(); - ~SortByPTypeProcess(); - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - void SetupProperties(const Importer* pImp); - -private: - int mConfigRemoveMeshes; -}; - - -} // end of namespace Assimp - -#endif // !!AI_SORTBYPTYPEPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.cpp b/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.cpp deleted file mode 100644 index 1797b28d5a..0000000000 --- a/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.cpp +++ /dev/null @@ -1,623 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** - * @file Implementation of the SplitLargeMeshes postprocessing step - */ - -// internal headers of the post-processing framework -#include "SplitLargeMeshes.h" -#include "ProcessHelper.h" - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -SplitLargeMeshesProcess_Triangle::SplitLargeMeshesProcess_Triangle() { - LIMIT = AI_SLM_DEFAULT_MAX_TRIANGLES; -} - -// ------------------------------------------------------------------------------------------------ -SplitLargeMeshesProcess_Triangle::~SplitLargeMeshesProcess_Triangle() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool SplitLargeMeshesProcess_Triangle::IsActive( unsigned int pFlags) const { - return (pFlags & aiProcess_SplitLargeMeshes) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void SplitLargeMeshesProcess_Triangle::Execute( aiScene* pScene) { - if (0xffffffff == this->LIMIT || nullptr == pScene ) { - return; - } - - ASSIMP_LOG_DEBUG("SplitLargeMeshesProcess_Triangle begin"); - std::vector<std::pair<aiMesh*, unsigned int> > avList; - - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a) { - this->SplitMesh(a, pScene->mMeshes[a],avList); - } - - if (avList.size() != pScene->mNumMeshes) { - // it seems something has been split. rebuild the mesh list - delete[] pScene->mMeshes; - pScene->mNumMeshes = (unsigned int)avList.size(); - pScene->mMeshes = new aiMesh*[avList.size()]; - - for (unsigned int i = 0; i < avList.size();++i) { - pScene->mMeshes[i] = avList[i].first; - } - - // now we need to update all nodes - this->UpdateNode(pScene->mRootNode,avList); - ASSIMP_LOG_INFO("SplitLargeMeshesProcess_Triangle finished. Meshes have been split"); - } else { - ASSIMP_LOG_DEBUG("SplitLargeMeshesProcess_Triangle finished. 
There was nothing to do"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties -void SplitLargeMeshesProcess_Triangle::SetupProperties( const Importer* pImp) { - // get the current value of the split property - this->LIMIT = pImp->GetPropertyInteger(AI_CONFIG_PP_SLM_TRIANGLE_LIMIT,AI_SLM_DEFAULT_MAX_TRIANGLES); -} - -// ------------------------------------------------------------------------------------------------ -// Update a node after some meshes have been split -void SplitLargeMeshesProcess_Triangle::UpdateNode(aiNode* pcNode, - const std::vector<std::pair<aiMesh*, unsigned int> >& avList) { - // for every index in out list build a new entry - std::vector<unsigned int> aiEntries; - aiEntries.reserve(pcNode->mNumMeshes + 1); - for (unsigned int i = 0; i < pcNode->mNumMeshes;++i) { - for (unsigned int a = 0; a < avList.size();++a) { - if (avList[a].second == pcNode->mMeshes[i]) { - aiEntries.push_back(a); - } - } - } - - // now build the new list - delete[] pcNode->mMeshes; - pcNode->mNumMeshes = (unsigned int)aiEntries.size(); - pcNode->mMeshes = new unsigned int[pcNode->mNumMeshes]; - - for (unsigned int b = 0; b < pcNode->mNumMeshes;++b) { - pcNode->mMeshes[b] = aiEntries[b]; - } - - // recusively update all other nodes - for (unsigned int i = 0; i < pcNode->mNumChildren;++i) { - UpdateNode ( pcNode->mChildren[i], avList ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void SplitLargeMeshesProcess_Triangle::SplitMesh( - unsigned int a, - aiMesh* pMesh, - std::vector<std::pair<aiMesh*, unsigned int> >& avList) { - if (pMesh->mNumFaces > SplitLargeMeshesProcess_Triangle::LIMIT) { - ASSIMP_LOG_INFO("Mesh exceeds the triangle limit. 
It will be split ..."); - - // we need to split this mesh into sub meshes - // determine the size of a submesh - const unsigned int iSubMeshes = (pMesh->mNumFaces / LIMIT) + 1; - - const unsigned int iOutFaceNum = pMesh->mNumFaces / iSubMeshes; - const unsigned int iOutVertexNum = iOutFaceNum * 3; - - // now generate all submeshes - for (unsigned int i = 0; i < iSubMeshes;++i) { - aiMesh* pcMesh = new aiMesh; - pcMesh->mNumFaces = iOutFaceNum; - pcMesh->mMaterialIndex = pMesh->mMaterialIndex; - - // the name carries the adjacency information between the meshes - pcMesh->mName = pMesh->mName; - - if (i == iSubMeshes-1) { - pcMesh->mNumFaces = iOutFaceNum + ( - pMesh->mNumFaces - iOutFaceNum * iSubMeshes); - } - // copy the list of faces - pcMesh->mFaces = new aiFace[pcMesh->mNumFaces]; - - const unsigned int iBase = iOutFaceNum * i; - - // get the total number of indices - unsigned int iCnt = 0; - for (unsigned int p = iBase; p < pcMesh->mNumFaces + iBase;++p) { - iCnt += pMesh->mFaces[p].mNumIndices; - } - pcMesh->mNumVertices = iCnt; - - // allocate storage - if (pMesh->mVertices != nullptr) { - pcMesh->mVertices = new aiVector3D[iCnt]; - } - - if (pMesh->HasNormals()) { - pcMesh->mNormals = new aiVector3D[iCnt]; - } - - if (pMesh->HasTangentsAndBitangents()) { - pcMesh->mTangents = new aiVector3D[iCnt]; - pcMesh->mBitangents = new aiVector3D[iCnt]; - } - - // texture coordinates - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS;++c) { - pcMesh->mNumUVComponents[c] = pMesh->mNumUVComponents[c]; - if (pMesh->HasTextureCoords( c)) { - pcMesh->mTextureCoords[c] = new aiVector3D[iCnt]; - } - } - - // vertex colors - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS;++c) { - if (pMesh->HasVertexColors( c)) { - pcMesh->mColors[c] = new aiColor4D[iCnt]; - } - } - - if (pMesh->HasBones()) { - // assume the number of bones won't change in most cases - pcMesh->mBones = new aiBone*[pMesh->mNumBones]; - - // iterate through all bones of the mesh and find those which - // need to be copied to the split mesh - std::vector<aiVertexWeight> avTempWeights; - for (unsigned int p = 0; p < pcMesh->mNumBones;++p) { - aiBone* const bone = pcMesh->mBones[p]; - avTempWeights.clear(); - avTempWeights.reserve(bone->mNumWeights / iSubMeshes); - - for (unsigned int q = 0; q < bone->mNumWeights;++q) { - aiVertexWeight& weight = bone->mWeights[q]; - if(weight.mVertexId >= iBase && weight.mVertexId < iBase + iOutVertexNum) { - avTempWeights.push_back(weight); - weight = avTempWeights.back(); - weight.mVertexId -= iBase; - } - } - - if (!avTempWeights.empty()) { - // we'll need this bone. Copy it ... - aiBone* pc = new aiBone(); - pcMesh->mBones[pcMesh->mNumBones++] = pc; - pc->mName = aiString(bone->mName); - pc->mNumWeights = (unsigned int)avTempWeights.size(); - pc->mOffsetMatrix = bone->mOffsetMatrix; - - // no need to reallocate the array for the last submesh. 
- // Here we can reuse the (large) source array, although - // we'll waste some memory - if (iSubMeshes-1 == i) { - pc->mWeights = bone->mWeights; - bone->mWeights = nullptr; - } else { - pc->mWeights = new aiVertexWeight[pc->mNumWeights]; - } - - // copy the weights - ::memcpy(pc->mWeights,&avTempWeights[0],sizeof(aiVertexWeight)*pc->mNumWeights); - } - } - } - - // (we will also need to copy the array of indices) - unsigned int iCurrent = 0; - for (unsigned int p = 0; p < pcMesh->mNumFaces;++p) { - pcMesh->mFaces[p].mNumIndices = 3; - // allocate a new array - const unsigned int iTemp = p + iBase; - const unsigned int iNumIndices = pMesh->mFaces[iTemp].mNumIndices; - - // setup face type and number of indices - pcMesh->mFaces[p].mNumIndices = iNumIndices; - unsigned int* pi = pMesh->mFaces[iTemp].mIndices; - unsigned int* piOut = pcMesh->mFaces[p].mIndices = new unsigned int[iNumIndices]; - - // need to update the output primitive types - switch (iNumIndices) { - case 1: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 2: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 3: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - } - - // and copy the contents of the old array, offset by current base - for (unsigned int v = 0; v < iNumIndices;++v) { - unsigned int iIndex = pi[v]; - unsigned int iIndexOut = iCurrent++; - piOut[v] = iIndexOut; - - // copy positions - if (pMesh->mVertices != nullptr) { - pcMesh->mVertices[iIndexOut] = pMesh->mVertices[iIndex]; - } - - // copy normals - if (pMesh->HasNormals()) { - pcMesh->mNormals[iIndexOut] = pMesh->mNormals[iIndex]; - } - - // copy tangents/bitangents - if (pMesh->HasTangentsAndBitangents()) { - pcMesh->mTangents[iIndexOut] = pMesh->mTangents[iIndex]; - pcMesh->mBitangents[iIndexOut] = pMesh->mBitangents[iIndex]; - } - - // texture coordinates - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS;++c) { - if (pMesh->HasTextureCoords( c ) ) { - pcMesh->mTextureCoords[c][iIndexOut] = pMesh->mTextureCoords[c][iIndex]; - } - } - // vertex colors - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS;++c) { - if (pMesh->HasVertexColors( c)) { - pcMesh->mColors[c][iIndexOut] = pMesh->mColors[c][iIndex]; - } - } - } - } - - // add the newly created mesh to the list - avList.push_back(std::pair<aiMesh*, unsigned int>(pcMesh,a)); - } - - // now delete the old mesh data - delete pMesh; - } else { - avList.push_back(std::pair<aiMesh*, unsigned int>(pMesh,a)); - } -} - -// ------------------------------------------------------------------------------------------------ -SplitLargeMeshesProcess_Vertex::SplitLargeMeshesProcess_Vertex() { - LIMIT = AI_SLM_DEFAULT_MAX_VERTICES; -} - -// ------------------------------------------------------------------------------------------------ -SplitLargeMeshesProcess_Vertex::~SplitLargeMeshesProcess_Vertex() { - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool SplitLargeMeshesProcess_Vertex::IsActive( unsigned int pFlags) const { - return (pFlags & aiProcess_SplitLargeMeshes) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. 
-void SplitLargeMeshesProcess_Vertex::Execute( aiScene* pScene) { - if (0xffffffff == this->LIMIT || nullptr == pScene ) { - return; - } - - ASSIMP_LOG_DEBUG("SplitLargeMeshesProcess_Vertex begin"); - - std::vector<std::pair<aiMesh*, unsigned int> > avList; - - //Check for point cloud first, - //Do not process point cloud, splitMesh works only with faces data - for (unsigned int a = 0; a < pScene->mNumMeshes; a++) { - if ( pScene->mMeshes[a]->mPrimitiveTypes == aiPrimitiveType_POINT ) { - return; - } - } - - for( unsigned int a = 0; a < pScene->mNumMeshes; ++a ) { - this->SplitMesh(a, pScene->mMeshes[a], avList); - } - - if (avList.size() != pScene->mNumMeshes) { - // it seems something has been split. rebuild the mesh list - delete[] pScene->mMeshes; - pScene->mNumMeshes = (unsigned int)avList.size(); - pScene->mMeshes = new aiMesh*[avList.size()]; - - for (unsigned int i = 0; i < avList.size();++i) { - pScene->mMeshes[i] = avList[i].first; - } - - // now we need to update all nodes - SplitLargeMeshesProcess_Triangle::UpdateNode(pScene->mRootNode,avList); - ASSIMP_LOG_INFO("SplitLargeMeshesProcess_Vertex finished. Meshes have been split"); - } else { - ASSIMP_LOG_DEBUG("SplitLargeMeshesProcess_Vertex finished. There was nothing to do"); - } -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties -void SplitLargeMeshesProcess_Vertex::SetupProperties( const Importer* pImp) { - this->LIMIT = pImp->GetPropertyInteger(AI_CONFIG_PP_SLM_VERTEX_LIMIT,AI_SLM_DEFAULT_MAX_VERTICES); -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void SplitLargeMeshesProcess_Vertex::SplitMesh( - unsigned int a, - aiMesh* pMesh, - std::vector<std::pair<aiMesh*, unsigned int> >& avList) { - if (pMesh->mNumVertices > SplitLargeMeshesProcess_Vertex::LIMIT) { - typedef std::vector< std::pair<unsigned int,float> > VertexWeightTable; - - // build a per-vertex weight list if necessary - VertexWeightTable* avPerVertexWeights = ComputeVertexBoneWeightTable(pMesh); - - // we need to split this mesh into sub meshes - // determine the estimated size of a submesh - // (this could be too large. Max waste is a single digit percentage) - const unsigned int iSubMeshes = (pMesh->mNumVertices / SplitLargeMeshesProcess_Vertex::LIMIT) + 1; - - // create a std::vector<unsigned int> to indicate which vertices - // have already been copied - std::vector<unsigned int> avWasCopied; - avWasCopied.resize(pMesh->mNumVertices,0xFFFFFFFF); - - // try to find a good estimate for the number of output faces - // per mesh. 
Add 12.5% as buffer - unsigned int iEstimatedSize = pMesh->mNumFaces / iSubMeshes; - iEstimatedSize += iEstimatedSize >> 3; - - // now generate all submeshes - unsigned int iBase( 0 ); - while (true) { - const unsigned int iOutVertexNum = SplitLargeMeshesProcess_Vertex::LIMIT; - aiMesh* pcMesh = new aiMesh; - pcMesh->mNumVertices = 0; - pcMesh->mMaterialIndex = pMesh->mMaterialIndex; - - // the name carries the adjacency information between the meshes - pcMesh->mName = pMesh->mName; - - typedef std::vector<aiVertexWeight> BoneWeightList; - if (pMesh->HasBones()) { - pcMesh->mBones = new aiBone*[pMesh->mNumBones]; - ::memset(pcMesh->mBones,0,sizeof(void*)*pMesh->mNumBones); - } - - // clear the temporary helper array - if (iBase) { - // we can't use memset here we unsigned int needn' be 32 bits - for (auto &elem : avWasCopied) { - elem = 0xffffffff; - } - } - - // output vectors - std::vector<aiFace> vFaces; - - // reserve enough storage for most cases - if (pMesh->HasPositions()) { - pcMesh->mVertices = new aiVector3D[iOutVertexNum]; - } - if (pMesh->HasNormals()) { - pcMesh->mNormals = new aiVector3D[iOutVertexNum]; - } - if (pMesh->HasTangentsAndBitangents()) { - pcMesh->mTangents = new aiVector3D[iOutVertexNum]; - pcMesh->mBitangents = new aiVector3D[iOutVertexNum]; - } - for (unsigned int c = 0; pMesh->HasVertexColors(c);++c) { - pcMesh->mColors[c] = new aiColor4D[iOutVertexNum]; - } - for (unsigned int c = 0; pMesh->HasTextureCoords(c);++c) { - pcMesh->mNumUVComponents[c] = pMesh->mNumUVComponents[c]; - pcMesh->mTextureCoords[c] = new aiVector3D[iOutVertexNum]; - } - vFaces.reserve(iEstimatedSize); - - // (we will also need to copy the array of indices) - while (iBase < pMesh->mNumFaces) { - // allocate a new array - const unsigned int iNumIndices = pMesh->mFaces[iBase].mNumIndices; - - // doesn't catch degenerates but is quite fast - unsigned int iNeed = 0; - for (unsigned int v = 0; v < iNumIndices;++v) { - unsigned int iIndex = pMesh->mFaces[iBase].mIndices[v]; - - // check whether we do already have this vertex - if (0xFFFFFFFF == avWasCopied[iIndex]) { - iNeed++; - } - } - if (pcMesh->mNumVertices + iNeed > iOutVertexNum) { - // don't use this face - break; - } - - vFaces.push_back(aiFace()); - aiFace& rFace = vFaces.back(); - - // setup face type and number of indices - rFace.mNumIndices = iNumIndices; - rFace.mIndices = new unsigned int[iNumIndices]; - - // need to update the output primitive types - switch (rFace.mNumIndices) { - case 1: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_POINT; - break; - case 2: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_LINE; - break; - case 3: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - break; - default: - pcMesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON; - } - - // and copy the contents of the old array, offset by current base - for (unsigned int v = 0; v < iNumIndices;++v) { - unsigned int iIndex = pMesh->mFaces[iBase].mIndices[v]; - - // check whether we do already have this vertex - if (0xFFFFFFFF != avWasCopied[iIndex]) { - rFace.mIndices[v] = avWasCopied[iIndex]; - continue; - } - - // copy positions - pcMesh->mVertices[pcMesh->mNumVertices] = (pMesh->mVertices[iIndex]); - - // copy normals - if (pMesh->HasNormals()) { - pcMesh->mNormals[pcMesh->mNumVertices] = (pMesh->mNormals[iIndex]); - } - - // copy tangents/bitangents - if (pMesh->HasTangentsAndBitangents()) { - pcMesh->mTangents[pcMesh->mNumVertices] = (pMesh->mTangents[iIndex]); - pcMesh->mBitangents[pcMesh->mNumVertices] = (pMesh->mBitangents[iIndex]); - } - - // 
texture coordinates - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS;++c) { - if (pMesh->HasTextureCoords( c)) { - pcMesh->mTextureCoords[c][pcMesh->mNumVertices] = pMesh->mTextureCoords[c][iIndex]; - } - } - // vertex colors - for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS;++c) { - if (pMesh->HasVertexColors( c)) { - pcMesh->mColors[c][pcMesh->mNumVertices] = pMesh->mColors[c][iIndex]; - } - } - // check whether we have bone weights assigned to this vertex - rFace.mIndices[v] = pcMesh->mNumVertices; - if (avPerVertexWeights) { - VertexWeightTable& table = avPerVertexWeights[ pcMesh->mNumVertices ]; - if( !table.empty() ) { - for (VertexWeightTable::const_iterator iter = table.begin(); - iter != table.end();++iter) { - // allocate the bone weight array if necessary - BoneWeightList* pcWeightList = (BoneWeightList*)pcMesh->mBones[(*iter).first]; - if (nullptr == pcWeightList) { - pcMesh->mBones[(*iter).first] = (aiBone*)(pcWeightList = new BoneWeightList()); - } - pcWeightList->push_back(aiVertexWeight(pcMesh->mNumVertices,(*iter).second)); - } - } - } - - avWasCopied[iIndex] = pcMesh->mNumVertices; - pcMesh->mNumVertices++; - } - ++iBase; - if(pcMesh->mNumVertices == iOutVertexNum) { - // break here. The face is only added if it was complete - break; - } - } - - // check which bones we'll need to create for this submesh - if (pMesh->HasBones()) { - aiBone** ppCurrent = pcMesh->mBones; - for (unsigned int k = 0; k < pMesh->mNumBones;++k) { - // check whether the bone is existing - BoneWeightList* pcWeightList; - if ((pcWeightList = (BoneWeightList*)pcMesh->mBones[k])) { - aiBone* pcOldBone = pMesh->mBones[k]; - aiBone* pcOut( nullptr ); - *ppCurrent++ = pcOut = new aiBone(); - pcOut->mName = aiString(pcOldBone->mName); - pcOut->mOffsetMatrix = pcOldBone->mOffsetMatrix; - pcOut->mNumWeights = (unsigned int)pcWeightList->size(); - pcOut->mWeights = new aiVertexWeight[pcOut->mNumWeights]; - - // copy the vertex weights - ::memcpy(pcOut->mWeights,&pcWeightList->operator[](0), - pcOut->mNumWeights * sizeof(aiVertexWeight)); - - // delete the temporary bone weight list - delete pcWeightList; - pcMesh->mNumBones++; - } - } - } - - // copy the face list to the mesh - pcMesh->mFaces = new aiFace[vFaces.size()]; - pcMesh->mNumFaces = (unsigned int)vFaces.size(); - - for (unsigned int p = 0; p < pcMesh->mNumFaces;++p) { - pcMesh->mFaces[p] = vFaces[p]; - } - - // add the newly created mesh to the list - avList.push_back(std::pair<aiMesh*, unsigned int>(pcMesh,a)); - - if (iBase == pMesh->mNumFaces) { - // have all faces ... finish the outer loop, too - break; - } - } - - // delete the per-vertex weight list again - delete[] avPerVertexWeights; - - // now delete the old mesh data - delete pMesh; - return; - } - avList.push_back(std::pair<aiMesh*, unsigned int>(pMesh,a)); -} diff --git a/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.h b/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.h deleted file mode 100644 index 3f90576ea9..0000000000 --- a/thirdparty/assimp/code/PostProcessing/SplitLargeMeshes.h +++ /dev/null @@ -1,209 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to split large meshes into sub-meshes - */ -#ifndef AI_SPLITLARGEMESHES_H_INC -#define AI_SPLITLARGEMESHES_H_INC - -#include <vector> -#include "Common/BaseProcess.h" - -#include <assimp/mesh.h> -#include <assimp/scene.h> - -// Forward declarations -class SplitLargeMeshesTest; - -namespace Assimp { - -class SplitLargeMeshesProcess_Triangle; -class SplitLargeMeshesProcess_Vertex; - -// NOTE: If you change these limits, don't forget to change the -// corresponding values in all Assimp ports - -// ********************************************************** -// Java: ConfigProperty.java, -// ConfigProperty.DEFAULT_VERTEX_SPLIT_LIMIT -// ConfigProperty.DEFAULT_TRIANGLE_SPLIT_LIMIT -// ********************************************************** - -// default limit for vertices -#if (!defined AI_SLM_DEFAULT_MAX_VERTICES) -# define AI_SLM_DEFAULT_MAX_VERTICES 1000000 -#endif - -// default limit for triangles -#if (!defined AI_SLM_DEFAULT_MAX_TRIANGLES) -# define AI_SLM_DEFAULT_MAX_TRIANGLES 1000000 -#endif - -// --------------------------------------------------------------------------- -/** Post-processing filter to split large meshes into sub-meshes - * - * Applied BEFORE the JoinVertices-Step occurs. - * Returns NON-UNIQUE vertices, splits by triangle number. -*/ -class ASSIMP_API SplitLargeMeshesProcess_Triangle : public BaseProcess -{ - friend class SplitLargeMeshesProcess_Vertex; - -public: - - SplitLargeMeshesProcess_Triangle(); - ~SplitLargeMeshesProcess_Triangle(); - -public: - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag. - * @param pFlags The processing flags the importer was called with. A - * bitwise combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, - * false if not. 
- */ - bool IsActive( unsigned int pFlags) const; - - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - virtual void SetupProperties(const Importer* pImp); - - - //! Set the split limit - needed for unit testing - inline void SetLimit(unsigned int l) - {LIMIT = l;} - - //! Get the split limit - inline unsigned int GetLimit() const - {return LIMIT;} - -public: - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - //! Apply the algorithm to a given mesh - void SplitMesh (unsigned int a, aiMesh* pcMesh, - std::vector<std::pair<aiMesh*, unsigned int> >& avList); - - // ------------------------------------------------------------------- - //! Update a node in the asset after a few of its meshes - //! have been split - static void UpdateNode(aiNode* pcNode, - const std::vector<std::pair<aiMesh*, unsigned int> >& avList); - -public: - //! Triangle limit - unsigned int LIMIT; -}; - - -// --------------------------------------------------------------------------- -/** Post-processing filter to split large meshes into sub-meshes - * - * Applied AFTER the JoinVertices-Step occurs. - * Returns UNIQUE vertices, splits by vertex number. -*/ -class ASSIMP_API SplitLargeMeshesProcess_Vertex : public BaseProcess -{ -public: - - SplitLargeMeshesProcess_Vertex(); - ~SplitLargeMeshesProcess_Vertex(); - -public: - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Called prior to ExecuteOnScene(). - * The function is a request to the process to update its configuration - * basing on the Importer's configuration property list. - */ - virtual void SetupProperties(const Importer* pImp); - - - //! Set the split limit - needed for unit testing - inline void SetLimit(unsigned int l) - {LIMIT = l;} - - //! Get the split limit - inline unsigned int GetLimit() const - {return LIMIT;} - -public: - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - //! Apply the algorithm to a given mesh - void SplitMesh (unsigned int a, aiMesh* pcMesh, - std::vector<std::pair<aiMesh*, unsigned int> >& avList); - - // NOTE: Reuse SplitLargeMeshesProcess_Triangle::UpdateNode() - -public: - //! 
Triangle limit - unsigned int LIMIT; -}; - -} // end of namespace Assimp - -#endif // !!AI_SPLITLARGEMESHES_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/TextureTransform.cpp b/thirdparty/assimp/code/PostProcessing/TextureTransform.cpp deleted file mode 100644 index 8ae2ba7218..0000000000 --- a/thirdparty/assimp/code/PostProcessing/TextureTransform.cpp +++ /dev/null @@ -1,566 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file A helper class that processes texture transformations */ - - - -#include <assimp/Importer.hpp> -#include <assimp/postprocess.h> -#include <assimp/DefaultLogger.hpp> -#include <assimp/scene.h> - -#include "TextureTransform.h" -#include <assimp/StringUtils.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -TextureTransformStep::TextureTransformStep() : - configFlags() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -TextureTransformStep::~TextureTransformStep() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool TextureTransformStep::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_TransformUVCoords) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Setup properties -void TextureTransformStep::SetupProperties(const Importer* pImp) -{ - configFlags = pImp->GetPropertyInteger(AI_CONFIG_PP_TUV_EVALUATE,AI_UVTRAFO_ALL); -} - -// ------------------------------------------------------------------------------------------------ -void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info) -{ - /* This function tries to simplify the input UV transformation. - * That's very important as it allows us to reduce the number - * of output UV channels. The order in which the transformations - * are applied is - as always - scaling, rotation, translation. - */ - - char szTemp[512]; - int rounded = 0; - - - /* Optimize the rotation angle. That's slightly difficult as - * we have an inprecise floating-point number (when comparing - * UV transformations we'll take that into account by using - * an epsilon of 5 degrees). If there is a rotation value, we can't - * perform any further optimizations. - */ - if (info.mRotation) - { - float out = info.mRotation; - if ((rounded = static_cast<int>((info.mRotation / static_cast<float>(AI_MATH_TWO_PI))))) - { - out -= rounded * static_cast<float>(AI_MATH_PI); - ASSIMP_LOG_INFO_F("Texture coordinate rotation ", info.mRotation, " can be simplified to ", out); - } - - // Next step - convert negative rotation angles to positives - if (out < 0.f) - out = (float)AI_MATH_TWO_PI * 2 + out; - - info.mRotation = out; - return; - } - - - /* Optimize UV translation in the U direction. To determine whether - * or not we can optimize we need to look at the requested mapping - * type (e.g. if mirroring is active there IS a difference between - * offset 2 and 3) - */ - if ((rounded = (int)info.mTranslation.x)) { - float out = 0.0f; - szTemp[0] = 0; - if (aiTextureMapMode_Wrap == info.mapU) { - // Wrap - simple take the fraction of the field - out = info.mTranslation.x-(float)rounded; - ai_snprintf(szTemp, 512, "[w] UV U offset %f can be simplified to %f", info.mTranslation.x, out); - } - else if (aiTextureMapMode_Mirror == info.mapU && 1 != rounded) { - // Mirror - if (rounded % 2) - rounded--; - out = info.mTranslation.x-(float)rounded; - - ai_snprintf(szTemp,512,"[m/d] UV U offset %f can be simplified to %f",info.mTranslation.x,out); - } - else if (aiTextureMapMode_Clamp == info.mapU || aiTextureMapMode_Decal == info.mapU) { - // Clamp - translations beyond 1,1 are senseless - ai_snprintf(szTemp,512,"[c] UV U offset %f can be clamped to 1.0f",info.mTranslation.x); - - out = 1.f; - } - if (szTemp[0]) { - ASSIMP_LOG_INFO(szTemp); - info.mTranslation.x = out; - } - } - - /* Optimize UV translation in the V direction. To determine whether - * or not we can optimize we need to look at the requested mapping - * type (e.g. 
if mirroring is active there IS a difference between - * offset 2 and 3) - */ - if ((rounded = (int)info.mTranslation.y)) { - float out = 0.0f; - szTemp[0] = 0; - if (aiTextureMapMode_Wrap == info.mapV) { - // Wrap - simple take the fraction of the field - out = info.mTranslation.y-(float)rounded; - ::ai_snprintf(szTemp,512,"[w] UV V offset %f can be simplified to %f",info.mTranslation.y,out); - } - else if (aiTextureMapMode_Mirror == info.mapV && 1 != rounded) { - // Mirror - if (rounded % 2) - rounded--; - out = info.mTranslation.x-(float)rounded; - - ::ai_snprintf(szTemp,512,"[m/d] UV V offset %f can be simplified to %f",info.mTranslation.y,out); - } - else if (aiTextureMapMode_Clamp == info.mapV || aiTextureMapMode_Decal == info.mapV) { - // Clamp - translations beyond 1,1 are senseless - ::ai_snprintf(szTemp,512,"[c] UV V offset %f canbe clamped to 1.0f",info.mTranslation.y); - - out = 1.f; - } - if (szTemp[0]) { - ASSIMP_LOG_INFO(szTemp); - info.mTranslation.y = out; - } - } - return; -} - -// ------------------------------------------------------------------------------------------------ -void UpdateUVIndex(const std::list<TTUpdateInfo>& l, unsigned int n) -{ - // Don't set if == 0 && wasn't set before - for (std::list<TTUpdateInfo>::const_iterator it = l.begin();it != l.end(); ++it) { - const TTUpdateInfo& info = *it; - - if (info.directShortcut) - *info.directShortcut = n; - else if (!n) - { - info.mat->AddProperty<int>((int*)&n,1,AI_MATKEY_UVWSRC(info.semantic,info.index)); - } - } -} - -// ------------------------------------------------------------------------------------------------ -inline const char* MappingModeToChar(aiTextureMapMode map) -{ - if (aiTextureMapMode_Wrap == map) - return "-w"; - - if (aiTextureMapMode_Mirror == map) - return "-m"; - - return "-c"; -} - -// ------------------------------------------------------------------------------------------------ -void TextureTransformStep::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("TransformUVCoordsProcess begin"); - - - /* We build a per-mesh list of texture transformations we'll need - * to apply. To achieve this, we iterate through all materials, - * find all textures and get their transformations and UV indices. - * Then we search for all meshes using this material. 
- */ - typedef std::list<STransformVecInfo> MeshTrafoList; - std::vector<MeshTrafoList> meshLists(pScene->mNumMeshes); - - for (unsigned int i = 0; i < pScene->mNumMaterials;++i) { - - aiMaterial* mat = pScene->mMaterials[i]; - for (unsigned int a = 0; a < mat->mNumProperties;++a) { - - aiMaterialProperty* prop = mat->mProperties[a]; - if (!::strcmp( prop->mKey.data, "$tex.file")) { - STransformVecInfo info; - - // Setup a shortcut structure to allow for a fast updating - // of the UV index later - TTUpdateInfo update; - update.mat = (aiMaterial*) mat; - update.semantic = prop->mSemantic; - update.index = prop->mIndex; - - // Get textured properties and transform - for (unsigned int a2 = 0; a2 < mat->mNumProperties;++a2) { - aiMaterialProperty* prop2 = mat->mProperties[a2]; - if (prop2->mSemantic != prop->mSemantic || prop2->mIndex != prop->mIndex) { - continue; - } - - if ( !::strcmp( prop2->mKey.data, "$tex.uvwsrc")) { - info.uvIndex = *((int*)prop2->mData); - - // Store a direct pointer for later use - update.directShortcut = (unsigned int*) prop2->mData; - } - - else if ( !::strcmp( prop2->mKey.data, "$tex.mapmodeu")) { - info.mapU = *((aiTextureMapMode*)prop2->mData); - } - else if ( !::strcmp( prop2->mKey.data, "$tex.mapmodev")) { - info.mapV = *((aiTextureMapMode*)prop2->mData); - } - else if ( !::strcmp( prop2->mKey.data, "$tex.uvtrafo")) { - // ValidateDS should check this - ai_assert(prop2->mDataLength >= 20); - ::memcpy(&info.mTranslation.x,prop2->mData,sizeof(float)*5); - - // Directly remove this property from the list - mat->mNumProperties--; - for (unsigned int a3 = a2; a3 < mat->mNumProperties;++a3) { - mat->mProperties[a3] = mat->mProperties[a3+1]; - } - - delete prop2; - - // Warn: could be an underflow, but this does not invoke undefined behaviour - --a2; - } - } - - // Find out which transformations are to be evaluated - if (!(configFlags & AI_UVTRAFO_ROTATION)) { - info.mRotation = 0.f; - } - if (!(configFlags & AI_UVTRAFO_SCALING)) { - info.mScaling = aiVector2D(1.f,1.f); - } - if (!(configFlags & AI_UVTRAFO_TRANSLATION)) { - info.mTranslation = aiVector2D(0.f,0.f); - } - - // Do some preprocessing - PreProcessUVTransform(info); - info.uvIndex = std::min(info.uvIndex,AI_MAX_NUMBER_OF_TEXTURECOORDS -1u); - - // Find out whether this material is used by more than - // one mesh. This will make our task much, much more difficult! - unsigned int cnt = 0; - for (unsigned int n = 0; n < pScene->mNumMeshes;++n) { - if (pScene->mMeshes[n]->mMaterialIndex == i) - ++cnt; - } - - if (!cnt) - continue; - else if (1 != cnt) { - // This material is referenced by more than one mesh! - // So we need to make sure the UV index for the texture - // is identical for each of it ... - info.lockedPos = AI_TT_UV_IDX_LOCK_TBD; - } - - // Get all corresponding meshes - for (unsigned int n = 0; n < pScene->mNumMeshes;++n) { - aiMesh* mesh = pScene->mMeshes[n]; - if (mesh->mMaterialIndex != i || !mesh->mTextureCoords[0]) - continue; - - unsigned int uv = info.uvIndex; - if (!mesh->mTextureCoords[uv]) { - // If the requested UV index is not available, take the first one instead. 
- uv = 0; - } - - if (mesh->mNumUVComponents[info.uvIndex] >= 3){ - ASSIMP_LOG_WARN("UV transformations on 3D mapping channels are not supported"); - continue; - } - - MeshTrafoList::iterator it; - - // Check whether we have this transform setup already - for (it = meshLists[n].begin();it != meshLists[n].end(); ++it) { - - if ((*it) == info && (*it).uvIndex == uv) { - (*it).updateList.push_back(update); - break; - } - } - - if (it == meshLists[n].end()) { - meshLists[n].push_back(info); - meshLists[n].back().uvIndex = uv; - meshLists[n].back().updateList.push_back(update); - } - } - } - } - } - - char buffer[1024]; // should be sufficiently large - unsigned int outChannels = 0, inChannels = 0, transformedChannels = 0; - - // Now process all meshes. Important: we don't remove unreferenced UV channels. - // This is a job for the RemoveUnreferencedData-Step. - for (unsigned int q = 0; q < pScene->mNumMeshes;++q) { - - aiMesh* mesh = pScene->mMeshes[q]; - MeshTrafoList& trafo = meshLists[q]; - - inChannels += mesh->GetNumUVChannels(); - - if (!mesh->mTextureCoords[0] || trafo.empty() || (trafo.size() == 1 && trafo.begin()->IsUntransformed())) { - outChannels += mesh->GetNumUVChannels(); - continue; - } - - // Move untransformed UV channels to the first position in the list .... - // except if we need a new locked index which should be as small as possible - bool veto = false, need = false; - unsigned int cnt = 0; - unsigned int untransformed = 0; - - MeshTrafoList::iterator it,it2; - for (it = trafo.begin();it != trafo.end(); ++it,++cnt) { - - if (!(*it).IsUntransformed()) { - need = true; - } - - if ((*it).lockedPos == AI_TT_UV_IDX_LOCK_TBD) { - // Lock this index and make sure it won't be changed - (*it).lockedPos = cnt; - veto = true; - continue; - } - - if (!veto && it != trafo.begin() && (*it).IsUntransformed()) { - for (it2 = trafo.begin();it2 != it; ++it2) { - if (!(*it2).IsUntransformed()) - break; - } - trafo.insert(it2,*it); - trafo.erase(it); - break; - } - } - if (!need) - continue; - - // Find all that are not at their 'locked' position and move them to it. - // Conflicts are possible but quite unlikely. - cnt = 0; - for (it = trafo.begin();it != trafo.end(); ++it,++cnt) { - if ((*it).lockedPos != AI_TT_UV_IDX_LOCK_NONE && (*it).lockedPos != cnt) { - it2 = trafo.begin();unsigned int t = 0; - while (t != (*it).lockedPos) - ++it2; - - if ((*it2).lockedPos != AI_TT_UV_IDX_LOCK_NONE) { - ASSIMP_LOG_ERROR("Channel mismatch, can't compute all transformations properly [design bug]"); - continue; - } - - std::swap(*it2,*it); - if ((*it).lockedPos == untransformed) - untransformed = cnt; - } - } - - // ... and add dummies for all unreferenced channels - // at the end of the list - bool ref[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - for (unsigned int n = 0; n < AI_MAX_NUMBER_OF_TEXTURECOORDS;++n) - ref[n] = (!mesh->mTextureCoords[n] ? true : false); - - for (it = trafo.begin();it != trafo.end(); ++it) - ref[(*it).uvIndex] = true; - - for (unsigned int n = 0; n < AI_MAX_NUMBER_OF_TEXTURECOORDS;++n) { - if (ref[n]) - continue; - trafo.push_back(STransformVecInfo()); - trafo.back().uvIndex = n; - } - - // Then check whether this list breaks the channel limit. - // The unimportant ones are at the end of the list, so - // it shouldn't be too worse if we remove them. 
- unsigned int size = (unsigned int)trafo.size(); - if (size > AI_MAX_NUMBER_OF_TEXTURECOORDS) { - - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_ERROR_F(static_cast<unsigned int>(trafo.size()), " UV channels required but just ", - AI_MAX_NUMBER_OF_TEXTURECOORDS, " available"); - } - size = AI_MAX_NUMBER_OF_TEXTURECOORDS; - } - - - aiVector3D* old[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - for (unsigned int n = 0; n < AI_MAX_NUMBER_OF_TEXTURECOORDS;++n) - old[n] = mesh->mTextureCoords[n]; - - // Now continue and generate the output channels. Channels - // that we're not going to need later can be overridden. - it = trafo.begin(); - for (unsigned int n = 0; n < trafo.size();++n,++it) { - - if (n >= size) { - // Try to use an untransformed channel for all channels we threw over board - UpdateUVIndex((*it).updateList,untransformed); - continue; - } - - outChannels++; - - // Write to the log - if (!DefaultLogger::isNullLogger()) { - ::ai_snprintf(buffer,1024,"Mesh %u, channel %u: t(%.3f,%.3f), s(%.3f,%.3f), r(%.3f), %s%s", - q,n, - (*it).mTranslation.x, - (*it).mTranslation.y, - (*it).mScaling.x, - (*it).mScaling.y, - AI_RAD_TO_DEG( (*it).mRotation), - MappingModeToChar ((*it).mapU), - MappingModeToChar ((*it).mapV)); - - ASSIMP_LOG_INFO(buffer); - } - - // Check whether we need a new buffer here - if (mesh->mTextureCoords[n]) { - - it2 = it;++it2; - for (unsigned int m = n+1; m < size;++m, ++it2) { - - if ((*it2).uvIndex == n){ - it2 = trafo.begin(); - break; - } - } - if (it2 == trafo.begin()){ - mesh->mTextureCoords[n] = new aiVector3D[mesh->mNumVertices]; - } - } - else mesh->mTextureCoords[n] = new aiVector3D[mesh->mNumVertices]; - - aiVector3D* src = old[(*it).uvIndex]; - aiVector3D* dest, *end; - dest = mesh->mTextureCoords[n]; - - ai_assert(NULL != src); - - // Copy the data to the destination array - if (dest != src) - ::memcpy(dest,src,sizeof(aiVector3D)*mesh->mNumVertices); - - end = dest + mesh->mNumVertices; - - // Build a transformation matrix and transform all UV coords with it - if (!(*it).IsUntransformed()) { - const aiVector2D& trl = (*it).mTranslation; - const aiVector2D& scl = (*it).mScaling; - - // fixme: simplify .. 
- ++transformedChannels; - aiMatrix3x3 matrix; - - aiMatrix3x3 m2,m3,m4,m5; - - m4.a1 = scl.x; - m4.b2 = scl.y; - - m2.a3 = m2.b3 = 0.5f; - m3.a3 = m3.b3 = -0.5f; - - if ((*it).mRotation > AI_TT_ROTATION_EPSILON ) - aiMatrix3x3::RotationZ((*it).mRotation,matrix); - - m5.a3 += trl.x; m5.b3 += trl.y; - matrix = m2 * m4 * matrix * m3 * m5; - - for (src = dest; src != end; ++src) { /* manual homogenious divide */ - src->z = 1.f; - *src = matrix * *src; - src->x /= src->z; - src->y /= src->z; - src->z = 0.f; - } - } - - // Update all UV indices - UpdateUVIndex((*it).updateList,n); - } - } - - // Print some detailed statistics into the log - if (!DefaultLogger::isNullLogger()) { - - if (transformedChannels) { - ASSIMP_LOG_INFO_F("TransformUVCoordsProcess end: ", outChannels, " output channels (in: ", inChannels, ", modified: ", transformedChannels,")"); - } else { - ASSIMP_LOG_DEBUG("TransformUVCoordsProcess finished"); - } - } -} - - diff --git a/thirdparty/assimp/code/PostProcessing/TextureTransform.h b/thirdparty/assimp/code/PostProcessing/TextureTransform.h deleted file mode 100644 index 2a5d623d7f..0000000000 --- a/thirdparty/assimp/code/PostProcessing/TextureTransform.h +++ /dev/null @@ -1,232 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file Definition of a helper step that processes texture transformations */ -#ifndef AI_TEXTURE_TRANSFORM_H_INCLUDED -#define AI_TEXTURE_TRANSFORM_H_INCLUDED - -#include <assimp/BaseImporter.h> -#include "Common/BaseProcess.h" - -#include <assimp/material.h> -#include <list> - -struct aiNode; -struct aiMaterial; - -namespace Assimp { - -#define AI_TT_UV_IDX_LOCK_TBD 0xffffffff -#define AI_TT_UV_IDX_LOCK_NONE 0xeeeeeeee - - -#define AI_TT_ROTATION_EPSILON ((float)AI_DEG_TO_RAD(0.5)) - -// --------------------------------------------------------------------------- -/** Small helper structure representing a shortcut into the material list - * to be able to update some values quickly. -*/ -struct TTUpdateInfo { - TTUpdateInfo() AI_NO_EXCEPT - : directShortcut(nullptr) - , mat(nullptr) - , semantic(0) - , index(0) { - // empty - } - - //! Direct shortcut, if available - unsigned int* directShortcut; - - //! Material - aiMaterial *mat; - - //! Texture type and index - unsigned int semantic, index; -}; - - -// --------------------------------------------------------------------------- -/** Helper class representing texture coordinate transformations -*/ -struct STransformVecInfo : public aiUVTransform { - STransformVecInfo() AI_NO_EXCEPT - : uvIndex(0) - , mapU(aiTextureMapMode_Wrap) - , mapV(aiTextureMapMode_Wrap) - , lockedPos(AI_TT_UV_IDX_LOCK_NONE) { - // empty - } - - //! Source texture coordinate index - unsigned int uvIndex; - - //! Texture mapping mode in the u, v direction - aiTextureMapMode mapU,mapV; - - //! Locked destination UV index - //! AI_TT_UV_IDX_LOCK_TBD - to be determined - //! AI_TT_UV_IDX_LOCK_NONE - none (default) - unsigned int lockedPos; - - //! Update info - shortcuts into all materials - //! 
that are referencing this transform setup - std::list<TTUpdateInfo> updateList; - - - // ------------------------------------------------------------------- - /** Compare two transform setups - */ - inline bool operator== (const STransformVecInfo& other) const - { - // We use a small epsilon here - const static float epsilon = 0.05f; - - if (std::fabs( mTranslation.x - other.mTranslation.x ) > epsilon || - std::fabs( mTranslation.y - other.mTranslation.y ) > epsilon) - { - return false; - } - - if (std::fabs( mScaling.x - other.mScaling.x ) > epsilon || - std::fabs( mScaling.y - other.mScaling.y ) > epsilon) - { - return false; - } - - if (std::fabs( mRotation - other.mRotation) > epsilon) - { - return false; - } - return true; - } - - inline bool operator!= (const STransformVecInfo& other) const - { - return !(*this == other); - } - - - // ------------------------------------------------------------------- - /** Returns whether this is an untransformed texture coordinate set - */ - inline bool IsUntransformed() const - { - return (1.0f == mScaling.x && 1.f == mScaling.y && - !mTranslation.x && !mTranslation.y && - mRotation < AI_TT_ROTATION_EPSILON); - } - - // ------------------------------------------------------------------- - /** Build a 3x3 matrix from the transformations - */ - inline void GetMatrix(aiMatrix3x3& mOut) - { - mOut = aiMatrix3x3(); - - if (1.0f != mScaling.x || 1.0f != mScaling.y) - { - aiMatrix3x3 mScale; - mScale.a1 = mScaling.x; - mScale.b2 = mScaling.y; - mOut = mScale; - } - if (mRotation) - { - aiMatrix3x3 mRot; - mRot.a1 = mRot.b2 = std::cos(mRotation); - mRot.a2 = mRot.b1 = std::sin(mRotation); - mRot.a2 = -mRot.a2; - mOut *= mRot; - } - if (mTranslation.x || mTranslation.y) - { - aiMatrix3x3 mTrans; - mTrans.a3 = mTranslation.x; - mTrans.b3 = mTranslation.y; - mOut *= mTrans; - } - } -}; - - -// --------------------------------------------------------------------------- -/** Helper step to compute final UV coordinate sets if there are scalings - * or rotations in the original data read from the file. -*/ -class TextureTransformStep : public BaseProcess -{ -public: - - TextureTransformStep(); - ~TextureTransformStep(); - -public: - - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - void SetupProperties(const Importer* pImp); - - -protected: - - - // ------------------------------------------------------------------- - /** Preprocess a specific UV transformation setup - * - * @param info Transformation setup to be preprocessed. - */ - void PreProcessUVTransform(STransformVecInfo& info); - -private: - - unsigned int configFlags; -}; - -} - -#endif //! AI_TEXTURE_TRANSFORM_H_INCLUDED diff --git a/thirdparty/assimp/code/PostProcessing/TriangulateProcess.cpp b/thirdparty/assimp/code/PostProcessing/TriangulateProcess.cpp deleted file mode 100644 index 1040836bbe..0000000000 --- a/thirdparty/assimp/code/PostProcessing/TriangulateProcess.cpp +++ /dev/null @@ -1,530 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file TriangulateProcess.cpp - * @brief Implementation of the post processing step to split up - * all faces with more than three indices into triangles. - * - * - * The triangulation algorithm will handle concave or convex polygons. - * Self-intersecting or non-planar polygons are not rejected, but - * they're probably not triangulated correctly. - * - * DEBUG SWITCHES - do not enable any of them in release builds: - * - * AI_BUILD_TRIANGULATE_COLOR_FACE_WINDING - * - generates vertex colors to represent the face winding order. - * the first vertex of a polygon becomes red, the last blue. - * AI_BUILD_TRIANGULATE_DEBUG_POLYS - * - dump all polygons and their triangulation sequences to - * a file - */ -#ifndef ASSIMP_BUILD_NO_TRIANGULATE_PROCESS - -#include "PostProcessing/TriangulateProcess.h" -#include "PostProcessing/ProcessHelper.h" -#include "Common/PolyTools.h" - -#include <memory> - -//#define AI_BUILD_TRIANGULATE_COLOR_FACE_WINDING -//#define AI_BUILD_TRIANGULATE_DEBUG_POLYS - -#define POLY_GRID_Y 40 -#define POLY_GRID_X 70 -#define POLY_GRID_XPAD 20 -#define POLY_OUTPUT_FILE "assimp_polygons_debug.txt" - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -TriangulateProcess::TriangulateProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -TriangulateProcess::~TriangulateProcess() -{ - // nothing to do here -} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. 
-bool TriangulateProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_Triangulate) != 0; -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void TriangulateProcess::Execute( aiScene* pScene) -{ - ASSIMP_LOG_DEBUG("TriangulateProcess begin"); - - bool bHas = false; - for( unsigned int a = 0; a < pScene->mNumMeshes; a++) - { - if (pScene->mMeshes[ a ]) { - if ( TriangulateMesh( pScene->mMeshes[ a ] ) ) { - bHas = true; - } - } - } - if ( bHas ) { - ASSIMP_LOG_INFO( "TriangulateProcess finished. All polygons have been triangulated." ); - } else { - ASSIMP_LOG_DEBUG( "TriangulateProcess finished. There was nothing to be done." ); - } -} - -// ------------------------------------------------------------------------------------------------ -// Triangulates the given mesh. -bool TriangulateProcess::TriangulateMesh( aiMesh* pMesh) -{ - // Now we have aiMesh::mPrimitiveTypes, so this is only here for test cases - if (!pMesh->mPrimitiveTypes) { - bool bNeed = false; - - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) { - const aiFace& face = pMesh->mFaces[a]; - - if( face.mNumIndices != 3) { - bNeed = true; - } - } - if (!bNeed) - return false; - } - else if (!(pMesh->mPrimitiveTypes & aiPrimitiveType_POLYGON)) { - return false; - } - - // Find out how many output faces we'll get - unsigned int numOut = 0, max_out = 0; - bool get_normals = true; - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) { - aiFace& face = pMesh->mFaces[a]; - if (face.mNumIndices <= 4) { - get_normals = false; - } - if( face.mNumIndices <= 3) { - numOut++; - - } - else { - numOut += face.mNumIndices-2; - max_out = std::max(max_out,face.mNumIndices); - } - } - - // Just another check whether aiMesh::mPrimitiveTypes is correct - ai_assert(numOut != pMesh->mNumFaces); - - aiVector3D* nor_out = NULL; - - // if we don't have normals yet, but expect them to be a cheap side - // product of triangulation anyway, allocate storage for them. - if (!pMesh->mNormals && get_normals) { - // XXX need a mechanism to inform the GenVertexNormals process to treat these normals as preprocessed per-face normals - // nor_out = pMesh->mNormals = new aiVector3D[pMesh->mNumVertices]; - } - - // the output mesh will contain triangles, but no polys anymore - pMesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE; - pMesh->mPrimitiveTypes &= ~aiPrimitiveType_POLYGON; - - aiFace* out = new aiFace[numOut](), *curOut = out; - std::vector<aiVector3D> temp_verts3d(max_out+2); /* temporary storage for vertices */ - std::vector<aiVector2D> temp_verts(max_out+2); - - // Apply vertex colors to represent the face winding? -#ifdef AI_BUILD_TRIANGULATE_COLOR_FACE_WINDING - if (!pMesh->mColors[0]) - pMesh->mColors[0] = new aiColor4D[pMesh->mNumVertices]; - else - new(pMesh->mColors[0]) aiColor4D[pMesh->mNumVertices]; - - aiColor4D* clr = pMesh->mColors[0]; -#endif - -#ifdef AI_BUILD_TRIANGULATE_DEBUG_POLYS - FILE* fout = fopen(POLY_OUTPUT_FILE,"a"); -#endif - - const aiVector3D* verts = pMesh->mVertices; - - // use std::unique_ptr to avoid slow std::vector<bool> specialiations - std::unique_ptr<bool[]> done(new bool[max_out]); - for( unsigned int a = 0; a < pMesh->mNumFaces; a++) { - aiFace& face = pMesh->mFaces[a]; - - unsigned int* idx = face.mIndices; - int num = (int)face.mNumIndices, ear = 0, tmp, prev = num-1, next = 0, max = num; - - // Apply vertex colors to represent the face winding? 
-#ifdef AI_BUILD_TRIANGULATE_COLOR_FACE_WINDING - for (unsigned int i = 0; i < face.mNumIndices; ++i) { - aiColor4D& c = clr[idx[i]]; - c.r = (i+1) / (float)max; - c.b = 1.f - c.r; - } -#endif - - aiFace* const last_face = curOut; - - // if it's a simple point,line or triangle: just copy it - if( face.mNumIndices <= 3) - { - aiFace& nface = *curOut++; - nface.mNumIndices = face.mNumIndices; - nface.mIndices = face.mIndices; - - face.mIndices = NULL; - continue; - } - // optimized code for quadrilaterals - else if ( face.mNumIndices == 4) { - - // quads can have at maximum one concave vertex. Determine - // this vertex (if it exists) and start tri-fanning from - // it. - unsigned int start_vertex = 0; - for (unsigned int i = 0; i < 4; ++i) { - const aiVector3D& v0 = verts[face.mIndices[(i+3) % 4]]; - const aiVector3D& v1 = verts[face.mIndices[(i+2) % 4]]; - const aiVector3D& v2 = verts[face.mIndices[(i+1) % 4]]; - - const aiVector3D& v = verts[face.mIndices[i]]; - - aiVector3D left = (v0-v); - aiVector3D diag = (v1-v); - aiVector3D right = (v2-v); - - left.Normalize(); - diag.Normalize(); - right.Normalize(); - - const float angle = std::acos(left*diag) + std::acos(right*diag); - if (angle > AI_MATH_PI_F) { - // this is the concave point - start_vertex = i; - break; - } - } - - const unsigned int temp[] = {face.mIndices[0], face.mIndices[1], face.mIndices[2], face.mIndices[3]}; - - aiFace& nface = *curOut++; - nface.mNumIndices = 3; - nface.mIndices = face.mIndices; - - nface.mIndices[0] = temp[start_vertex]; - nface.mIndices[1] = temp[(start_vertex + 1) % 4]; - nface.mIndices[2] = temp[(start_vertex + 2) % 4]; - - aiFace& sface = *curOut++; - sface.mNumIndices = 3; - sface.mIndices = new unsigned int[3]; - - sface.mIndices[0] = temp[start_vertex]; - sface.mIndices[1] = temp[(start_vertex + 2) % 4]; - sface.mIndices[2] = temp[(start_vertex + 3) % 4]; - - // prevent double deletion of the indices field - face.mIndices = NULL; - continue; - } - else - { - // A polygon with more than 3 vertices can be either concave or convex. - // Usually everything we're getting is convex and we could easily - // triangulate by tri-fanning. However, LightWave is probably the only - // modeling suite to make extensive use of highly concave, monster polygons ... - // so we need to apply the full 'ear cutting' algorithm to get it right. - - // RERQUIREMENT: polygon is expected to be simple and *nearly* planar. - // We project it onto a plane to get a 2d triangle. - - // Collect all vertices of of the polygon. - for (tmp = 0; tmp < max; ++tmp) { - temp_verts3d[tmp] = verts[idx[tmp]]; - } - - // Get newell normal of the polygon. Store it for future use if it's a polygon-only mesh - aiVector3D n; - NewellNormal<3,3,3>(n,max,&temp_verts3d.front().x,&temp_verts3d.front().y,&temp_verts3d.front().z); - if (nor_out) { - for (tmp = 0; tmp < max; ++tmp) - nor_out[idx[tmp]] = n; - } - - // Select largest normal coordinate to ignore for projection - const float ax = (n.x>0 ? n.x : -n.x); - const float ay = (n.y>0 ? n.y : -n.y); - const float az = (n.z>0 ? n.z : -n.z); - - unsigned int ac = 0, bc = 1; /* no z coord. projection to xy */ - float inv = n.z; - if (ax > ay) { - if (ax > az) { /* no x coord. projection to yz */ - ac = 1; bc = 2; - inv = n.x; - } - } - else if (ay > az) { /* no y coord. 
projection to zy */ - ac = 2; bc = 0; - inv = n.y; - } - - // Swap projection axes to take the negated projection vector into account - if (inv < 0.f) { - std::swap(ac,bc); - } - - for (tmp =0; tmp < max; ++tmp) { - temp_verts[tmp].x = verts[idx[tmp]][ac]; - temp_verts[tmp].y = verts[idx[tmp]][bc]; - done[tmp] = false; - } - -#ifdef AI_BUILD_TRIANGULATE_DEBUG_POLYS - // plot the plane onto which we mapped the polygon to a 2D ASCII pic - aiVector2D bmin,bmax; - ArrayBounds(&temp_verts[0],max,bmin,bmax); - - char grid[POLY_GRID_Y][POLY_GRID_X+POLY_GRID_XPAD]; - std::fill_n((char*)grid,POLY_GRID_Y*(POLY_GRID_X+POLY_GRID_XPAD),' '); - - for (int i =0; i < max; ++i) { - const aiVector2D& v = (temp_verts[i] - bmin) / (bmax-bmin); - const size_t x = static_cast<size_t>(v.x*(POLY_GRID_X-1)), y = static_cast<size_t>(v.y*(POLY_GRID_Y-1)); - char* loc = grid[y]+x; - if (grid[y][x] != ' ') { - for(;*loc != ' '; ++loc); - *loc++ = '_'; - } - *(loc+::ai_snprintf(loc, POLY_GRID_XPAD,"%i",i)) = ' '; - } - - - for(size_t y = 0; y < POLY_GRID_Y; ++y) { - grid[y][POLY_GRID_X+POLY_GRID_XPAD-1] = '\0'; - fprintf(fout,"%s\n",grid[y]); - } - - fprintf(fout,"\ntriangulation sequence: "); -#endif - - // - // FIXME: currently this is the slow O(kn) variant with a worst case - // complexity of O(n^2) (I think). Can be done in O(n). - while (num > 3) { - - // Find the next ear of the polygon - int num_found = 0; - for (ear = next;;prev = ear,ear = next) { - - // break after we looped two times without a positive match - for (next=ear+1;done[(next>=max?next=0:next)];++next); - if (next < ear) { - if (++num_found == 2) { - break; - } - } - const aiVector2D* pnt1 = &temp_verts[ear], - *pnt0 = &temp_verts[prev], - *pnt2 = &temp_verts[next]; - - // Must be a convex point. Assuming ccw winding, it must be on the right of the line between p-1 and p+1. - if (OnLeftSideOfLine2D(*pnt0,*pnt2,*pnt1)) { - continue; - } - - // and no other point may be contained in this triangle - for ( tmp = 0; tmp < max; ++tmp) { - - // We need to compare the actual values because it's possible that multiple indexes in - // the polygon are referring to the same position. concave_polygon.obj is a sample - // - // FIXME: Use 'epsiloned' comparisons instead? Due to numeric inaccuracies in - // PointInTriangle() I'm guessing that it's actually possible to construct - // input data that would cause us to end up with no ears. The problem is, - // which epsilon? If we chose a too large value, we'd get wrong results - const aiVector2D& vtmp = temp_verts[tmp]; - if ( vtmp != *pnt1 && vtmp != *pnt2 && vtmp != *pnt0 && PointInTriangle2D(*pnt0,*pnt1,*pnt2,vtmp)) { - break; - } - } - if (tmp != max) { - continue; - } - - // this vertex is an ear - break; - } - if (num_found == 2) { - - // Due to the 'two ear theorem', every simple polygon with more than three points must - // have 2 'ears'. Here's definitely something wrong ... but we don't give up yet. - // - - // Instead we're continuing with the standard tri-fanning algorithm which we'd - // use if we had only convex polygons. That's life. - ASSIMP_LOG_ERROR("Failed to triangulate polygon (no ear found). Probably not a simple polygon?"); - -#ifdef AI_BUILD_TRIANGULATE_DEBUG_POLYS - fprintf(fout,"critical error here, no ear found! 
"); -#endif - num = 0; - break; - - curOut -= (max-num); /* undo all previous work */ - for (tmp = 0; tmp < max-2; ++tmp) { - aiFace& nface = *curOut++; - - nface.mNumIndices = 3; - if (!nface.mIndices) - nface.mIndices = new unsigned int[3]; - - nface.mIndices[0] = 0; - nface.mIndices[1] = tmp+1; - nface.mIndices[2] = tmp+2; - - } - num = 0; - break; - } - - aiFace& nface = *curOut++; - nface.mNumIndices = 3; - - if (!nface.mIndices) { - nface.mIndices = new unsigned int[3]; - } - - // setup indices for the new triangle ... - nface.mIndices[0] = prev; - nface.mIndices[1] = ear; - nface.mIndices[2] = next; - - // exclude the ear from most further processing - done[ear] = true; - --num; - } - if (num > 0) { - // We have three indices forming the last 'ear' remaining. Collect them. - aiFace& nface = *curOut++; - nface.mNumIndices = 3; - if (!nface.mIndices) { - nface.mIndices = new unsigned int[3]; - } - - for (tmp = 0; done[tmp]; ++tmp); - nface.mIndices[0] = tmp; - - for (++tmp; done[tmp]; ++tmp); - nface.mIndices[1] = tmp; - - for (++tmp; done[tmp]; ++tmp); - nface.mIndices[2] = tmp; - - } - } - -#ifdef AI_BUILD_TRIANGULATE_DEBUG_POLYS - - for(aiFace* f = last_face; f != curOut; ++f) { - unsigned int* i = f->mIndices; - fprintf(fout," (%i %i %i)",i[0],i[1],i[2]); - } - - fprintf(fout,"\n*********************************************************************\n"); - fflush(fout); - -#endif - - for(aiFace* f = last_face; f != curOut; ) { - unsigned int* i = f->mIndices; - - // drop dumb 0-area triangles - deactivated for now: - //FindDegenerates post processing step can do the same thing - //if (std::fabs(GetArea2D(temp_verts[i[0]],temp_verts[i[1]],temp_verts[i[2]])) < 1e-5f) { - // ASSIMP_LOG_DEBUG("Dropping triangle with area 0"); - // --curOut; - - // delete[] f->mIndices; - // f->mIndices = nullptr; - - // for(aiFace* ff = f; ff != curOut; ++ff) { - // ff->mNumIndices = (ff+1)->mNumIndices; - // ff->mIndices = (ff+1)->mIndices; - // (ff+1)->mIndices = nullptr; - // } - // continue; - //} - - i[0] = idx[i[0]]; - i[1] = idx[i[1]]; - i[2] = idx[i[2]]; - ++f; - } - - delete[] face.mIndices; - face.mIndices = NULL; - } - -#ifdef AI_BUILD_TRIANGULATE_DEBUG_POLYS - fclose(fout); -#endif - - // kill the old faces - delete [] pMesh->mFaces; - - // ... and store the new ones - pMesh->mFaces = out; - pMesh->mNumFaces = (unsigned int)(curOut-out); /* not necessarily equal to numOut */ - return true; -} - -#endif // !! ASSIMP_BUILD_NO_TRIANGULATE_PROCESS diff --git a/thirdparty/assimp/code/PostProcessing/TriangulateProcess.h b/thirdparty/assimp/code/PostProcessing/TriangulateProcess.h deleted file mode 100644 index 916b5103dd..0000000000 --- a/thirdparty/assimp/code/PostProcessing/TriangulateProcess.h +++ /dev/null @@ -1,91 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a post processing step to triangulate all faces - with more than three vertices. - */ -#ifndef AI_TRIANGULATEPROCESS_H_INC -#define AI_TRIANGULATEPROCESS_H_INC - -#include "Common/BaseProcess.h" - -struct aiMesh; - -class TriangulateProcessTest; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** The TriangulateProcess splits up all faces with more than three indices - * into triangles. You usually want this to happen because the graphics cards - * need their data as triangles. - */ -class ASSIMP_API TriangulateProcess : public BaseProcess { -public: - TriangulateProcess(); - ~TriangulateProcess(); - - // ------------------------------------------------------------------- - /** Returns whether the processing step is present in the given flag field. - * @param pFlags The processing flags the importer was called with. A bitwise - * combination of #aiPostProcessSteps. - * @return true if the process is present in this flag fields, false if not. - */ - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Executes the post processing step on the given imported data. - * At the moment a process is not supposed to fail. - * @param pScene The imported data to work at. - */ - void Execute( aiScene* pScene); - - // ------------------------------------------------------------------- - /** Triangulates the given mesh. - * @param pMesh The mesh to triangulate. - */ - bool TriangulateMesh( aiMesh* pMesh); -}; - -} // end of namespace Assimp - -#endif // AI_TRIANGULATEPROCESS_H_INC diff --git a/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.cpp b/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.cpp deleted file mode 100644 index 75d1b6ef78..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.cpp +++ /dev/null @@ -1,1034 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
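For reference, the TriangulateProcess code removed above flattens each polygon before ear clipping: it computes a Newell normal, drops the dominant normal axis to get 2D coordinates, and swaps the two remaining axes when the normal points the "wrong" way so the projected winding stays counter-clockwise. A minimal standalone sketch of that projection step, using hypothetical Vec3/Vec2 types rather than assimp's own vector classes:

#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

struct Vec3 { float x, y, z; };
struct Vec2 { float x, y; };

// Newell normal: a robust plane normal for a (possibly non-convex) polygon.
static Vec3 NewellNormal(const std::vector<Vec3>& poly) {
    Vec3 n{0.f, 0.f, 0.f};
    for (std::size_t i = 0, j = poly.size() - 1; i < poly.size(); j = i++) {
        const Vec3& a = poly[j];   // previous vertex
        const Vec3& b = poly[i];   // current vertex
        n.x += (a.y - b.y) * (a.z + b.z);
        n.y += (a.z - b.z) * (a.x + b.x);
        n.z += (a.x - b.x) * (a.y + b.y);
    }
    return n;
}

// Project the polygon onto the plane spanned by the two minor axes of its normal.
static std::vector<Vec2> ProjectToDominantPlane(const std::vector<Vec3>& poly) {
    const Vec3 n = NewellNormal(poly);
    const float ax = std::fabs(n.x), ay = std::fabs(n.y), az = std::fabs(n.z);

    int ac = 0, bc = 1;            // default: drop z, project onto (x, y)
    float inv = n.z;
    if (ax > ay) {
        if (ax > az) { ac = 1; bc = 2; inv = n.x; }   // drop x, project onto (y, z)
    } else if (ay > az) {
        ac = 2; bc = 0; inv = n.y;                    // drop y, project onto (z, x)
    }
    if (inv < 0.f) std::swap(ac, bc);                 // keep counter-clockwise winding

    std::vector<Vec2> out(poly.size());
    for (std::size_t i = 0; i < poly.size(); ++i) {
        const float c[3] = { poly[i].x, poly[i].y, poly[i].z };
        out[i] = { c[ac], c[bc] };
    }
    return out;
}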
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file ValidateDataStructure.cpp - * @brief Implementation of the post processing step to validate - * the data structure returned by Assimp. - */ - -// internal headers -#include "ValidateDataStructure.h" -#include <assimp/BaseImporter.h> -#include <assimp/fast_atof.h> -#include "ProcessHelper.h" -#include <memory> - -// CRT headers -#include <stdarg.h> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -// Constructor to be privately used by Importer -ValidateDSProcess::ValidateDSProcess() : - mScene() -{} - -// ------------------------------------------------------------------------------------------------ -// Destructor, private as well -ValidateDSProcess::~ValidateDSProcess() -{} - -// ------------------------------------------------------------------------------------------------ -// Returns whether the processing step is present in the given flag field. -bool ValidateDSProcess::IsActive( unsigned int pFlags) const -{ - return (pFlags & aiProcess_ValidateDataStructure) != 0; -} -// ------------------------------------------------------------------------------------------------ -AI_WONT_RETURN void ValidateDSProcess::ReportError(const char* msg,...) -{ - ai_assert(NULL != msg); - - va_list args; - va_start(args,msg); - - char szBuffer[3000]; - const int iLen = vsprintf(szBuffer,msg,args); - ai_assert(iLen > 0); - - va_end(args); - - throw DeadlyImportError("Validation failed: " + std::string(szBuffer,iLen)); -} -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::ReportWarning(const char* msg,...) 
-{ - ai_assert(NULL != msg); - - va_list args; - va_start(args,msg); - - char szBuffer[3000]; - const int iLen = vsprintf(szBuffer,msg,args); - ai_assert(iLen > 0); - - va_end(args); - ASSIMP_LOG_WARN("Validation warning: " + std::string(szBuffer,iLen)); -} - -// ------------------------------------------------------------------------------------------------ -inline -int HasNameMatch(const aiString& in, aiNode* node) { - int result = (node->mName == in ? 1 : 0 ); - for (unsigned int i = 0; i < node->mNumChildren;++i) { - result += HasNameMatch(in,node->mChildren[i]); - } - return result; -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -void ValidateDSProcess::DoValidation(T** parray, unsigned int size, const char* firstName, const char* secondName) { - // validate all entries - if (size) - { - if (!parray) - { - ReportError("aiScene::%s is NULL (aiScene::%s is %i)", - firstName, secondName, size); - } - for (unsigned int i = 0; i < size;++i) - { - if (!parray[i]) - { - ReportError("aiScene::%s[%i] is NULL (aiScene::%s is %i)", - firstName,i,secondName,size); - } - Validate(parray[i]); - } - } -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline void ValidateDSProcess::DoValidationEx(T** parray, unsigned int size, - const char* firstName, const char* secondName) -{ - // validate all entries - if (size) - { - if (!parray) { - ReportError("aiScene::%s is NULL (aiScene::%s is %i)", - firstName, secondName, size); - } - for (unsigned int i = 0; i < size;++i) - { - if (!parray[i]) - { - ReportError("aiScene::%s[%u] is NULL (aiScene::%s is %u)", - firstName,i,secondName,size); - } - Validate(parray[i]); - - // check whether there are duplicate names - for (unsigned int a = i+1; a < size;++a) - { - if (parray[i]->mName == parray[a]->mName) - { - ReportError("aiScene::%s[%u] has the same name as " - "aiScene::%s[%u]",firstName, i,secondName, a); - } - } - } - } -} - -// ------------------------------------------------------------------------------------------------ -template <typename T> -inline -void ValidateDSProcess::DoValidationWithNameCheck(T** array, unsigned int size, const char* firstName, - const char* secondName) { - // validate all entries - DoValidationEx(array,size,firstName,secondName); - - for (unsigned int i = 0; i < size;++i) { - int res = HasNameMatch(array[i]->mName,mScene->mRootNode); - if (0 == res) { - const std::string name = static_cast<char*>(array[i]->mName.data); - ReportError("aiScene::%s[%i] has no corresponding node in the scene graph (%s)", - firstName,i, name.c_str()); - } else if (1 != res) { - const std::string name = static_cast<char*>(array[i]->mName.data); - ReportError("aiScene::%s[%i]: there are more than one nodes with %s as name", - firstName,i, name.c_str()); - } - } -} - -// ------------------------------------------------------------------------------------------------ -// Executes the post processing step on the given imported data. -void ValidateDSProcess::Execute( aiScene* pScene) { - mScene = pScene; - ASSIMP_LOG_DEBUG("ValidateDataStructureProcess begin"); - - // validate the node graph of the scene - Validate(pScene->mRootNode); - - // validate all meshes - if (pScene->mNumMeshes) { - DoValidation(pScene->mMeshes,pScene->mNumMeshes,"mMeshes","mNumMeshes"); - } - else if (!(mScene->mFlags & AI_SCENE_FLAGS_INCOMPLETE)) { - ReportError("aiScene::mNumMeshes is 0. 
At least one mesh must be there"); - } - else if (pScene->mMeshes) { - ReportError("aiScene::mMeshes is non-null although there are no meshes"); - } - - // validate all animations - if (pScene->mNumAnimations) { - DoValidation(pScene->mAnimations,pScene->mNumAnimations, - "mAnimations","mNumAnimations"); - } - else if (pScene->mAnimations) { - ReportError("aiScene::mAnimations is non-null although there are no animations"); - } - - // validate all cameras - if (pScene->mNumCameras) { - DoValidationWithNameCheck(pScene->mCameras,pScene->mNumCameras, - "mCameras","mNumCameras"); - } - else if (pScene->mCameras) { - ReportError("aiScene::mCameras is non-null although there are no cameras"); - } - - // validate all lights - if (pScene->mNumLights) { - DoValidationWithNameCheck(pScene->mLights,pScene->mNumLights, - "mLights","mNumLights"); - } - else if (pScene->mLights) { - ReportError("aiScene::mLights is non-null although there are no lights"); - } - - // validate all textures - if (pScene->mNumTextures) { - DoValidation(pScene->mTextures,pScene->mNumTextures, - "mTextures","mNumTextures"); - } - else if (pScene->mTextures) { - ReportError("aiScene::mTextures is non-null although there are no textures"); - } - - // validate all materials - if (pScene->mNumMaterials) { - DoValidation(pScene->mMaterials,pScene->mNumMaterials,"mMaterials","mNumMaterials"); - } -#if 0 - // NOTE: ScenePreprocessor generates a default material if none is there - else if (!(mScene->mFlags & AI_SCENE_FLAGS_INCOMPLETE)) { - ReportError("aiScene::mNumMaterials is 0. At least one material must be there"); - } -#endif - else if (pScene->mMaterials) { - ReportError("aiScene::mMaterials is non-null although there are no materials"); - } - -// if (!has)ReportError("The aiScene data structure is empty"); - ASSIMP_LOG_DEBUG("ValidateDataStructureProcess end"); -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiLight* pLight) -{ - if (pLight->mType == aiLightSource_UNDEFINED) - ReportWarning("aiLight::mType is aiLightSource_UNDEFINED"); - - if (!pLight->mAttenuationConstant && - !pLight->mAttenuationLinear && - !pLight->mAttenuationQuadratic) { - ReportWarning("aiLight::mAttenuationXXX - all are zero"); - } - - if (pLight->mAngleInnerCone > pLight->mAngleOuterCone) - ReportError("aiLight::mAngleInnerCone is larger than aiLight::mAngleOuterCone"); - - if (pLight->mColorDiffuse.IsBlack() && pLight->mColorAmbient.IsBlack() - && pLight->mColorSpecular.IsBlack()) - { - ReportWarning("aiLight::mColorXXX - all are black and won't have any influence"); - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiCamera* pCamera) -{ - if (pCamera->mClipPlaneFar <= pCamera->mClipPlaneNear) - ReportError("aiCamera::mClipPlaneFar must be >= aiCamera::mClipPlaneNear"); - - // FIX: there are many 3ds files with invalid FOVs. No reason to - // reject them at all ... a warning is appropriate. 
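The DoValidation / DoValidationEx helpers used by Execute() above follow one pattern: a non-zero count paired with a null array (or any null element) is an error, and, in the extended variant, two elements may not share a name. A rough generic sketch of that pattern, independent of the assimp scene types and with the exception-based error reporting replaced by a plain return value:

#include <cstdio>
#include <cstring>

// Hypothetical element type: anything carrying a C-string name.
struct Named { const char* name; };

// Returns false (and prints why) if the array/count pair is inconsistent.
static bool ValidateArray(Named* const* items, unsigned int count, const char* arrayName) {
    if (count == 0) return true;                       // nothing to check
    if (items == nullptr) {
        std::fprintf(stderr, "%s is NULL although the count is %u\n", arrayName, count);
        return false;
    }
    for (unsigned int i = 0; i < count; ++i) {
        if (items[i] == nullptr) {
            std::fprintf(stderr, "%s[%u] is NULL\n", arrayName, i);
            return false;
        }
        for (unsigned int a = i + 1; a < count; ++a) { // duplicate-name check
            if (items[a] && std::strcmp(items[i]->name, items[a]->name) == 0) {
                std::fprintf(stderr, "%s[%u] and %s[%u] share the name \"%s\"\n",
                             arrayName, i, arrayName, a, items[i]->name);
                return false;
            }
        }
    }
    return true;
}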
- if (!pCamera->mHorizontalFOV || pCamera->mHorizontalFOV >= (float)AI_MATH_PI) - ReportWarning("%f is not a valid value for aiCamera::mHorizontalFOV",pCamera->mHorizontalFOV); -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiMesh* pMesh) -{ - // validate the material index of the mesh - if (mScene->mNumMaterials && pMesh->mMaterialIndex >= mScene->mNumMaterials) - { - ReportError("aiMesh::mMaterialIndex is invalid (value: %i maximum: %i)", - pMesh->mMaterialIndex,mScene->mNumMaterials-1); - } - - Validate(&pMesh->mName); - - for (unsigned int i = 0; i < pMesh->mNumFaces; ++i) - { - aiFace& face = pMesh->mFaces[i]; - - if (pMesh->mPrimitiveTypes) - { - switch (face.mNumIndices) - { - case 0: - ReportError("aiMesh::mFaces[%i].mNumIndices is 0",i); - break; - case 1: - if (0 == (pMesh->mPrimitiveTypes & aiPrimitiveType_POINT)) - { - ReportError("aiMesh::mFaces[%i] is a POINT but aiMesh::mPrimitiveTypes " - "does not report the POINT flag",i); - } - break; - case 2: - if (0 == (pMesh->mPrimitiveTypes & aiPrimitiveType_LINE)) - { - ReportError("aiMesh::mFaces[%i] is a LINE but aiMesh::mPrimitiveTypes " - "does not report the LINE flag",i); - } - break; - case 3: - if (0 == (pMesh->mPrimitiveTypes & aiPrimitiveType_TRIANGLE)) - { - ReportError("aiMesh::mFaces[%i] is a TRIANGLE but aiMesh::mPrimitiveTypes " - "does not report the TRIANGLE flag",i); - } - break; - default: - if (0 == (pMesh->mPrimitiveTypes & aiPrimitiveType_POLYGON)) - { - this->ReportError("aiMesh::mFaces[%i] is a POLYGON but aiMesh::mPrimitiveTypes " - "does not report the POLYGON flag",i); - } - break; - }; - } - - if (!face.mIndices) - ReportError("aiMesh::mFaces[%i].mIndices is NULL",i); - } - - // positions must always be there ... - if (!pMesh->mNumVertices || (!pMesh->mVertices && !mScene->mFlags)) { - ReportError("The mesh %s contains no vertices", pMesh->mName.C_Str()); - } - - if (pMesh->mNumVertices > AI_MAX_VERTICES) { - ReportError("Mesh has too many vertices: %u, but the limit is %u",pMesh->mNumVertices,AI_MAX_VERTICES); - } - if (pMesh->mNumFaces > AI_MAX_FACES) { - ReportError("Mesh has too many faces: %u, but the limit is %u",pMesh->mNumFaces,AI_MAX_FACES); - } - - // if tangents are there there must also be bitangent vectors ... - if ((pMesh->mTangents != NULL) != (pMesh->mBitangents != NULL)) { - ReportError("If there are tangents, bitangent vectors must be present as well"); - } - - // faces, too - if (!pMesh->mNumFaces || (!pMesh->mFaces && !mScene->mFlags)) { - ReportError("Mesh %s contains no faces", pMesh->mName.C_Str()); - } - - // now check whether the face indexing layout is correct: - // unique vertices, pseudo-indexed. - std::vector<bool> abRefList; - abRefList.resize(pMesh->mNumVertices,false); - for (unsigned int i = 0; i < pMesh->mNumFaces;++i) - { - aiFace& face = pMesh->mFaces[i]; - if (face.mNumIndices > AI_MAX_FACE_INDICES) { - ReportError("Face %u has too many faces: %u, but the limit is %u",i,face.mNumIndices,AI_MAX_FACE_INDICES); - } - - for (unsigned int a = 0; a < face.mNumIndices;++a) - { - if (face.mIndices[a] >= pMesh->mNumVertices) { - ReportError("aiMesh::mFaces[%i]::mIndices[%i] is out of range",i,a); - } - // the MSB flag is temporarily used by the extra verbose - // mode to tell us that the JoinVerticesProcess might have - // been executed already. 
- /*if ( !(this->mScene->mFlags & AI_SCENE_FLAGS_NON_VERBOSE_FORMAT ) && !(this->mScene->mFlags & AI_SCENE_FLAGS_ALLOW_SHARED) && - abRefList[face.mIndices[a]]) - { - ReportError("aiMesh::mVertices[%i] is referenced twice - second " - "time by aiMesh::mFaces[%i]::mIndices[%i]",face.mIndices[a],i,a); - }*/ - abRefList[face.mIndices[a]] = true; - } - } - - // check whether there are vertices that aren't referenced by a face - bool b = false; - for (unsigned int i = 0; i < pMesh->mNumVertices;++i) { - if (!abRefList[i])b = true; - } - abRefList.clear(); - if (b) { - ReportWarning("There are unreferenced vertices"); - } - - // texture channel 2 may not be set if channel 1 is zero ... - { - unsigned int i = 0; - for (;i < AI_MAX_NUMBER_OF_TEXTURECOORDS;++i) - { - if (!pMesh->HasTextureCoords(i))break; - } - for (;i < AI_MAX_NUMBER_OF_TEXTURECOORDS;++i) - if (pMesh->HasTextureCoords(i)) - { - ReportError("Texture coordinate channel %i exists " - "although the previous channel was NULL.",i); - } - } - // the same for the vertex colors - { - unsigned int i = 0; - for (;i < AI_MAX_NUMBER_OF_COLOR_SETS;++i) - { - if (!pMesh->HasVertexColors(i))break; - } - for (;i < AI_MAX_NUMBER_OF_COLOR_SETS;++i) - if (pMesh->HasVertexColors(i)) - { - ReportError("Vertex color channel %i is exists " - "although the previous channel was NULL.",i); - } - } - - - // now validate all bones - if (pMesh->mNumBones) - { - if (!pMesh->mBones) - { - ReportError("aiMesh::mBones is NULL (aiMesh::mNumBones is %i)", - pMesh->mNumBones); - } - std::unique_ptr<float[]> afSum(nullptr); - if (pMesh->mNumVertices) - { - afSum.reset(new float[pMesh->mNumVertices]); - for (unsigned int i = 0; i < pMesh->mNumVertices;++i) - afSum[i] = 0.0f; - } - - // check whether there are duplicate bone names - for (unsigned int i = 0; i < pMesh->mNumBones;++i) - { - const aiBone* bone = pMesh->mBones[i]; - if (bone->mNumWeights > AI_MAX_BONE_WEIGHTS) { - ReportError("Bone %u has too many weights: %u, but the limit is %u",i,bone->mNumWeights,AI_MAX_BONE_WEIGHTS); - } - - if (!pMesh->mBones[i]) - { - ReportError("aiMesh::mBones[%i] is NULL (aiMesh::mNumBones is %i)", - i,pMesh->mNumBones); - } - Validate(pMesh,pMesh->mBones[i],afSum.get()); - - for (unsigned int a = i+1; a < pMesh->mNumBones;++a) - { - if (pMesh->mBones[i]->mName == pMesh->mBones[a]->mName) - { - const char *name = "unknown"; - if (nullptr != pMesh->mBones[ i ]->mName.C_Str()) { - name = pMesh->mBones[ i ]->mName.C_Str(); - } - ReportError("aiMesh::mBones[%i], name = \"%s\" has the same name as " - "aiMesh::mBones[%i]", i, name, a ); - } - } - } - // check whether all bone weights for a vertex sum to 1.0 ... 
- for (unsigned int i = 0; i < pMesh->mNumVertices;++i) - { - if (afSum[i] && (afSum[i] <= 0.94 || afSum[i] >= 1.05)) { - ReportWarning("aiMesh::mVertices[%i]: bone weight sum != 1.0 (sum is %f)",i,afSum[i]); - } - } - } - else if (pMesh->mBones) - { - ReportError("aiMesh::mBones is non-null although there are no bones"); - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiMesh* pMesh, const aiBone* pBone,float* afSum) { - this->Validate(&pBone->mName); - - if (!pBone->mNumWeights) { - //ReportError("aiBone::mNumWeights is zero"); - } - - // check whether all vertices affected by this bone are valid - for (unsigned int i = 0; i < pBone->mNumWeights;++i) - { - if (pBone->mWeights[i].mVertexId >= pMesh->mNumVertices) { - ReportError("aiBone::mWeights[%i].mVertexId is out of range",i); - } - else if (!pBone->mWeights[i].mWeight || pBone->mWeights[i].mWeight > 1.0f) { - ReportWarning("aiBone::mWeights[%i].mWeight has an invalid value",i); - } - afSum[pBone->mWeights[i].mVertexId] += pBone->mWeights[i].mWeight; - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiAnimation* pAnimation) -{ - Validate(&pAnimation->mName); - - // validate all animations - if (pAnimation->mNumChannels || pAnimation->mNumMorphMeshChannels) - { - if (!pAnimation->mChannels && pAnimation->mNumChannels) { - ReportError("aiAnimation::mChannels is NULL (aiAnimation::mNumChannels is %i)", - pAnimation->mNumChannels); - } - if (!pAnimation->mMorphMeshChannels && pAnimation->mNumMorphMeshChannels) { - ReportError("aiAnimation::mMorphMeshChannels is NULL (aiAnimation::mNumMorphMeshChannels is %i)", - pAnimation->mNumMorphMeshChannels); - } - for (unsigned int i = 0; i < pAnimation->mNumChannels;++i) - { - if (!pAnimation->mChannels[i]) - { - ReportError("aiAnimation::mChannels[%i] is NULL (aiAnimation::mNumChannels is %i)", - i, pAnimation->mNumChannels); - } - Validate(pAnimation, pAnimation->mChannels[i]); - } - for (unsigned int i = 0; i < pAnimation->mNumMorphMeshChannels;++i) - { - if (!pAnimation->mMorphMeshChannels[i]) - { - ReportError("aiAnimation::mMorphMeshChannels[%i] is NULL (aiAnimation::mNumMorphMeshChannels is %i)", - i, pAnimation->mNumMorphMeshChannels); - } - Validate(pAnimation, pAnimation->mMorphMeshChannels[i]); - } - } - else { - ReportError("aiAnimation::mNumChannels is 0. At least one node animation channel must be there."); - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::SearchForInvalidTextures(const aiMaterial* pMaterial, - aiTextureType type) -{ - const char* szType = TextureTypeToString(type); - - // **************************************************************************** - // Search all keys of the material ... - // textures must be specified with ascending indices - // (e.g. diffuse #2 may not be specified if diffuse #1 is not there ...) 
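The bone checks above accumulate every aiVertexWeight into a per-vertex sum and warn when the total strays far from 1.0. A condensed sketch of that accumulation using the public assimp mesh types, with the validator's error reporting replaced by a simple count; the tolerance band mirrors the one in the deleted code:

#include <assimp/mesh.h>
#include <cstddef>
#include <vector>

// Returns how many vertices have bone weights that do not sum to roughly 1.0.
static unsigned int CountBadWeightSums(const aiMesh* mesh) {
    std::vector<float> sum(mesh->mNumVertices, 0.0f);
    for (unsigned int b = 0; b < mesh->mNumBones; ++b) {
        const aiBone* bone = mesh->mBones[b];
        for (unsigned int w = 0; w < bone->mNumWeights; ++w) {
            const aiVertexWeight& vw = bone->mWeights[w];
            if (vw.mVertexId < mesh->mNumVertices)     // skip out-of-range ids here
                sum[vw.mVertexId] += vw.mWeight;
        }
    }
    unsigned int bad = 0;
    for (unsigned int v = 0; v < mesh->mNumVertices; ++v) {
        // A sum of exactly 0 just means the vertex is not skinned at all;
        // anything else outside ~[0.94, 1.05] is treated as suspicious.
        if (sum[v] != 0.0f && (sum[v] <= 0.94f || sum[v] >= 1.05f))
            ++bad;
    }
    return bad;
}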
- // **************************************************************************** - - int iNumIndices = 0; - int iIndex = -1; - for (unsigned int i = 0; i < pMaterial->mNumProperties;++i) { - aiMaterialProperty* prop = pMaterial->mProperties[ i ]; - ai_assert(nullptr != prop); - if ( !::strcmp(prop->mKey.data,"$tex.file") && prop->mSemantic == static_cast<unsigned int>(type)) { - iIndex = std::max(iIndex, (int) prop->mIndex); - ++iNumIndices; - - if (aiPTI_String != prop->mType) { - ReportError("Material property %s is expected to be a string", prop->mKey.data); - } - } - } - if (iIndex +1 != iNumIndices) { - ReportError("%s #%i is set, but there are only %i %s textures", - szType,iIndex,iNumIndices,szType); - } - if (!iNumIndices)return; - std::vector<aiTextureMapping> mappings(iNumIndices); - - // Now check whether all UV indices are valid ... - bool bNoSpecified = true; - for (unsigned int i = 0; i < pMaterial->mNumProperties;++i) - { - aiMaterialProperty* prop = pMaterial->mProperties[i]; - if (prop->mSemantic != type)continue; - - if ((int)prop->mIndex >= iNumIndices) - { - ReportError("Found texture property with index %i, although there " - "are only %i textures of type %s", - prop->mIndex, iNumIndices, szType); - } - - if (!::strcmp(prop->mKey.data,"$tex.mapping")) { - if (aiPTI_Integer != prop->mType || prop->mDataLength < sizeof(aiTextureMapping)) - { - ReportError("Material property %s%i is expected to be an integer (size is %i)", - prop->mKey.data,prop->mIndex,prop->mDataLength); - } - mappings[prop->mIndex] = *((aiTextureMapping*)prop->mData); - } - else if (!::strcmp(prop->mKey.data,"$tex.uvtrafo")) { - if (aiPTI_Float != prop->mType || prop->mDataLength < sizeof(aiUVTransform)) - { - ReportError("Material property %s%i is expected to be 5 floats large (size is %i)", - prop->mKey.data,prop->mIndex, prop->mDataLength); - } - mappings[prop->mIndex] = *((aiTextureMapping*)prop->mData); - } - else if (!::strcmp(prop->mKey.data,"$tex.uvwsrc")) { - if (aiPTI_Integer != prop->mType || sizeof(int) > prop->mDataLength) - { - ReportError("Material property %s%i is expected to be an integer (size is %i)", - prop->mKey.data,prop->mIndex,prop->mDataLength); - } - bNoSpecified = false; - - // Ignore UV indices for texture channels that are not there ... - - // Get the value - iIndex = *((unsigned int*)prop->mData); - - // Check whether there is a mesh using this material - // which has not enough UV channels ... - for (unsigned int a = 0; a < mScene->mNumMeshes;++a) - { - aiMesh* mesh = this->mScene->mMeshes[a]; - if(mesh->mMaterialIndex == (unsigned int)i) - { - int iChannels = 0; - while (mesh->HasTextureCoords(iChannels))++iChannels; - if (iIndex >= iChannels) - { - ReportWarning("Invalid UV index: %i (key %s). Mesh %i has only %i UV channels", - iIndex,prop->mKey.data,a,iChannels); - } - } - } - } - } - if (bNoSpecified) - { - // Assume that all textures are using the first UV channel - for (unsigned int a = 0; a < mScene->mNumMeshes;++a) - { - aiMesh* mesh = mScene->mMeshes[a]; - if(mesh->mMaterialIndex == (unsigned int)iIndex && mappings[0] == aiTextureMapping_UV) - { - if (!mesh->mTextureCoords[0]) - { - // This is a special case ... it could be that the - // original mesh format intended the use of a special - // mapping here. 
- ReportWarning("UV-mapped texture, but there are no UV coords"); - } - } - } - } -} -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiMaterial* pMaterial) -{ - // check whether there are material keys that are obviously not legal - for (unsigned int i = 0; i < pMaterial->mNumProperties;++i) - { - const aiMaterialProperty* prop = pMaterial->mProperties[i]; - if (!prop) { - ReportError("aiMaterial::mProperties[%i] is NULL (aiMaterial::mNumProperties is %i)", - i,pMaterial->mNumProperties); - } - if (!prop->mDataLength || !prop->mData) { - ReportError("aiMaterial::mProperties[%i].mDataLength or " - "aiMaterial::mProperties[%i].mData is 0",i,i); - } - // check all predefined types - if (aiPTI_String == prop->mType) { - // FIX: strings are now stored in a less expensive way, but we can't use the - // validation routine for 'normal' aiStrings - if (prop->mDataLength < 5 || prop->mDataLength < 4 + (*reinterpret_cast<uint32_t*>(prop->mData)) + 1) { - ReportError("aiMaterial::mProperties[%i].mDataLength is " - "too small to contain a string (%i, needed: %i)", - i,prop->mDataLength,static_cast<int>(sizeof(aiString))); - } - if(prop->mData[prop->mDataLength-1]) { - ReportError("Missing null-terminator in string material property"); - } - // Validate((const aiString*)prop->mData); - } - else if (aiPTI_Float == prop->mType) { - if (prop->mDataLength < sizeof(float)) { - ReportError("aiMaterial::mProperties[%i].mDataLength is " - "too small to contain a float (%i, needed: %i)", - i,prop->mDataLength, static_cast<int>(sizeof(float))); - } - } - else if (aiPTI_Integer == prop->mType) { - if (prop->mDataLength < sizeof(int)) { - ReportError("aiMaterial::mProperties[%i].mDataLength is " - "too small to contain an integer (%i, needed: %i)", - i,prop->mDataLength, static_cast<int>(sizeof(int))); - } - } - // TODO: check whether there is a key with an unknown name ... - } - - // make some more specific tests - ai_real fTemp; - int iShading; - if (AI_SUCCESS == aiGetMaterialInteger( pMaterial,AI_MATKEY_SHADING_MODEL,&iShading)) { - switch ((aiShadingMode)iShading) - { - case aiShadingMode_Blinn: - case aiShadingMode_CookTorrance: - case aiShadingMode_Phong: - - if (AI_SUCCESS != aiGetMaterialFloat(pMaterial,AI_MATKEY_SHININESS,&fTemp)) { - ReportWarning("A specular shading model is specified but there is no " - "AI_MATKEY_SHININESS key"); - } - if (AI_SUCCESS == aiGetMaterialFloat(pMaterial,AI_MATKEY_SHININESS_STRENGTH,&fTemp) && !fTemp) { - ReportWarning("A specular shading model is specified but the value of the " - "AI_MATKEY_SHININESS_STRENGTH key is 0.0"); - } - break; - default: - break; - } - } - - if (AI_SUCCESS == aiGetMaterialFloat( pMaterial,AI_MATKEY_OPACITY,&fTemp) && (!fTemp || fTemp > 1.01)) { - ReportWarning("Invalid opacity value (must be 0 < opacity < 1.0)"); - } - - // Check whether there are invalid texture keys - // TODO: that's a relict of the past, where texture type and index were baked - // into the material string ... we could do that in one single pass. 
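SearchForInvalidTextures above enforces one rule per texture type: if N textures of a type are present, their indices must be exactly 0..N-1 (so the highest index plus one equals the count). A small self-contained check of that invariant over a plain list of indices; this is a hypothetical helper, not the assimp material API, and it is slightly stricter than the count-based test above because it also rejects duplicate indices:

#include <algorithm>
#include <cstddef>
#include <vector>

// True if 'indices' contains exactly the values 0..indices.size()-1 (in any order).
static bool TextureIndicesAreContiguous(std::vector<int> indices) {
    if (indices.empty()) return true;                  // no textures of this type is fine
    std::sort(indices.begin(), indices.end());
    // Mirrors the validator's "iIndex + 1 != iNumIndices" error condition.
    if (indices.back() + 1 != static_cast<int>(indices.size())) return false;
    for (std::size_t i = 0; i < indices.size(); ++i)
        if (indices[i] != static_cast<int>(i)) return false;  // gap or duplicate
    return true;
}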
- SearchForInvalidTextures(pMaterial,aiTextureType_DIFFUSE); - SearchForInvalidTextures(pMaterial,aiTextureType_SPECULAR); - SearchForInvalidTextures(pMaterial,aiTextureType_AMBIENT); - SearchForInvalidTextures(pMaterial,aiTextureType_EMISSIVE); - SearchForInvalidTextures(pMaterial,aiTextureType_OPACITY); - SearchForInvalidTextures(pMaterial,aiTextureType_SHININESS); - SearchForInvalidTextures(pMaterial,aiTextureType_HEIGHT); - SearchForInvalidTextures(pMaterial,aiTextureType_NORMALS); - SearchForInvalidTextures(pMaterial,aiTextureType_DISPLACEMENT); - SearchForInvalidTextures(pMaterial,aiTextureType_LIGHTMAP); - SearchForInvalidTextures(pMaterial,aiTextureType_REFLECTION); -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiTexture* pTexture) -{ - // the data section may NEVER be NULL - if (!pTexture->pcData) { - ReportError("aiTexture::pcData is NULL"); - } - if (pTexture->mHeight) - { - if (!pTexture->mWidth){ - ReportError("aiTexture::mWidth is zero (aiTexture::mHeight is %i, uncompressed texture)", - pTexture->mHeight); - } - } - else - { - if (!pTexture->mWidth) { - ReportError("aiTexture::mWidth is zero (compressed texture)"); - } - if ('\0' != pTexture->achFormatHint[3]) { - ReportWarning("aiTexture::achFormatHint must be zero-terminated"); - } - else if ('.' == pTexture->achFormatHint[0]) { - ReportWarning("aiTexture::achFormatHint should contain a file extension " - "without a leading dot (format hint: %s).",pTexture->achFormatHint); - } - } - - const char* sz = pTexture->achFormatHint; - if ((sz[0] >= 'A' && sz[0] <= 'Z') || - (sz[1] >= 'A' && sz[1] <= 'Z') || - (sz[2] >= 'A' && sz[2] <= 'Z') || - (sz[3] >= 'A' && sz[3] <= 'Z')) { - ReportError("aiTexture::achFormatHint contains non-lowercase letters"); - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiAnimation* pAnimation, - const aiNodeAnim* pNodeAnim) -{ - Validate(&pNodeAnim->mNodeName); - - if (!pNodeAnim->mNumPositionKeys && !pNodeAnim->mScalingKeys && !pNodeAnim->mNumRotationKeys) { - ReportError("Empty node animation channel"); - } - // otherwise check whether one of the keys exceeds the total duration of the animation - if (pNodeAnim->mNumPositionKeys) - { - if (!pNodeAnim->mPositionKeys) - { - ReportError("aiNodeAnim::mPositionKeys is NULL (aiNodeAnim::mNumPositionKeys is %i)", - pNodeAnim->mNumPositionKeys); - } - double dLast = -10e10; - for (unsigned int i = 0; i < pNodeAnim->mNumPositionKeys;++i) - { - // ScenePreprocessor will compute the duration if still the default value - // (Aramis) Add small epsilon, comparison tended to fail if max_time == duration, - // seems to be due the compilers register usage/width. - if (pAnimation->mDuration > 0. 
&& pNodeAnim->mPositionKeys[i].mTime > pAnimation->mDuration+0.001) - { - ReportError("aiNodeAnim::mPositionKeys[%i].mTime (%.5f) is larger " - "than aiAnimation::mDuration (which is %.5f)",i, - (float)pNodeAnim->mPositionKeys[i].mTime, - (float)pAnimation->mDuration); - } - if (i && pNodeAnim->mPositionKeys[i].mTime <= dLast) - { - ReportWarning("aiNodeAnim::mPositionKeys[%i].mTime (%.5f) is smaller " - "than aiAnimation::mPositionKeys[%i] (which is %.5f)",i, - (float)pNodeAnim->mPositionKeys[i].mTime, - i-1, (float)dLast); - } - dLast = pNodeAnim->mPositionKeys[i].mTime; - } - } - // rotation keys - if (pNodeAnim->mNumRotationKeys) - { - if (!pNodeAnim->mRotationKeys) - { - ReportError("aiNodeAnim::mRotationKeys is NULL (aiNodeAnim::mNumRotationKeys is %i)", - pNodeAnim->mNumRotationKeys); - } - double dLast = -10e10; - for (unsigned int i = 0; i < pNodeAnim->mNumRotationKeys;++i) - { - if (pAnimation->mDuration > 0. && pNodeAnim->mRotationKeys[i].mTime > pAnimation->mDuration+0.001) - { - ReportError("aiNodeAnim::mRotationKeys[%i].mTime (%.5f) is larger " - "than aiAnimation::mDuration (which is %.5f)",i, - (float)pNodeAnim->mRotationKeys[i].mTime, - (float)pAnimation->mDuration); - } - if (i && pNodeAnim->mRotationKeys[i].mTime <= dLast) - { - ReportWarning("aiNodeAnim::mRotationKeys[%i].mTime (%.5f) is smaller " - "than aiAnimation::mRotationKeys[%i] (which is %.5f)",i, - (float)pNodeAnim->mRotationKeys[i].mTime, - i-1, (float)dLast); - } - dLast = pNodeAnim->mRotationKeys[i].mTime; - } - } - // scaling keys - if (pNodeAnim->mNumScalingKeys) - { - if (!pNodeAnim->mScalingKeys) { - ReportError("aiNodeAnim::mScalingKeys is NULL (aiNodeAnim::mNumScalingKeys is %i)", - pNodeAnim->mNumScalingKeys); - } - double dLast = -10e10; - for (unsigned int i = 0; i < pNodeAnim->mNumScalingKeys;++i) - { - if (pAnimation->mDuration > 0. && pNodeAnim->mScalingKeys[i].mTime > pAnimation->mDuration+0.001) - { - ReportError("aiNodeAnim::mScalingKeys[%i].mTime (%.5f) is larger " - "than aiAnimation::mDuration (which is %.5f)",i, - (float)pNodeAnim->mScalingKeys[i].mTime, - (float)pAnimation->mDuration); - } - if (i && pNodeAnim->mScalingKeys[i].mTime <= dLast) - { - ReportWarning("aiNodeAnim::mScalingKeys[%i].mTime (%.5f) is smaller " - "than aiAnimation::mScalingKeys[%i] (which is %.5f)",i, - (float)pNodeAnim->mScalingKeys[i].mTime, - i-1, (float)dLast); - } - dLast = pNodeAnim->mScalingKeys[i].mTime; - } - } - - if (!pNodeAnim->mNumScalingKeys && !pNodeAnim->mNumRotationKeys && - !pNodeAnim->mNumPositionKeys) - { - ReportError("A node animation channel must have at least one subtrack"); - } -} - -void ValidateDSProcess::Validate( const aiAnimation* pAnimation, - const aiMeshMorphAnim* pMeshMorphAnim) -{ - Validate(&pMeshMorphAnim->mName); - - if (!pMeshMorphAnim->mNumKeys) { - ReportError("Empty mesh morph animation channel"); - } - - // otherwise check whether one of the keys exceeds the total duration of the animation - if (pMeshMorphAnim->mNumKeys) - { - if (!pMeshMorphAnim->mKeys) - { - ReportError("aiMeshMorphAnim::mKeys is NULL (aiMeshMorphAnim::mNumKeys is %i)", - pMeshMorphAnim->mNumKeys); - } - double dLast = -10e10; - for (unsigned int i = 0; i < pMeshMorphAnim->mNumKeys;++i) - { - // ScenePreprocessor will compute the duration if still the default value - // (Aramis) Add small epsilon, comparison tended to fail if max_time == duration, - // seems to be due the compilers register usage/width. - if (pAnimation->mDuration > 0. 
&& pMeshMorphAnim->mKeys[i].mTime > pAnimation->mDuration+0.001) - { - ReportError("aiMeshMorphAnim::mKeys[%i].mTime (%.5f) is larger " - "than aiAnimation::mDuration (which is %.5f)",i, - (float)pMeshMorphAnim->mKeys[i].mTime, - (float)pAnimation->mDuration); - } - if (i && pMeshMorphAnim->mKeys[i].mTime <= dLast) - { - ReportWarning("aiMeshMorphAnim::mKeys[%i].mTime (%.5f) is smaller " - "than aiMeshMorphAnim::mKeys[%i] (which is %.5f)",i, - (float)pMeshMorphAnim->mKeys[i].mTime, - i-1, (float)dLast); - } - dLast = pMeshMorphAnim->mKeys[i].mTime; - } - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiNode* pNode) -{ - if (!pNode) { - ReportError("A node of the scenegraph is NULL"); - } - // Validate node name string first so that it's safe to use in below expressions - this->Validate(&pNode->mName); - const char* nodeName = (&pNode->mName)->C_Str(); - if (pNode != mScene->mRootNode && !pNode->mParent){ - ReportError("Non-root node %s lacks a valid parent (aiNode::mParent is NULL) ", nodeName); - } - - // validate all meshes - if (pNode->mNumMeshes) - { - if (!pNode->mMeshes) - { - ReportError("aiNode::mMeshes is NULL for node %s (aiNode::mNumMeshes is %i)", - nodeName, pNode->mNumMeshes); - } - std::vector<bool> abHadMesh; - abHadMesh.resize(mScene->mNumMeshes,false); - for (unsigned int i = 0; i < pNode->mNumMeshes;++i) - { - if (pNode->mMeshes[i] >= mScene->mNumMeshes) - { - ReportError("aiNode::mMeshes[%i] is out of range for node %s (maximum is %i)", - pNode->mMeshes[i], nodeName, mScene->mNumMeshes-1); - } - if (abHadMesh[pNode->mMeshes[i]]) - { - ReportError("aiNode::mMeshes[%i] is already referenced by this node %s (value: %i)", - i, nodeName, pNode->mMeshes[i]); - } - abHadMesh[pNode->mMeshes[i]] = true; - } - } - if (pNode->mNumChildren) - { - if (!pNode->mChildren) { - ReportError("aiNode::mChildren is NULL for node %s (aiNode::mNumChildren is %i)", - nodeName, pNode->mNumChildren); - } - for (unsigned int i = 0; i < pNode->mNumChildren;++i) { - Validate(pNode->mChildren[i]); - } - } -} - -// ------------------------------------------------------------------------------------------------ -void ValidateDSProcess::Validate( const aiString* pString) -{ - if (pString->length > MAXLEN) - { - ReportError("aiString::length is too large (%u, maximum is %lu)", - pString->length,MAXLEN); - } - const char* sz = pString->data; - while (true) - { - if ('\0' == *sz) - { - if (pString->length != (unsigned int)(sz-pString->data)) { - ReportError("aiString::data is invalid: the terminal zero is at a wrong offset"); - } - break; - } - else if (sz >= &pString->data[MAXLEN]) { - ReportError("aiString::data is invalid. There is no terminal character"); - } - ++sz; - } -} diff --git a/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.h b/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.h deleted file mode 100644 index 7b309c9251..0000000000 --- a/thirdparty/assimp/code/PostProcessing/ValidateDataStructure.h +++ /dev/null @@ -1,197 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
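The animation-channel checks above apply the same two rules to position, rotation, scaling and morph keys: key times must keep increasing, and no key may lie beyond aiAnimation::mDuration plus a small epsilon that absorbs rounding. A reduced sketch for the position track only, folding both the error and the warning case into a single boolean result:

#include <assimp/anim.h>

// True if the position keys of a channel are ordered and within the clip duration.
static bool PositionKeysLookValid(const aiAnimation* anim, const aiNodeAnim* channel) {
    double last = -1e10;                               // sentinel well below any real key time
    for (unsigned int i = 0; i < channel->mNumPositionKeys; ++i) {
        const double t = channel->mPositionKeys[i].mTime;
        if (anim->mDuration > 0.0 && t > anim->mDuration + 0.001)
            return false;                              // key lies past the end of the animation
        if (i > 0 && t <= last)
            return false;                              // key times must increase
        last = t;
    }
    return true;
}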
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a (dummy) post processing step to validate the loader's - * output data structure (for debugging) - */ -#ifndef AI_VALIDATEPROCESS_H_INC -#define AI_VALIDATEPROCESS_H_INC - -#include <assimp/types.h> -#include <assimp/material.h> - -#include "Common/BaseProcess.h" - -struct aiBone; -struct aiMesh; -struct aiAnimation; -struct aiNodeAnim; -struct aiMeshMorphAnim; -struct aiTexture; -struct aiMaterial; -struct aiNode; -struct aiString; -struct aiCamera; -struct aiLight; - -namespace Assimp { - -// -------------------------------------------------------------------------------------- -/** Validates the whole ASSIMP scene data structure for correctness. - * ImportErrorException is thrown of the scene is corrupt.*/ -// -------------------------------------------------------------------------------------- -class ValidateDSProcess : public BaseProcess -{ -public: - - ValidateDSProcess(); - ~ValidateDSProcess(); - -public: - // ------------------------------------------------------------------- - bool IsActive( unsigned int pFlags) const; - - // ------------------------------------------------------------------- - void Execute( aiScene* pScene); - -protected: - - // ------------------------------------------------------------------- - /** Report a validation error. This will throw an exception, - * control won't return. - * @param msg Format string for sprintf().*/ - AI_WONT_RETURN void ReportError(const char* msg,...) AI_WONT_RETURN_SUFFIX; - - - // ------------------------------------------------------------------- - /** Report a validation warning. This won't throw an exception, - * control will return to the caller. 
- * @param msg Format string for sprintf().*/ - void ReportWarning(const char* msg,...); - - - // ------------------------------------------------------------------- - /** Validates a mesh - * @param pMesh Input mesh*/ - void Validate( const aiMesh* pMesh); - - // ------------------------------------------------------------------- - /** Validates a bone - * @param pMesh Input mesh - * @param pBone Input bone*/ - void Validate( const aiMesh* pMesh,const aiBone* pBone,float* afSum); - - // ------------------------------------------------------------------- - /** Validates an animation - * @param pAnimation Input animation*/ - void Validate( const aiAnimation* pAnimation); - - // ------------------------------------------------------------------- - /** Validates a material - * @param pMaterial Input material*/ - void Validate( const aiMaterial* pMaterial); - - // ------------------------------------------------------------------- - /** Search the material data structure for invalid or corrupt - * texture keys. - * @param pMaterial Input material - * @param type Type of the texture*/ - void SearchForInvalidTextures(const aiMaterial* pMaterial, - aiTextureType type); - - // ------------------------------------------------------------------- - /** Validates a texture - * @param pTexture Input texture*/ - void Validate( const aiTexture* pTexture); - - // ------------------------------------------------------------------- - /** Validates a light source - * @param pLight Input light - */ - void Validate( const aiLight* pLight); - - // ------------------------------------------------------------------- - /** Validates a camera - * @param pCamera Input camera*/ - void Validate( const aiCamera* pCamera); - - // ------------------------------------------------------------------- - /** Validates a bone animation channel - * @param pAnimation Animation channel. - * @param pBoneAnim Input bone animation */ - void Validate( const aiAnimation* pAnimation, - const aiNodeAnim* pBoneAnim); - - /** Validates a mesh morph animation channel. - * @param pAnimation Input animation. - * @param pMeshMorphAnim Mesh morph animation channel. 
- * */ - void Validate( const aiAnimation* pAnimation, - const aiMeshMorphAnim* pMeshMorphAnim); - - // ------------------------------------------------------------------- - /** Validates a node and all of its subnodes - * @param Node Input node*/ - void Validate( const aiNode* pNode); - - // ------------------------------------------------------------------- - /** Validates a string - * @param pString Input string*/ - void Validate( const aiString* pString); - -private: - - // template to validate one of the aiScene::mXXX arrays - template <typename T> - inline void DoValidation(T** array, unsigned int size, - const char* firstName, const char* secondName); - - // extended version: checks whether T::mName occurs twice - template <typename T> - inline void DoValidationEx(T** array, unsigned int size, - const char* firstName, const char* secondName); - - // extension to the first template which does also search - // the nodegraph for an item with the same name - template <typename T> - inline void DoValidationWithNameCheck(T** array, unsigned int size, - const char* firstName, const char* secondName); - - aiScene* mScene; -}; - - - - -} // end of namespace Assimp - -#endif // AI_VALIDATEPROCESS_H_INC diff --git a/thirdparty/assimp/contrib/utf8cpp/source/utf8.h b/thirdparty/assimp/contrib/utf8cpp/source/utf8.h deleted file mode 100644 index 82b13f59f9..0000000000 --- a/thirdparty/assimp/contrib/utf8cpp/source/utf8.h +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2006 Nemanja Trifunovic - -/* -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
-*/ - - -#ifndef UTF8_FOR_CPP_2675DCD0_9480_4c0c_B92A_CC14C027B731 -#define UTF8_FOR_CPP_2675DCD0_9480_4c0c_B92A_CC14C027B731 - -#include "utf8/checked.h" -#include "utf8/unchecked.h" - -#endif // header guard diff --git a/thirdparty/assimp/contrib/utf8cpp/source/utf8/checked.h b/thirdparty/assimp/contrib/utf8cpp/source/utf8/checked.h deleted file mode 100644 index 1331155138..0000000000 --- a/thirdparty/assimp/contrib/utf8cpp/source/utf8/checked.h +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2006 Nemanja Trifunovic - -/* -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. -*/ - - -#ifndef UTF8_FOR_CPP_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 -#define UTF8_FOR_CPP_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 - -#include "core.h" -#include <stdexcept> - -namespace utf8 -{ - // Base for the exceptions that may be thrown from the library - class exception : public ::std::exception { - }; - - // Exceptions that may be thrown from the library functions. 
- class invalid_code_point : public exception { - uint32_t cp; - public: - invalid_code_point(uint32_t cp) : cp(cp) {} - virtual const char* what() const throw() { return "Invalid code point"; } - uint32_t code_point() const {return cp;} - }; - - class invalid_utf8 : public exception { - uint8_t u8; - public: - invalid_utf8 (uint8_t u) : u8(u) {} - virtual const char* what() const throw() { return "Invalid UTF-8"; } - uint8_t utf8_octet() const {return u8;} - }; - - class invalid_utf16 : public exception { - uint16_t u16; - public: - invalid_utf16 (uint16_t u) : u16(u) {} - virtual const char* what() const throw() { return "Invalid UTF-16"; } - uint16_t utf16_word() const {return u16;} - }; - - class not_enough_room : public exception { - public: - virtual const char* what() const throw() { return "Not enough space"; } - }; - - /// The library API - functions intended to be called by the users - - template <typename octet_iterator> - octet_iterator append(uint32_t cp, octet_iterator result) - { - if (!utf8::internal::is_code_point_valid(cp)) - throw invalid_code_point(cp); - - if (cp < 0x80) // one octet - *(result++) = static_cast<uint8_t>(cp); - else if (cp < 0x800) { // two octets - *(result++) = static_cast<uint8_t>((cp >> 6) | 0xc0); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - else if (cp < 0x10000) { // three octets - *(result++) = static_cast<uint8_t>((cp >> 12) | 0xe0); - *(result++) = static_cast<uint8_t>(((cp >> 6) & 0x3f) | 0x80); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - else { // four octets - *(result++) = static_cast<uint8_t>((cp >> 18) | 0xf0); - *(result++) = static_cast<uint8_t>(((cp >> 12) & 0x3f) | 0x80); - *(result++) = static_cast<uint8_t>(((cp >> 6) & 0x3f) | 0x80); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - return result; - } - - template <typename octet_iterator, typename output_iterator> - output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement) - { - while (start != end) { - octet_iterator sequence_start = start; - internal::utf_error err_code = utf8::internal::validate_next(start, end); - switch (err_code) { - case internal::UTF8_OK : - for (octet_iterator it = sequence_start; it != start; ++it) - *out++ = *it; - break; - case internal::NOT_ENOUGH_ROOM: - throw not_enough_room(); - case internal::INVALID_LEAD: - out = utf8::append (replacement, out); - ++start; - break; - case internal::INCOMPLETE_SEQUENCE: - case internal::OVERLONG_SEQUENCE: - case internal::INVALID_CODE_POINT: - out = utf8::append (replacement, out); - ++start; - // just one replacement mark for the sequence - while (start != end && utf8::internal::is_trail(*start)) - ++start; - break; - } - } - return out; - } - - template <typename octet_iterator, typename output_iterator> - inline output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out) - { - static const uint32_t replacement_marker = utf8::internal::mask16(0xfffd); - return utf8::replace_invalid(start, end, out, replacement_marker); - } - - template <typename octet_iterator> - uint32_t next(octet_iterator& it, octet_iterator end) - { - uint32_t cp = 0; - internal::utf_error err_code = utf8::internal::validate_next(it, end, cp); - switch (err_code) { - case internal::UTF8_OK : - break; - case internal::NOT_ENOUGH_ROOM : - throw not_enough_room(); - case internal::INVALID_LEAD : - case internal::INCOMPLETE_SEQUENCE : - case internal::OVERLONG_SEQUENCE : - throw invalid_utf8(*it); - 
case internal::INVALID_CODE_POINT : - throw invalid_code_point(cp); - } - return cp; - } - - template <typename octet_iterator> - uint32_t peek_next(octet_iterator it, octet_iterator end) - { - return utf8::next(it, end); - } - - template <typename octet_iterator> - uint32_t prior(octet_iterator& it, octet_iterator start) - { - // can't do much if it == start - if (it == start) - throw not_enough_room(); - - octet_iterator end = it; - // Go back until we hit either a lead octet or start - while (utf8::internal::is_trail(*(--it))) - if (it == start) - throw invalid_utf8(*it); // error - no lead byte in the sequence - return utf8::peek_next(it, end); - } - - /// Deprecated in versions that include "prior" - template <typename octet_iterator> - uint32_t previous(octet_iterator& it, octet_iterator pass_start) - { - octet_iterator end = it; - while (utf8::internal::is_trail(*(--it))) - if (it == pass_start) - throw invalid_utf8(*it); // error - no lead byte in the sequence - octet_iterator temp = it; - return utf8::next(temp, end); - } - - template <typename octet_iterator, typename distance_type> - void advance (octet_iterator& it, distance_type n, octet_iterator end) - { - for (distance_type i = 0; i < n; ++i) - utf8::next(it, end); - } - - template <typename octet_iterator> - typename std::iterator_traits<octet_iterator>::difference_type - distance (octet_iterator first, octet_iterator last) - { - typename std::iterator_traits<octet_iterator>::difference_type dist; - for (dist = 0; first < last; ++dist) - utf8::next(first, last); - return dist; - } - - template <typename u16bit_iterator, typename octet_iterator> - octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result) - { - while (start != end) { - uint32_t cp = utf8::internal::mask16(*start++); - // Take care of surrogate pairs first - if (utf8::internal::is_lead_surrogate(cp)) { - if (start != end) { - uint32_t trail_surrogate = utf8::internal::mask16(*start++); - if (utf8::internal::is_trail_surrogate(trail_surrogate)) - cp = (cp << 10) + trail_surrogate + internal::SURROGATE_OFFSET; - else - throw invalid_utf16(static_cast<uint16_t>(trail_surrogate)); - } - else - throw invalid_utf16(static_cast<uint16_t>(cp)); - - } - // Lone trail surrogate - else if (utf8::internal::is_trail_surrogate(cp)) - throw invalid_utf16(static_cast<uint16_t>(cp)); - - result = utf8::append(cp, result); - } - return result; - } - - template <typename u16bit_iterator, typename octet_iterator> - u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result) - { - while (start != end) { - uint32_t cp = utf8::next(start, end); - if (cp > 0xffff) { //make a surrogate pair - *result++ = static_cast<uint16_t>((cp >> 10) + internal::LEAD_OFFSET); - *result++ = static_cast<uint16_t>((cp & 0x3ff) + internal::TRAIL_SURROGATE_MIN); - } - else - *result++ = static_cast<uint16_t>(cp); - } - return result; - } - - template <typename octet_iterator, typename u32bit_iterator> - octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result) - { - while (start != end) - result = utf8::append(*(start++), result); - - return result; - } - - template <typename octet_iterator, typename u32bit_iterator> - u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result) - { - while (start != end) - (*result++) = utf8::next(start, end); - - return result; - } - - // The iterator class - template <typename octet_iterator> - class iterator : public std::iterator 
<std::bidirectional_iterator_tag, uint32_t> { - octet_iterator it; - octet_iterator range_start; - octet_iterator range_end; - public: - iterator () {} - explicit iterator (const octet_iterator& octet_it, - const octet_iterator& range_start, - const octet_iterator& range_end) : - it(octet_it), range_start(range_start), range_end(range_end) - { - if (it < range_start || it > range_end) - throw std::out_of_range("Invalid utf-8 iterator position"); - } - // the default "big three" are OK - octet_iterator base () const { return it; } - uint32_t operator * () const - { - octet_iterator temp = it; - return utf8::next(temp, range_end); - } - bool operator == (const iterator& rhs) const - { - if (range_start != rhs.range_start || range_end != rhs.range_end) - throw std::logic_error("Comparing utf-8 iterators defined with different ranges"); - return (it == rhs.it); - } - bool operator != (const iterator& rhs) const - { - return !(operator == (rhs)); - } - iterator& operator ++ () - { - utf8::next(it, range_end); - return *this; - } - iterator operator ++ (int) - { - iterator temp = *this; - utf8::next(it, range_end); - return temp; - } - iterator& operator -- () - { - utf8::prior(it, range_start); - return *this; - } - iterator operator -- (int) - { - iterator temp = *this; - utf8::prior(it, range_start); - return temp; - } - }; // class iterator - -} // namespace utf8 - -#endif //header guard - - diff --git a/thirdparty/assimp/contrib/utf8cpp/source/utf8/core.h b/thirdparty/assimp/contrib/utf8cpp/source/utf8/core.h deleted file mode 100644 index 693d388c07..0000000000 --- a/thirdparty/assimp/contrib/utf8cpp/source/utf8/core.h +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2006 Nemanja Trifunovic - -/* -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. -*/ - - -#ifndef UTF8_FOR_CPP_CORE_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 -#define UTF8_FOR_CPP_CORE_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 - -#include <iterator> - -namespace utf8 -{ - // The typedefs for 8-bit, 16-bit and 32-bit unsigned integers - // You may need to change them to match your system. 
- // These typedefs have the same names as ones from cstdint, or boost/cstdint - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; - -// Helper code - not intended to be directly called by the library users. May be changed at any time -namespace internal -{ - // Unicode constants - // Leading (high) surrogates: 0xd800 - 0xdbff - // Trailing (low) surrogates: 0xdc00 - 0xdfff - const uint16_t LEAD_SURROGATE_MIN = 0xd800u; - const uint16_t LEAD_SURROGATE_MAX = 0xdbffu; - const uint16_t TRAIL_SURROGATE_MIN = 0xdc00u; - const uint16_t TRAIL_SURROGATE_MAX = 0xdfffu; - const uint16_t LEAD_OFFSET = LEAD_SURROGATE_MIN - (0x10000 >> 10); - const uint32_t SURROGATE_OFFSET = 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN; - - // Maximum valid value for a Unicode code point - const uint32_t CODE_POINT_MAX = 0x0010ffffu; - - template<typename octet_type> - inline uint8_t mask8(octet_type oc) - { - return static_cast<uint8_t>(0xff & oc); - } - template<typename u16_type> - inline uint16_t mask16(u16_type oc) - { - return static_cast<uint16_t>(0xffff & oc); - } - template<typename octet_type> - inline bool is_trail(octet_type oc) - { - return ((utf8::internal::mask8(oc) >> 6) == 0x2); - } - - template <typename u16> - inline bool is_lead_surrogate(u16 cp) - { - return (cp >= LEAD_SURROGATE_MIN && cp <= LEAD_SURROGATE_MAX); - } - - template <typename u16> - inline bool is_trail_surrogate(u16 cp) - { - return (cp >= TRAIL_SURROGATE_MIN && cp <= TRAIL_SURROGATE_MAX); - } - - template <typename u16> - inline bool is_surrogate(u16 cp) - { - return (cp >= LEAD_SURROGATE_MIN && cp <= TRAIL_SURROGATE_MAX); - } - - template <typename u32> - inline bool is_code_point_valid(u32 cp) - { - return (cp <= CODE_POINT_MAX && !utf8::internal::is_surrogate(cp)); - } - - template <typename octet_iterator> - inline typename std::iterator_traits<octet_iterator>::difference_type - sequence_length(octet_iterator lead_it) - { - uint8_t lead = utf8::internal::mask8(*lead_it); - if (lead < 0x80) - return 1; - else if ((lead >> 5) == 0x6) - return 2; - else if ((lead >> 4) == 0xe) - return 3; - else if ((lead >> 3) == 0x1e) - return 4; - else - return 0; - } - - template <typename octet_difference_type> - inline bool is_overlong_sequence(uint32_t cp, octet_difference_type length) - { - if (cp < 0x80) { - if (length != 1) - return true; - } - else if (cp < 0x800) { - if (length != 2) - return true; - } - else if (cp < 0x10000) { - if (length != 3) - return true; - } - - return false; - } - - enum utf_error {UTF8_OK, NOT_ENOUGH_ROOM, INVALID_LEAD, INCOMPLETE_SEQUENCE, OVERLONG_SEQUENCE, INVALID_CODE_POINT}; - - /// Helper for get_sequence_x - template <typename octet_iterator> - utf_error increase_safely(octet_iterator& it, octet_iterator end) - { - if (++it == end) - return NOT_ENOUGH_ROOM; - - if (!utf8::internal::is_trail(*it)) - return INCOMPLETE_SEQUENCE; - - return UTF8_OK; - } - - #define UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(IT, END) {utf_error ret = increase_safely(IT, END); if (ret != UTF8_OK) return ret;} - - /// get_sequence_x functions decode utf-8 sequences of the length x - template <typename octet_iterator> - utf_error get_sequence_1(octet_iterator& it, octet_iterator end, uint32_t& code_point) - { - if (it == end) - return NOT_ENOUGH_ROOM; - - code_point = utf8::internal::mask8(*it); - - return UTF8_OK; - } - - template <typename octet_iterator> - utf_error get_sequence_2(octet_iterator& it, octet_iterator end, uint32_t& code_point) - { - if (it == end) - 
return NOT_ENOUGH_ROOM; - - code_point = utf8::internal::mask8(*it); - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point = ((code_point << 6) & 0x7ff) + ((*it) & 0x3f); - - return UTF8_OK; - } - - template <typename octet_iterator> - utf_error get_sequence_3(octet_iterator& it, octet_iterator end, uint32_t& code_point) - { - if (it == end) - return NOT_ENOUGH_ROOM; - - code_point = utf8::internal::mask8(*it); - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point = ((code_point << 12) & 0xffff) + ((utf8::internal::mask8(*it) << 6) & 0xfff); - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point += (*it) & 0x3f; - - return UTF8_OK; - } - - template <typename octet_iterator> - utf_error get_sequence_4(octet_iterator& it, octet_iterator end, uint32_t& code_point) - { - if (it == end) - return NOT_ENOUGH_ROOM; - - code_point = utf8::internal::mask8(*it); - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point = ((code_point << 18) & 0x1fffff) + ((utf8::internal::mask8(*it) << 12) & 0x3ffff); - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point += (utf8::internal::mask8(*it) << 6) & 0xfff; - - UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) - - code_point += (*it) & 0x3f; - - return UTF8_OK; - } - - #undef UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR - - template <typename octet_iterator> - utf_error validate_next(octet_iterator& it, octet_iterator end, uint32_t& code_point) - { - // Save the original value of it so we can go back in case of failure - // Of course, it does not make much sense with i.e. stream iterators - octet_iterator original_it = it; - - uint32_t cp = 0; - // Determine the sequence length based on the lead octet - typedef typename std::iterator_traits<octet_iterator>::difference_type octet_difference_type; - const octet_difference_type length = utf8::internal::sequence_length(it); - - // Get trail octets and calculate the code point - utf_error err = UTF8_OK; - switch (length) { - case 0: - return INVALID_LEAD; - case 1: - err = utf8::internal::get_sequence_1(it, end, cp); - break; - case 2: - err = utf8::internal::get_sequence_2(it, end, cp); - break; - case 3: - err = utf8::internal::get_sequence_3(it, end, cp); - break; - case 4: - err = utf8::internal::get_sequence_4(it, end, cp); - break; - } - - if (err == UTF8_OK) { - // Decoding succeeded. Now, security checks... - if (utf8::internal::is_code_point_valid(cp)) { - if (!utf8::internal::is_overlong_sequence(cp, length)){ - // Passed! Return here. 
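// For instance, the classic overlong form 0xC0 0xAF for '/' (U+002F) decodes to
// cp == 0x2F with length == 2, so the is_overlong_sequence() check above rejects
// it and control never reaches this point.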
- code_point = cp; - ++it; - return UTF8_OK; - } - else - err = OVERLONG_SEQUENCE; - } - else - err = INVALID_CODE_POINT; - } - - // Failure branch - restore the original value of the iterator - it = original_it; - return err; - } - - template <typename octet_iterator> - inline utf_error validate_next(octet_iterator& it, octet_iterator end) { - uint32_t ignored; - return utf8::internal::validate_next(it, end, ignored); - } - -} // namespace internal - - /// The library API - functions intended to be called by the users - - // Byte order mark - const uint8_t bom[] = {0xef, 0xbb, 0xbf}; - - template <typename octet_iterator> - octet_iterator find_invalid(octet_iterator start, octet_iterator end) - { - octet_iterator result = start; - while (result != end) { - utf8::internal::utf_error err_code = utf8::internal::validate_next(result, end); - if (err_code != internal::UTF8_OK) - return result; - } - return result; - } - - template <typename octet_iterator> - inline bool is_valid(octet_iterator start, octet_iterator end) - { - return (utf8::find_invalid(start, end) == end); - } - - template <typename octet_iterator> - inline bool starts_with_bom (octet_iterator it, octet_iterator end) - { - return ( - ((it != end) && (utf8::internal::mask8(*it++)) == bom[0]) && - ((it != end) && (utf8::internal::mask8(*it++)) == bom[1]) && - ((it != end) && (utf8::internal::mask8(*it)) == bom[2]) - ); - } - - //Deprecated in release 2.3 - template <typename octet_iterator> - inline bool is_bom (octet_iterator it) - { - return ( - (utf8::internal::mask8(*it++)) == bom[0] && - (utf8::internal::mask8(*it++)) == bom[1] && - (utf8::internal::mask8(*it)) == bom[2] - ); - } -} // namespace utf8 - -#endif // header guard - - diff --git a/thirdparty/assimp/contrib/utf8cpp/source/utf8/unchecked.h b/thirdparty/assimp/contrib/utf8cpp/source/utf8/unchecked.h deleted file mode 100644 index cb2427166b..0000000000 --- a/thirdparty/assimp/contrib/utf8cpp/source/utf8/unchecked.h +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2006 Nemanja Trifunovic - -/* -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
-*/ - - -#ifndef UTF8_FOR_CPP_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 -#define UTF8_FOR_CPP_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731 - -#include "core.h" - -namespace utf8 -{ - namespace unchecked - { - template <typename octet_iterator> - octet_iterator append(uint32_t cp, octet_iterator result) - { - if (cp < 0x80) // one octet - *(result++) = static_cast<uint8_t>(cp); - else if (cp < 0x800) { // two octets - *(result++) = static_cast<uint8_t>((cp >> 6) | 0xc0); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - else if (cp < 0x10000) { // three octets - *(result++) = static_cast<uint8_t>((cp >> 12) | 0xe0); - *(result++) = static_cast<uint8_t>(((cp >> 6) & 0x3f) | 0x80); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - else { // four octets - *(result++) = static_cast<uint8_t>((cp >> 18) | 0xf0); - *(result++) = static_cast<uint8_t>(((cp >> 12) & 0x3f)| 0x80); - *(result++) = static_cast<uint8_t>(((cp >> 6) & 0x3f) | 0x80); - *(result++) = static_cast<uint8_t>((cp & 0x3f) | 0x80); - } - return result; - } - - template <typename octet_iterator> - uint32_t next(octet_iterator& it) - { - uint32_t cp = utf8::internal::mask8(*it); - typename std::iterator_traits<octet_iterator>::difference_type length = utf8::internal::sequence_length(it); - switch (length) { - case 1: - break; - case 2: - it++; - cp = ((cp << 6) & 0x7ff) + ((*it) & 0x3f); - break; - case 3: - ++it; - cp = ((cp << 12) & 0xffff) + ((utf8::internal::mask8(*it) << 6) & 0xfff); - ++it; - cp += (*it) & 0x3f; - break; - case 4: - ++it; - cp = ((cp << 18) & 0x1fffff) + ((utf8::internal::mask8(*it) << 12) & 0x3ffff); - ++it; - cp += (utf8::internal::mask8(*it) << 6) & 0xfff; - ++it; - cp += (*it) & 0x3f; - break; - } - ++it; - return cp; - } - - template <typename octet_iterator> - uint32_t peek_next(octet_iterator it) - { - return utf8::unchecked::next(it); - } - - template <typename octet_iterator> - uint32_t prior(octet_iterator& it) - { - while (utf8::internal::is_trail(*(--it))) ; - octet_iterator temp = it; - return utf8::unchecked::next(temp); - } - - // Deprecated in versions that include prior, but only for the sake of consistency (see utf8::previous) - template <typename octet_iterator> - inline uint32_t previous(octet_iterator& it) - { - return utf8::unchecked::prior(it); - } - - template <typename octet_iterator, typename distance_type> - void advance (octet_iterator& it, distance_type n) - { - for (distance_type i = 0; i < n; ++i) - utf8::unchecked::next(it); - } - - template <typename octet_iterator> - typename std::iterator_traits<octet_iterator>::difference_type - distance (octet_iterator first, octet_iterator last) - { - typename std::iterator_traits<octet_iterator>::difference_type dist; - for (dist = 0; first < last; ++dist) - utf8::unchecked::next(first); - return dist; - } - - template <typename u16bit_iterator, typename octet_iterator> - octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result) - { - while (start != end) { - uint32_t cp = utf8::internal::mask16(*start++); - // Take care of surrogate pairs first - if (utf8::internal::is_lead_surrogate(cp)) { - uint32_t trail_surrogate = utf8::internal::mask16(*start++); - cp = (cp << 10) + trail_surrogate + internal::SURROGATE_OFFSET; - } - result = utf8::unchecked::append(cp, result); - } - return result; - } - - template <typename u16bit_iterator, typename octet_iterator> - u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result) - { - while (start 
< end) { - uint32_t cp = utf8::unchecked::next(start); - if (cp > 0xffff) { //make a surrogate pair - *result++ = static_cast<uint16_t>((cp >> 10) + internal::LEAD_OFFSET); - *result++ = static_cast<uint16_t>((cp & 0x3ff) + internal::TRAIL_SURROGATE_MIN); - } - else - *result++ = static_cast<uint16_t>(cp); - } - return result; - } - - template <typename octet_iterator, typename u32bit_iterator> - octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result) - { - while (start != end) - result = utf8::unchecked::append(*(start++), result); - - return result; - } - - template <typename octet_iterator, typename u32bit_iterator> - u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result) - { - while (start < end) - (*result++) = utf8::unchecked::next(start); - - return result; - } - - // The iterator class - template <typename octet_iterator> - class iterator : public std::iterator <std::bidirectional_iterator_tag, uint32_t> { - octet_iterator it; - public: - iterator () {} - explicit iterator (const octet_iterator& octet_it): it(octet_it) {} - // the default "big three" are OK - octet_iterator base () const { return it; } - uint32_t operator * () const - { - octet_iterator temp = it; - return utf8::unchecked::next(temp); - } - bool operator == (const iterator& rhs) const - { - return (it == rhs.it); - } - bool operator != (const iterator& rhs) const - { - return !(operator == (rhs)); - } - iterator& operator ++ () - { - ::std::advance(it, utf8::internal::sequence_length(it)); - return *this; - } - iterator operator ++ (int) - { - iterator temp = *this; - ::std::advance(it, utf8::internal::sequence_length(it)); - return temp; - } - iterator& operator -- () - { - utf8::unchecked::prior(it); - return *this; - } - iterator operator -- (int) - { - iterator temp = *this; - utf8::unchecked::prior(it); - return temp; - } - }; // class iterator - - } // namespace utf8::unchecked -} // namespace utf8 - - -#endif // header guard - diff --git a/thirdparty/assimp/include/assimp/.editorconfig b/thirdparty/assimp/include/assimp/.editorconfig deleted file mode 100644 index 9ea66423ad..0000000000 --- a/thirdparty/assimp/include/assimp/.editorconfig +++ /dev/null @@ -1,8 +0,0 @@ -# See <http://EditorConfig.org> for details - -[*.{h,hpp,inl}] -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true -indent_size = 4 -indent_style = space diff --git a/thirdparty/assimp/include/assimp/BaseImporter.h b/thirdparty/assimp/include/assimp/BaseImporter.h deleted file mode 100644 index ad8a3dafd8..0000000000 --- a/thirdparty/assimp/include/assimp/BaseImporter.h +++ /dev/null @@ -1,420 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Definition of the base class for all importer worker classes. */ -#pragma once -#ifndef INCLUDED_AI_BASEIMPORTER_H -#define INCLUDED_AI_BASEIMPORTER_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include "Exceptional.h" - -#include <vector> -#include <set> -#include <map> -#include <assimp/types.h> -#include <assimp/ProgressHandler.hpp> -#include <assimp/ai_assert.h> - -struct aiScene; -struct aiImporterDesc; - -namespace Assimp { - -class Importer; -class IOSystem; -class BaseProcess; -class SharedPostProcessInfo; -class IOStream; - -// utility to do char4 to uint32 in a portable manner -#define AI_MAKE_MAGIC(string) ((uint32_t)((string[0] << 24) + \ - (string[1] << 16) + (string[2] << 8) + string[3])) - - -// --------------------------------------------------------------------------- -/** FOR IMPORTER PLUGINS ONLY: The BaseImporter defines a common interface - * for all importer worker classes. - * - * The interface defines two functions: CanRead() is used to check if the - * importer can handle the format of the given file. If an implementation of - * this function returns true, the importer then calls ReadFile() which - * imports the given file. ReadFile is not overridable, it just calls - * InternReadFile() and catches any ImportErrorException that might occur. - */ -class ASSIMP_API BaseImporter { - friend class Importer; - -private: - /* Pushes state into importer for the importer scale */ - virtual void UpdateImporterScale( Importer* pImp ); - -public: - - /** Constructor to be privately used by #Importer */ - BaseImporter() AI_NO_EXCEPT; - - /** Destructor, private as well */ - virtual ~BaseImporter(); - - // ------------------------------------------------------------------- - /** Returns whether the class can handle the format of the given file. - * - * The implementation should be as quick as possible. A check for - * the file extension is enough. If no suitable loader is found with - * this strategy, CanRead() is called again, the 'checkSig' parameter - * set to true this time. Now the implementation is expected to - * perform a full check of the file structure, possibly searching the - * first bytes of the file for magic identifiers or keywords. - * - * @param pFile Path and file name of the file to be examined. - * @param pIOHandler The IO handler to use for accessing any file. - * @param checkSig Set to true if this method is called a second time. 
- * This time, the implementation may take more time to examine the - * contents of the file to be loaded for magic bytes, keywords, etc - * to be able to load files with unknown/not existent file extensions. - * @return true if the class can read this file, false if not. - */ - virtual bool CanRead( - const std::string& pFile, - IOSystem* pIOHandler, - bool checkSig - ) const = 0; - - // ------------------------------------------------------------------- - /** Imports the given file and returns the imported data. - * If the import succeeds, ownership of the data is transferred to - * the caller. If the import fails, NULL is returned. The function - * takes care that any partially constructed data is destroyed - * beforehand. - * - * @param pImp #Importer object hosting this loader. - * @param pFile Path of the file to be imported. - * @param pIOHandler IO-Handler used to open this and possible other files. - * @return The imported data or NULL if failed. If it failed a - * human-readable error description can be retrieved by calling - * GetErrorText() - * - * @note This function is not intended to be overridden. Implement - * InternReadFile() to do the import. If an exception is thrown somewhere - * in InternReadFile(), this function will catch it and transform it into - * a suitable response to the caller. - */ - aiScene* ReadFile( - Importer* pImp, - const std::string& pFile, - IOSystem* pIOHandler - ); - - // ------------------------------------------------------------------- - /** Returns the error description of the last error that occurred. - * @return A description of the last error that occurred. An empty - * string if there was no error. - */ - const std::string& GetErrorText() const { - return m_ErrorText; - } - - // ------------------------------------------------------------------- - /** Called prior to ReadFile(). - * The function is a request to the importer to update its configuration - * basing on the Importer's configuration property list. - * @param pImp Importer instance - */ - virtual void SetupProperties( - const Importer* pImp - ); - - // ------------------------------------------------------------------- - /** Called by #Importer::GetImporterInfo to get a description of - * some loader features. Importers must provide this information. */ - virtual const aiImporterDesc* GetInfo() const = 0; - - /** - * Will be called only by scale process when scaling is requested. - */ - virtual void SetFileScale(double scale) - { - fileScale = scale; - } - - virtual double GetFileScale() const - { - return fileScale; - } - - enum ImporterUnits { - M, - MM, - CM, - INCHES, - FEET - }; - - /** - * Assimp Importer - * unit conversions available - * NOTE: Valid options are initialised in the - * constructor in the implementation file to - * work around a VS2013 compiler bug if support - * for that compiler is dropped in the future - * initialisation can be moved back here - * */ - std::map<ImporterUnits, double> importerUnits; - - virtual void SetApplicationUnits( const ImporterUnits& unit ) - { - importerScale = importerUnits[unit]; - applicationUnits = unit; - } - - virtual const ImporterUnits& GetApplicationUnits() - { - return applicationUnits; - } - - // ------------------------------------------------------------------- - /** Called by #Importer::GetExtensionList for each loaded importer. - * Take the extension list contained in the structure returned by - * #GetInfo and insert all file extensions into the given set. 
- * @param extension set to collect file extensions in*/ - void GetExtensionList(std::set<std::string>& extensions); - -protected: - ImporterUnits applicationUnits = ImporterUnits::M; - double importerScale = 1.0; - double fileScale = 1.0; - - - - // ------------------------------------------------------------------- - /** Imports the given file into the given scene structure. The - * function is expected to throw an ImportErrorException if there is - * an error. If it terminates normally, the data in aiScene is - * expected to be correct. Override this function to implement the - * actual importing. - * <br> - * The output scene must meet the following requirements:<br> - * <ul> - * <li>At least a root node must be there, even if its only purpose - * is to reference one mesh.</li> - * <li>aiMesh::mPrimitiveTypes may be 0. The types of primitives - * in the mesh are determined automatically in this case.</li> - * <li>the vertex data is stored in a pseudo-indexed "verbose" format. - * In fact this means that every vertex that is referenced by - * a face is unique. Or the other way round: a vertex index may - * not occur twice in a single aiMesh.</li> - * <li>aiAnimation::mDuration may be -1. Assimp determines the length - * of the animation automatically in this case as the length of - * the longest animation channel.</li> - * <li>aiMesh::mBitangents may be NULL if tangents and normals are - * given. In this case bitangents are computed as the cross product - * between normal and tangent.</li> - * <li>There needn't be a material. If none is there a default material - * is generated. However, it is recommended practice for loaders - * to generate a default material for yourself that matches the - * default material setting for the file format better than Assimp's - * generic default material. Note that default materials *should* - * be named AI_DEFAULT_MATERIAL_NAME if they're just color-shaded - * or AI_DEFAULT_TEXTURED_MATERIAL_NAME if they define a (dummy) - * texture. </li> - * </ul> - * If the AI_SCENE_FLAGS_INCOMPLETE-Flag is <b>not</b> set:<ul> - * <li> at least one mesh must be there</li> - * <li> there may be no meshes with 0 vertices or faces</li> - * </ul> - * This won't be checked (except by the validation step): Assimp will - * crash if one of the conditions is not met! - * - * @param pFile Path of the file to be imported. - * @param pScene The scene object to hold the imported data. - * NULL is not a valid parameter. - * @param pIOHandler The IO handler to use for any file access. - * NULL is not a valid parameter. */ - virtual void InternReadFile( - const std::string& pFile, - aiScene* pScene, - IOSystem* pIOHandler - ) = 0; - -public: // static utilities - - // ------------------------------------------------------------------- - /** A utility for CanRead(). - * - * The function searches the header of a file for a specific token - * and returns true if this token is found. This works for text - * files only. There is a rudimentary handling of UNICODE files. - * The comparison is case independent. - * - * @param pIOSystem IO System to work with - * @param file File name of the file - * @param tokens List of tokens to search for - * @param numTokens Size of the token array - * @param searchBytes Number of bytes to be searched for the tokens. 
- */ - static bool SearchFileHeaderForToken( - IOSystem* pIOSystem, - const std::string& file, - const char** tokens, - unsigned int numTokens, - unsigned int searchBytes = 200, - bool tokensSol = false, - bool noAlphaBeforeTokens = false); - - // ------------------------------------------------------------------- - /** @brief Check whether a file has a specific file extension - * @param pFile Input file - * @param ext0 Extension to check for. Lowercase characters only, no dot! - * @param ext1 Optional second extension - * @param ext2 Optional third extension - * @note Case-insensitive - */ - static bool SimpleExtensionCheck ( - const std::string& pFile, - const char* ext0, - const char* ext1 = NULL, - const char* ext2 = NULL); - - // ------------------------------------------------------------------- - /** @brief Extract file extension from a string - * @param pFile Input file - * @return Extension without trailing dot, all lowercase - */ - static std::string GetExtension ( - const std::string& pFile); - - // ------------------------------------------------------------------- - /** @brief Check whether a file starts with one or more magic tokens - * @param pFile Input file - * @param pIOHandler IO system to be used - * @param magic n magic tokens - * @params num Size of magic - * @param offset Offset from file start where tokens are located - * @param Size of one token, in bytes. Maximally 16 bytes. - * @return true if one of the given tokens was found - * - * @note For convenience, the check is also performed for the - * byte-swapped variant of all tokens (big endian). Only for - * tokens of size 2,4. - */ - static bool CheckMagicToken( - IOSystem* pIOHandler, - const std::string& pFile, - const void* magic, - unsigned int num, - unsigned int offset = 0, - unsigned int size = 4); - - // ------------------------------------------------------------------- - /** An utility for all text file loaders. It converts a file to our - * UTF8 character set. Errors are reported, but ignored. - * - * @param data File buffer to be converted to UTF8 data. The buffer - * is resized as appropriate. */ - static void ConvertToUTF8( - std::vector<char>& data); - - // ------------------------------------------------------------------- - /** An utility for all text file loaders. It converts a file from our - * UTF8 character set back to ISO-8859-1. Errors are reported, but ignored. - * - * @param data File buffer to be converted from UTF8 to ISO-8859-1. The buffer - * is resized as appropriate. */ - static void ConvertUTF8toISO8859_1( - std::string& data); - - // ------------------------------------------------------------------- - /// @brief Enum to define, if empty files are ok or not. - enum TextFileMode { - ALLOW_EMPTY, - FORBID_EMPTY - }; - - // ------------------------------------------------------------------- - /** Utility for text file loaders which copies the contents of the - * file into a memory buffer and converts it to our UTF8 - * representation. - * @param stream Stream to read from. - * @param data Output buffer to be resized and filled with the - * converted text file data. The buffer is terminated with - * a binary 0. - * @param mode Whether it is OK to load empty text files. 
*/ - static void TextFileToBuffer( - IOStream* stream, - std::vector<char>& data, - TextFileMode mode = FORBID_EMPTY); - - // ------------------------------------------------------------------- - /** Utility function to move a std::vector into a aiScene array - * @param vec The vector to be moved - * @param out The output pointer to the allocated array. - * @param numOut The output count of elements copied. */ - template<typename T> - AI_FORCE_INLINE - static void CopyVector( - std::vector<T>& vec, - T*& out, - unsigned int& outLength) - { - outLength = unsigned(vec.size()); - if (outLength) { - out = new T[outLength]; - std::swap_ranges(vec.begin(), vec.end(), out); - } - } - -protected: - /// Error description in case there was one. - std::string m_ErrorText; - /// Currently set progress handler. - ProgressHandler* m_progress; -}; - - - -} // end of namespace Assimp - -#endif // AI_BASEIMPORTER_H_INC diff --git a/thirdparty/assimp/include/assimp/Bitmap.h b/thirdparty/assimp/include/assimp/Bitmap.h deleted file mode 100644 index 4c3f5a437b..0000000000 --- a/thirdparty/assimp/include/assimp/Bitmap.h +++ /dev/null @@ -1,129 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Bitmap.h - * @brief Defines bitmap format helper for textures - * - * Used for file formats which embed their textures into the model file. 
- */ -#pragma once -#ifndef AI_BITMAP_H_INC -#define AI_BITMAP_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include "defs.h" -#include <stdint.h> -#include <cstddef> - -struct aiTexture; - -namespace Assimp { - -class IOStream; - -class ASSIMP_API Bitmap { -protected: - - struct Header { - uint16_t type; - uint32_t size; - uint16_t reserved1; - uint16_t reserved2; - uint32_t offset; - - // We define the struct size because sizeof(Header) might return a wrong result because of structure padding. - // Moreover, we must use this ugly and error prone syntax because Visual Studio neither support constexpr or sizeof(name_of_field). - static const std::size_t header_size = - sizeof(uint16_t) + // type - sizeof(uint32_t) + // size - sizeof(uint16_t) + // reserved1 - sizeof(uint16_t) + // reserved2 - sizeof(uint32_t); // offset - }; - - struct DIB { - uint32_t size; - int32_t width; - int32_t height; - uint16_t planes; - uint16_t bits_per_pixel; - uint32_t compression; - uint32_t image_size; - int32_t x_resolution; - int32_t y_resolution; - uint32_t nb_colors; - uint32_t nb_important_colors; - - // We define the struct size because sizeof(DIB) might return a wrong result because of structure padding. - // Moreover, we must use this ugly and error prone syntax because Visual Studio neither support constexpr or sizeof(name_of_field). - static const std::size_t dib_size = - sizeof(uint32_t) + // size - sizeof(int32_t) + // width - sizeof(int32_t) + // height - sizeof(uint16_t) + // planes - sizeof(uint16_t) + // bits_per_pixel - sizeof(uint32_t) + // compression - sizeof(uint32_t) + // image_size - sizeof(int32_t) + // x_resolution - sizeof(int32_t) + // y_resolution - sizeof(uint32_t) + // nb_colors - sizeof(uint32_t); // nb_important_colors - }; - - static const std::size_t mBytesPerPixel = 4; - -public: - static void Save(aiTexture* texture, IOStream* file); - -protected: - static void WriteHeader(Header& header, IOStream* file); - static void WriteDIB(DIB& dib, IOStream* file); - static void WriteData(aiTexture* texture, IOStream* file); -}; - -} - -#endif // AI_BITMAP_H_INC diff --git a/thirdparty/assimp/include/assimp/BlobIOSystem.h b/thirdparty/assimp/include/assimp/BlobIOSystem.h deleted file mode 100644 index d005e5c119..0000000000 --- a/thirdparty/assimp/include/assimp/BlobIOSystem.h +++ /dev/null @@ -1,338 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Provides cheat implementations for IOSystem and IOStream to - * redirect exporter output to a blob chain.*/ - -#ifndef AI_BLOBIOSYSTEM_H_INCLUDED -#define AI_BLOBIOSYSTEM_H_INCLUDED - -#include <assimp/IOStream.hpp> -#include <assimp/cexport.h> -#include <assimp/IOSystem.hpp> -#include <assimp/DefaultLogger.hpp> -#include <stdint.h> -#include <set> -#include <vector> - -namespace Assimp { - class BlobIOSystem; - -// -------------------------------------------------------------------------------------------- -/** Redirect IOStream to a blob */ -// -------------------------------------------------------------------------------------------- -class BlobIOStream : public IOStream -{ -public: - - BlobIOStream(BlobIOSystem* creator, const std::string& file, size_t initial = 4096) - : buffer() - , cur_size() - , file_size() - , cursor() - , initial(initial) - , file(file) - , creator(creator) - { - } - - - virtual ~BlobIOStream(); - -public: - - // ------------------------------------------------------------------- - aiExportDataBlob* GetBlob() - { - aiExportDataBlob* blob = new aiExportDataBlob(); - blob->size = file_size; - blob->data = buffer; - - buffer = NULL; - - return blob; - } - - -public: - - - // ------------------------------------------------------------------- - virtual size_t Read( void *, - size_t, - size_t ) - { - return 0; - } - - // ------------------------------------------------------------------- - virtual size_t Write(const void* pvBuffer, - size_t pSize, - size_t pCount) - { - pSize *= pCount; - if (cursor + pSize > cur_size) { - Grow(cursor + pSize); - } - - memcpy(buffer+cursor, pvBuffer, pSize); - cursor += pSize; - - file_size = std::max(file_size,cursor); - return pCount; - } - - // ------------------------------------------------------------------- - virtual aiReturn Seek(size_t pOffset, - aiOrigin pOrigin) - { - switch(pOrigin) - { - case aiOrigin_CUR: - cursor += pOffset; - break; - - case aiOrigin_END: - cursor = file_size - pOffset; - break; - - case aiOrigin_SET: - cursor = pOffset; - break; - - default: - return AI_FAILURE; - } - - if (cursor > file_size) { - Grow(cursor); - } - - file_size = std::max(cursor,file_size); - return AI_SUCCESS; - } - - // ------------------------------------------------------------------- - virtual size_t Tell() const - { - return cursor; - } - - // ------------------------------------------------------------------- - virtual size_t FileSize() const - { - return file_size; - } - - // ------------------------------------------------------------------- - virtual void Flush() - { - // ignore - } - - - -private: - - // ------------------------------------------------------------------- - 
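// Ensures the backing buffer can hold at least `need` bytes; existing contents
// are preserved and capacity grows geometrically (see the note on the growth
// factor inside the function body).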
void Grow(size_t need = 0) - { - // 1.5 and phi are very heap-friendly growth factors (the first - // allows for frequent re-use of heap blocks, the second - // forms a fibonacci sequence with similar characteristics - - // since this heavily depends on the heap implementation - // and other factors as well, i'll just go with 1.5 since - // it is quicker to compute). - size_t new_size = std::max(initial, std::max( need, cur_size+(cur_size>>1) )); - - const uint8_t* const old = buffer; - buffer = new uint8_t[new_size]; - - if (old) { - memcpy(buffer,old,cur_size); - delete[] old; - } - - cur_size = new_size; - } - -private: - - uint8_t* buffer; - size_t cur_size,file_size, cursor, initial; - - const std::string file; - BlobIOSystem* const creator; -}; - - -#define AI_BLOBIO_MAGIC "$blobfile" - -// -------------------------------------------------------------------------------------------- -/** Redirect IOSystem to a blob */ -// -------------------------------------------------------------------------------------------- -class BlobIOSystem : public IOSystem -{ - - friend class BlobIOStream; - typedef std::pair<std::string, aiExportDataBlob*> BlobEntry; - -public: - - BlobIOSystem() - { - } - - virtual ~BlobIOSystem() - { - for(BlobEntry& blobby : blobs) { - delete blobby.second; - } - } - -public: - - // ------------------------------------------------------------------- - const char* GetMagicFileName() const - { - return AI_BLOBIO_MAGIC; - } - - - // ------------------------------------------------------------------- - aiExportDataBlob* GetBlobChain() - { - // one must be the master - aiExportDataBlob* master = NULL, *cur; - for(const BlobEntry& blobby : blobs) { - if (blobby.first == AI_BLOBIO_MAGIC) { - master = blobby.second; - break; - } - } - if (!master) { - ASSIMP_LOG_ERROR("BlobIOSystem: no data written or master file was not closed properly."); - return NULL; - } - - master->name.Set(""); - - cur = master; - for(const BlobEntry& blobby : blobs) { - if (blobby.second == master) { - continue; - } - - cur->next = blobby.second; - cur = cur->next; - - // extract the file extension from the file written - const std::string::size_type s = blobby.first.find_first_of('.'); - cur->name.Set(s == std::string::npos ? blobby.first : blobby.first.substr(s+1)); - } - - // give up blob ownership - blobs.clear(); - return master; - } - -public: - - // ------------------------------------------------------------------- - virtual bool Exists( const char* pFile) const { - return created.find(std::string(pFile)) != created.end(); - } - - - // ------------------------------------------------------------------- - virtual char getOsSeparator() const { - return '/'; - } - - - // ------------------------------------------------------------------- - virtual IOStream* Open(const char* pFile, - const char* pMode) - { - if (pMode[0] != 'w') { - return NULL; - } - - created.insert(std::string(pFile)); - return new BlobIOStream(this,std::string(pFile)); - } - - // ------------------------------------------------------------------- - virtual void Close( IOStream* pFile) - { - delete pFile; - } - -private: - - // ------------------------------------------------------------------- - void OnDestruct(const std::string& filename, BlobIOStream* child) - { - // we don't know in which the files are closed, so we - // can't reliably say that the first must be the master - // file ... 
- blobs.push_back( BlobEntry(filename,child->GetBlob()) ); - } - -private: - std::set<std::string> created; - std::vector< BlobEntry > blobs; -}; - - -// -------------------------------------------------------------------------------------------- -BlobIOStream :: ~BlobIOStream() -{ - creator->OnDestruct(file,this); - delete[] buffer; -} - - -} // end Assimp - -#endif diff --git a/thirdparty/assimp/include/assimp/ByteSwapper.h b/thirdparty/assimp/include/assimp/ByteSwapper.h deleted file mode 100644 index 3f14c471a8..0000000000 --- a/thirdparty/assimp/include/assimp/ByteSwapper.h +++ /dev/null @@ -1,292 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Helper class tp perform various byte oder swappings - (e.g. little to big endian) */ -#pragma once -#ifndef AI_BYTESWAPPER_H_INC -#define AI_BYTESWAPPER_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/ai_assert.h> -#include <assimp/types.h> -#include <stdint.h> - -#if _MSC_VER >= 1400 -#include <stdlib.h> -#endif - -namespace Assimp { -// -------------------------------------------------------------------------------------- -/** Defines some useful byte order swap routines. - * - * This is required to read big-endian model formats on little-endian machines, - * and vice versa. Direct use of this class is DEPRECATED. Use #StreamReader instead. */ -// -------------------------------------------------------------------------------------- -class ByteSwap { - ByteSwap() AI_NO_EXCEPT {} - -public: - - // ---------------------------------------------------------------------- - /** Swap two bytes of data - * @param[inout] _szOut A void* to save the reintcasts for the caller. 
*/ - static inline void Swap2(void* _szOut) - { - ai_assert(_szOut); - -#if _MSC_VER >= 1400 - uint16_t* const szOut = reinterpret_cast<uint16_t*>(_szOut); - *szOut = _byteswap_ushort(*szOut); -#else - uint8_t* const szOut = reinterpret_cast<uint8_t*>(_szOut); - std::swap(szOut[0],szOut[1]); -#endif - } - - // ---------------------------------------------------------------------- - /** Swap four bytes of data - * @param[inout] _szOut A void* to save the reintcasts for the caller. */ - static inline void Swap4(void* _szOut) - { - ai_assert(_szOut); - -#if _MSC_VER >= 1400 - uint32_t* const szOut = reinterpret_cast<uint32_t*>(_szOut); - *szOut = _byteswap_ulong(*szOut); -#else - uint8_t* const szOut = reinterpret_cast<uint8_t*>(_szOut); - std::swap(szOut[0],szOut[3]); - std::swap(szOut[1],szOut[2]); -#endif - } - - // ---------------------------------------------------------------------- - /** Swap eight bytes of data - * @param[inout] _szOut A void* to save the reintcasts for the caller. */ - static inline void Swap8(void* _szOut) - { - ai_assert(_szOut); - -#if _MSC_VER >= 1400 - uint64_t* const szOut = reinterpret_cast<uint64_t*>(_szOut); - *szOut = _byteswap_uint64(*szOut); -#else - uint8_t* const szOut = reinterpret_cast<uint8_t*>(_szOut); - std::swap(szOut[0],szOut[7]); - std::swap(szOut[1],szOut[6]); - std::swap(szOut[2],szOut[5]); - std::swap(szOut[3],szOut[4]); -#endif - } - - // ---------------------------------------------------------------------- - /** ByteSwap a float. Not a joke. - * @param[inout] fOut ehm. .. */ - static inline void Swap(float* fOut) { - Swap4(fOut); - } - - // ---------------------------------------------------------------------- - /** ByteSwap a double. Not a joke. - * @param[inout] fOut ehm. .. */ - static inline void Swap(double* fOut) { - Swap8(fOut); - } - - - // ---------------------------------------------------------------------- - /** ByteSwap an int16t. Not a joke. - * @param[inout] fOut ehm. .. */ - static inline void Swap(int16_t* fOut) { - Swap2(fOut); - } - - static inline void Swap(uint16_t* fOut) { - Swap2(fOut); - } - - // ---------------------------------------------------------------------- - /** ByteSwap an int32t. Not a joke. - * @param[inout] fOut ehm. .. */ - static inline void Swap(int32_t* fOut){ - Swap4(fOut); - } - - static inline void Swap(uint32_t* fOut){ - Swap4(fOut); - } - - // ---------------------------------------------------------------------- - /** ByteSwap an int64t. Not a joke. - * @param[inout] fOut ehm. .. */ - static inline void Swap(int64_t* fOut) { - Swap8(fOut); - } - - static inline void Swap(uint64_t* fOut) { - Swap8(fOut); - } - - // ---------------------------------------------------------------------- - //! Templatized ByteSwap - //! 
\returns param tOut as swapped - template<typename Type> - static inline Type Swapped(Type tOut) - { - return _swapper<Type,sizeof(Type)>()(tOut); - } - -private: - - template <typename T, size_t size> struct _swapper; -}; - -template <typename T> struct ByteSwap::_swapper<T,2> { - T operator() (T tOut) { - Swap2(&tOut); - return tOut; - } -}; - -template <typename T> struct ByteSwap::_swapper<T,4> { - T operator() (T tOut) { - Swap4(&tOut); - return tOut; - } -}; - -template <typename T> struct ByteSwap::_swapper<T,8> { - T operator() (T tOut) { - Swap8(&tOut); - return tOut; - } -}; - - -// -------------------------------------------------------------------------------------- -// ByteSwap macros for BigEndian/LittleEndian support -// -------------------------------------------------------------------------------------- -#if (defined AI_BUILD_BIG_ENDIAN) -# define AI_LE(t) (t) -# define AI_BE(t) ByteSwap::Swapped(t) -# define AI_LSWAP2(p) -# define AI_LSWAP4(p) -# define AI_LSWAP8(p) -# define AI_LSWAP2P(p) -# define AI_LSWAP4P(p) -# define AI_LSWAP8P(p) -# define LE_NCONST const -# define AI_SWAP2(p) ByteSwap::Swap2(&(p)) -# define AI_SWAP4(p) ByteSwap::Swap4(&(p)) -# define AI_SWAP8(p) ByteSwap::Swap8(&(p)) -# define AI_SWAP2P(p) ByteSwap::Swap2((p)) -# define AI_SWAP4P(p) ByteSwap::Swap4((p)) -# define AI_SWAP8P(p) ByteSwap::Swap8((p)) -# define BE_NCONST -#else -# define AI_BE(t) (t) -# define AI_LE(t) ByteSwap::Swapped(t) -# define AI_SWAP2(p) -# define AI_SWAP4(p) -# define AI_SWAP8(p) -# define AI_SWAP2P(p) -# define AI_SWAP4P(p) -# define AI_SWAP8P(p) -# define BE_NCONST const -# define AI_LSWAP2(p) ByteSwap::Swap2(&(p)) -# define AI_LSWAP4(p) ByteSwap::Swap4(&(p)) -# define AI_LSWAP8(p) ByteSwap::Swap8(&(p)) -# define AI_LSWAP2P(p) ByteSwap::Swap2((p)) -# define AI_LSWAP4P(p) ByteSwap::Swap4((p)) -# define AI_LSWAP8P(p) ByteSwap::Swap8((p)) -# define LE_NCONST -#endif - - -namespace Intern { - -// -------------------------------------------------------------------------------------------- -template <typename T, bool doit> -struct ByteSwapper { - void operator() (T* inout) { - ByteSwap::Swap(inout); - } -}; - -template <typename T> -struct ByteSwapper<T,false> { - void operator() (T*) { - } -}; - -// -------------------------------------------------------------------------------------------- -template <bool SwapEndianess, typename T, bool RuntimeSwitch> -struct Getter { - void operator() (T* inout, bool le) { -#ifdef AI_BUILD_BIG_ENDIAN - le = le; -#else - le = !le; -#endif - if (le) { - ByteSwapper<T,(sizeof(T)>1?true:false)> () (inout); - } - else ByteSwapper<T,false> () (inout); - } -}; - -template <bool SwapEndianess, typename T> -struct Getter<SwapEndianess,T,false> { - - void operator() (T* inout, bool /*le*/) { - // static branch - ByteSwapper<T,(SwapEndianess && sizeof(T)>1)> () (inout); - } -}; -} // end Intern -} // end Assimp - -#endif //!! AI_BYTESWAPPER_H_INC diff --git a/thirdparty/assimp/include/assimp/Compiler/poppack1.h b/thirdparty/assimp/include/assimp/Compiler/poppack1.h deleted file mode 100644 index e033bc1472..0000000000 --- a/thirdparty/assimp/include/assimp/Compiler/poppack1.h +++ /dev/null @@ -1,22 +0,0 @@ - -// =============================================================================== -// May be included multiple times - resets structure packing to the defaults -// for all supported compilers. 
Reverts the changes made by #include <pushpack1.h> -// -// Currently this works on the following compilers: -// MSVC 7,8,9 -// GCC -// BORLAND (complains about 'pack state changed but not reverted', but works) -// =============================================================================== - -#ifndef AI_PUSHPACK_IS_DEFINED -# error pushpack1.h must be included after poppack1.h -#endif - -// reset packing to the original value -#if defined(_MSC_VER) || defined(__BORLANDC__) || defined (__BCPLUSPLUS__) -# pragma pack( pop ) -#endif -#undef PACK_STRUCT - -#undef AI_PUSHPACK_IS_DEFINED diff --git a/thirdparty/assimp/include/assimp/Compiler/pstdint.h b/thirdparty/assimp/include/assimp/Compiler/pstdint.h deleted file mode 100644 index 4de4ce2a90..0000000000 --- a/thirdparty/assimp/include/assimp/Compiler/pstdint.h +++ /dev/null @@ -1,912 +0,0 @@ -/* A portable stdint.h - **************************************************************************** - * BSD License: - **************************************************************************** - * - * Copyright (c) 2005-2016 Paul Hsieh - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - **************************************************************************** - * - * Version 0.1.15.4 - * - * The ANSI C standard committee, for the C99 standard, specified the - * inclusion of a new standard include file called stdint.h. This is - * a very useful and long desired include file which contains several - * very precise definitions for integer scalar types that is - * critically important for making portable several classes of - * applications including cryptography, hashing, variable length - * integer libraries and so on. But for most developers its likely - * useful just for programming sanity. - * - * The problem is that some compiler vendors chose to ignore the C99 - * standard and some older compilers have no opportunity to be updated. - * Because of this situation, simply including stdint.h in your code - * makes it unportable. - * - * So that's what this file is all about. 
Its an attempt to build a - * single universal include file that works on as many platforms as - * possible to deliver what stdint.h is supposed to. Even compilers - * that already come with stdint.h can use this file instead without - * any loss of functionality. A few things that should be noted about - * this file: - * - * 1) It is not guaranteed to be portable and/or present an identical - * interface on all platforms. The extreme variability of the - * ANSI C standard makes this an impossibility right from the - * very get go. Its really only meant to be useful for the vast - * majority of platforms that possess the capability of - * implementing usefully and precisely defined, standard sized - * integer scalars. Systems which are not intrinsically 2s - * complement may produce invalid constants. - * - * 2) There is an unavoidable use of non-reserved symbols. - * - * 3) Other standard include files are invoked. - * - * 4) This file may come in conflict with future platforms that do - * include stdint.h. The hope is that one or the other can be - * used with no real difference. - * - * 5) In the current version, if your platform can't represent - * int32_t, int16_t and int8_t, it just dumps out with a compiler - * error. - * - * 6) 64 bit integers may or may not be defined. Test for their - * presence with the test: #ifdef INT64_MAX or #ifdef UINT64_MAX. - * Note that this is different from the C99 specification which - * requires the existence of 64 bit support in the compiler. If - * this is not defined for your platform, yet it is capable of - * dealing with 64 bits then it is because this file has not yet - * been extended to cover all of your system's capabilities. - * - * 7) (u)intptr_t may or may not be defined. Test for its presence - * with the test: #ifdef PTRDIFF_MAX. If this is not defined - * for your platform, then it is because this file has not yet - * been extended to cover all of your system's capabilities, not - * because its optional. - * - * 8) The following might not been defined even if your platform is - * capable of defining it: - * - * WCHAR_MIN - * WCHAR_MAX - * (u)int64_t - * PTRDIFF_MIN - * PTRDIFF_MAX - * (u)intptr_t - * - * 9) The following have not been defined: - * - * WINT_MIN - * WINT_MAX - * - * 10) The criteria for defining (u)int_least(*)_t isn't clear, - * except for systems which don't have a type that precisely - * defined 8, 16, or 32 bit types (which this include file does - * not support anyways). Default definitions have been given. - * - * 11) The criteria for defining (u)int_fast(*)_t isn't something I - * would trust to any particular compiler vendor or the ANSI C - * committee. It is well known that "compatible systems" are - * commonly created that have very different performance - * characteristics from the systems they are compatible with, - * especially those whose vendors make both the compiler and the - * system. Default definitions have been given, but its strongly - * recommended that users never use these definitions for any - * reason (they do *NOT* deliver any serious guarantee of - * improved performance -- not in this file, nor any vendor's - * stdint.h). 
- * - * 12) The following macros: - * - * PRINTF_INTMAX_MODIFIER - * PRINTF_INT64_MODIFIER - * PRINTF_INT32_MODIFIER - * PRINTF_INT16_MODIFIER - * PRINTF_LEAST64_MODIFIER - * PRINTF_LEAST32_MODIFIER - * PRINTF_LEAST16_MODIFIER - * PRINTF_INTPTR_MODIFIER - * - * are strings which have been defined as the modifiers required - * for the "d", "u" and "x" printf formats to correctly output - * (u)intmax_t, (u)int64_t, (u)int32_t, (u)int16_t, (u)least64_t, - * (u)least32_t, (u)least16_t and (u)intptr_t types respectively. - * PRINTF_INTPTR_MODIFIER is not defined for some systems which - * provide their own stdint.h. PRINTF_INT64_MODIFIER is not - * defined if INT64_MAX is not defined. These are an extension - * beyond what C99 specifies must be in stdint.h. - * - * In addition, the following macros are defined: - * - * PRINTF_INTMAX_HEX_WIDTH - * PRINTF_INT64_HEX_WIDTH - * PRINTF_INT32_HEX_WIDTH - * PRINTF_INT16_HEX_WIDTH - * PRINTF_INT8_HEX_WIDTH - * PRINTF_INTMAX_DEC_WIDTH - * PRINTF_INT64_DEC_WIDTH - * PRINTF_INT32_DEC_WIDTH - * PRINTF_INT16_DEC_WIDTH - * PRINTF_UINT8_DEC_WIDTH - * PRINTF_UINTMAX_DEC_WIDTH - * PRINTF_UINT64_DEC_WIDTH - * PRINTF_UINT32_DEC_WIDTH - * PRINTF_UINT16_DEC_WIDTH - * PRINTF_UINT8_DEC_WIDTH - * - * Which specifies the maximum number of characters required to - * print the number of that type in either hexadecimal or decimal. - * These are an extension beyond what C99 specifies must be in - * stdint.h. - * - * Compilers tested (all with 0 warnings at their highest respective - * settings): Borland Turbo C 2.0, WATCOM C/C++ 11.0 (16 bits and 32 - * bits), Microsoft Visual C++ 6.0 (32 bit), Microsoft Visual Studio - * .net (VC7), Intel C++ 4.0, GNU gcc v3.3.3 - * - * This file should be considered a work in progress. Suggestions for - * improvements, especially those which increase coverage are strongly - * encouraged. - * - * Acknowledgements - * - * The following people have made significant contributions to the - * development and testing of this file: - * - * Chris Howie - * John Steele Scott - * Dave Thorup - * John Dill - * Florian Wobbe - * Christopher Sean Morrison - * Mikkel Fahnoe Jorgensen - * - */ - -#include <stddef.h> -#include <limits.h> -#include <signal.h> - -/* - * For gcc with _STDINT_H, fill in the PRINTF_INT*_MODIFIER macros, and - * do nothing else. On the Mac OS X version of gcc this is _STDINT_H_. 
- */ - -#if ((defined(__SUNPRO_C) && __SUNPRO_C >= 0x570) || (defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined (__WATCOMC__) && (defined (_STDINT_H_INCLUDED) || __WATCOMC__ >= 1250)) || (defined(__GNUC__) && (__GNUC__ > 3 || defined(_STDINT_H) || defined(_STDINT_H_) || defined (__UINT_FAST64_TYPE__)) )) && !defined (_PSTDINT_H_INCLUDED) -#include <stdint.h> -#define _PSTDINT_H_INCLUDED -# if defined(__GNUC__) && (defined(__x86_64__) || defined(__ppc64__)) && !(defined(__APPLE__) && defined(__MACH__)) -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "l" -# endif -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "" -# endif -# else -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "ll" -# endif -# ifndef PRINTF_INT32_MODIFIER -# if (UINT_MAX == UINT32_MAX) -# define PRINTF_INT32_MODIFIER "" -# else -# define PRINTF_INT32_MODIFIER "l" -# endif -# endif -# endif -# ifndef PRINTF_INT16_MODIFIER -# define PRINTF_INT16_MODIFIER "h" -# endif -# ifndef PRINTF_INTMAX_MODIFIER -# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER -# endif -# ifndef PRINTF_INT64_HEX_WIDTH -# define PRINTF_INT64_HEX_WIDTH "16" -# endif -# ifndef PRINTF_UINT64_HEX_WIDTH -# define PRINTF_UINT64_HEX_WIDTH "16" -# endif -# ifndef PRINTF_INT32_HEX_WIDTH -# define PRINTF_INT32_HEX_WIDTH "8" -# endif -# ifndef PRINTF_UINT32_HEX_WIDTH -# define PRINTF_UINT32_HEX_WIDTH "8" -# endif -# ifndef PRINTF_INT16_HEX_WIDTH -# define PRINTF_INT16_HEX_WIDTH "4" -# endif -# ifndef PRINTF_UINT16_HEX_WIDTH -# define PRINTF_UINT16_HEX_WIDTH "4" -# endif -# ifndef PRINTF_INT8_HEX_WIDTH -# define PRINTF_INT8_HEX_WIDTH "2" -# endif -# ifndef PRINTF_UINT8_HEX_WIDTH -# define PRINTF_UINT8_HEX_WIDTH "2" -# endif -# ifndef PRINTF_INT64_DEC_WIDTH -# define PRINTF_INT64_DEC_WIDTH "19" -# endif -# ifndef PRINTF_UINT64_DEC_WIDTH -# define PRINTF_UINT64_DEC_WIDTH "20" -# endif -# ifndef PRINTF_INT32_DEC_WIDTH -# define PRINTF_INT32_DEC_WIDTH "10" -# endif -# ifndef PRINTF_UINT32_DEC_WIDTH -# define PRINTF_UINT32_DEC_WIDTH "10" -# endif -# ifndef PRINTF_INT16_DEC_WIDTH -# define PRINTF_INT16_DEC_WIDTH "5" -# endif -# ifndef PRINTF_UINT16_DEC_WIDTH -# define PRINTF_UINT16_DEC_WIDTH "5" -# endif -# ifndef PRINTF_INT8_DEC_WIDTH -# define PRINTF_INT8_DEC_WIDTH "3" -# endif -# ifndef PRINTF_UINT8_DEC_WIDTH -# define PRINTF_UINT8_DEC_WIDTH "3" -# endif -# ifndef PRINTF_INTMAX_HEX_WIDTH -# define PRINTF_INTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH -# endif -# ifndef PRINTF_UINTMAX_HEX_WIDTH -# define PRINTF_UINTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH -# endif -# ifndef PRINTF_INTMAX_DEC_WIDTH -# define PRINTF_INTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH -# endif -# ifndef PRINTF_UINTMAX_DEC_WIDTH -# define PRINTF_UINTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH -# endif - -/* - * Something really weird is going on with Open Watcom. Just pull some of - * these duplicated definitions from Open Watcom's stdint.h file for now. 
- */ - -# if defined (__WATCOMC__) && __WATCOMC__ >= 1250 -# if !defined (INT64_C) -# define INT64_C(x) (x + (INT64_MAX - INT64_MAX)) -# endif -# if !defined (UINT64_C) -# define UINT64_C(x) (x + (UINT64_MAX - UINT64_MAX)) -# endif -# if !defined (INT32_C) -# define INT32_C(x) (x + (INT32_MAX - INT32_MAX)) -# endif -# if !defined (UINT32_C) -# define UINT32_C(x) (x + (UINT32_MAX - UINT32_MAX)) -# endif -# if !defined (INT16_C) -# define INT16_C(x) (x) -# endif -# if !defined (UINT16_C) -# define UINT16_C(x) (x) -# endif -# if !defined (INT8_C) -# define INT8_C(x) (x) -# endif -# if !defined (UINT8_C) -# define UINT8_C(x) (x) -# endif -# if !defined (UINT64_MAX) -# define UINT64_MAX 18446744073709551615ULL -# endif -# if !defined (INT64_MAX) -# define INT64_MAX 9223372036854775807LL -# endif -# if !defined (UINT32_MAX) -# define UINT32_MAX 4294967295UL -# endif -# if !defined (INT32_MAX) -# define INT32_MAX 2147483647L -# endif -# if !defined (INTMAX_MAX) -# define INTMAX_MAX INT64_MAX -# endif -# if !defined (INTMAX_MIN) -# define INTMAX_MIN INT64_MIN -# endif -# endif -#endif - -/* - * I have no idea what is the truly correct thing to do on older Solaris. - * From some online discussions, this seems to be what is being - * recommended. For people who actually are developing on older Solaris, - * what I would like to know is, does this define all of the relevant - * macros of a complete stdint.h? Remember, in pstdint.h 64 bit is - * considered optional. - */ - -#if (defined(__SUNPRO_C) && __SUNPRO_C >= 0x420) && !defined(_PSTDINT_H_INCLUDED) -#include <sys/inttypes.h> -#define _PSTDINT_H_INCLUDED -#endif - -#ifndef _PSTDINT_H_INCLUDED -#define _PSTDINT_H_INCLUDED - -#ifndef SIZE_MAX -# define SIZE_MAX (~(size_t)0) -#endif - -/* - * Deduce the type assignments from limits.h under the assumption that - * integer sizes in bits are powers of 2, and follow the ANSI - * definitions. 
- */ - -#ifndef UINT8_MAX -# define UINT8_MAX 0xff -#endif -#if !defined(uint8_t) && !defined(_UINT8_T) && !defined(vxWorks) -# if (UCHAR_MAX == UINT8_MAX) || defined (S_SPLINT_S) - typedef unsigned char uint8_t; -# define UINT8_C(v) ((uint8_t) v) -# else -# error "Platform not supported" -# endif -#endif - -#ifndef INT8_MAX -# define INT8_MAX 0x7f -#endif -#ifndef INT8_MIN -# define INT8_MIN INT8_C(0x80) -#endif -#if !defined(int8_t) && !defined(_INT8_T) && !defined(vxWorks) -# if (SCHAR_MAX == INT8_MAX) || defined (S_SPLINT_S) - typedef signed char int8_t; -# define INT8_C(v) ((int8_t) v) -# else -# error "Platform not supported" -# endif -#endif - -#ifndef UINT16_MAX -# define UINT16_MAX 0xffff -#endif -#if !defined(uint16_t) && !defined(_UINT16_T) && !defined(vxWorks) -#if (UINT_MAX == UINT16_MAX) || defined (S_SPLINT_S) - typedef unsigned int uint16_t; -# ifndef PRINTF_INT16_MODIFIER -# define PRINTF_INT16_MODIFIER "" -# endif -# define UINT16_C(v) ((uint16_t) (v)) -#elif (USHRT_MAX == UINT16_MAX) - typedef unsigned short uint16_t; -# define UINT16_C(v) ((uint16_t) (v)) -# ifndef PRINTF_INT16_MODIFIER -# define PRINTF_INT16_MODIFIER "h" -# endif -#else -#error "Platform not supported" -#endif -#endif - -#ifndef INT16_MAX -# define INT16_MAX 0x7fff -#endif -#ifndef INT16_MIN -# define INT16_MIN INT16_C(0x8000) -#endif -#if !defined(int16_t) && !defined(_INT16_T) && !defined(vxWorks) -#if (INT_MAX == INT16_MAX) || defined (S_SPLINT_S) - typedef signed int int16_t; -# define INT16_C(v) ((int16_t) (v)) -# ifndef PRINTF_INT16_MODIFIER -# define PRINTF_INT16_MODIFIER "" -# endif -#elif (SHRT_MAX == INT16_MAX) - typedef signed short int16_t; -# define INT16_C(v) ((int16_t) (v)) -# ifndef PRINTF_INT16_MODIFIER -# define PRINTF_INT16_MODIFIER "h" -# endif -#else -#error "Platform not supported" -#endif -#endif - -#ifndef UINT32_MAX -# define UINT32_MAX (0xffffffffUL) -#endif -#if !defined(uint32_t) && !defined(_UINT32_T) && !defined(vxWorks) -#if (ULONG_MAX == UINT32_MAX) || defined (S_SPLINT_S) - typedef unsigned long uint32_t; -# define UINT32_C(v) v ## UL -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "l" -# endif -#elif (UINT_MAX == UINT32_MAX) - typedef unsigned int uint32_t; -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "" -# endif -# define UINT32_C(v) v ## U -#elif (USHRT_MAX == UINT32_MAX) - typedef unsigned short uint32_t; -# define UINT32_C(v) ((unsigned short) (v)) -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "" -# endif -#else -#error "Platform not supported" -#endif -#endif - -#ifndef INT32_MAX -# define INT32_MAX (0x7fffffffL) -#endif -#ifndef INT32_MIN -# define INT32_MIN INT32_C(0x80000000) -#endif -#if !defined(int32_t) && !defined(_INT32_T) && !defined(vxWorks) -#if (LONG_MAX == INT32_MAX) || defined (S_SPLINT_S) - typedef signed long int32_t; -# define INT32_C(v) v ## L -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "l" -# endif -#elif (INT_MAX == INT32_MAX) - typedef signed int int32_t; -# define INT32_C(v) v -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "" -# endif -#elif (SHRT_MAX == INT32_MAX) - typedef signed short int32_t; -# define INT32_C(v) ((short) (v)) -# ifndef PRINTF_INT32_MODIFIER -# define PRINTF_INT32_MODIFIER "" -# endif -#else -#error "Platform not supported" -#endif -#endif - -/* - * The macro stdint_int64_defined is temporarily used to record - * whether or not 64 integer support is available. 
It must be - * defined for any 64 integer extensions for new platforms that are - * added. - */ - -#undef stdint_int64_defined -#if (defined(__STDC__) && defined(__STDC_VERSION__)) || defined (S_SPLINT_S) -# if (__STDC__ && __STDC_VERSION__ >= 199901L) || defined (S_SPLINT_S) -# define stdint_int64_defined - typedef long long int64_t; - typedef unsigned long long uint64_t; -# define UINT64_C(v) v ## ULL -# define INT64_C(v) v ## LL -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "ll" -# endif -# endif -#endif - -#if !defined (stdint_int64_defined) -# if defined(__GNUC__) && !defined(vxWorks) -# define stdint_int64_defined - __extension__ typedef long long int64_t; - __extension__ typedef unsigned long long uint64_t; -# define UINT64_C(v) v ## ULL -# define INT64_C(v) v ## LL -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "ll" -# endif -# elif defined(__MWERKS__) || defined (__SUNPRO_C) || defined (__SUNPRO_CC) || defined (__APPLE_CC__) || defined (_LONG_LONG) || defined (_CRAYC) || defined (S_SPLINT_S) -# define stdint_int64_defined - typedef long long int64_t; - typedef unsigned long long uint64_t; -# define UINT64_C(v) v ## ULL -# define INT64_C(v) v ## LL -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "ll" -# endif -# elif (defined(__WATCOMC__) && defined(__WATCOM_INT64__)) || (defined(_MSC_VER) && _INTEGRAL_MAX_BITS >= 64) || (defined (__BORLANDC__) && __BORLANDC__ > 0x460) || defined (__alpha) || defined (__DECC) -# define stdint_int64_defined - typedef __int64 int64_t; - typedef unsigned __int64 uint64_t; -# define UINT64_C(v) v ## UI64 -# define INT64_C(v) v ## I64 -# ifndef PRINTF_INT64_MODIFIER -# define PRINTF_INT64_MODIFIER "I64" -# endif -# endif -#endif - -#if !defined (LONG_LONG_MAX) && defined (INT64_C) -# define LONG_LONG_MAX INT64_C (9223372036854775807) -#endif -#ifndef ULONG_LONG_MAX -# define ULONG_LONG_MAX UINT64_C (18446744073709551615) -#endif - -#if !defined (INT64_MAX) && defined (INT64_C) -# define INT64_MAX INT64_C (9223372036854775807) -#endif -#if !defined (INT64_MIN) && defined (INT64_C) -# define INT64_MIN INT64_C (-9223372036854775808) -#endif -#if !defined (UINT64_MAX) && defined (INT64_C) -# define UINT64_MAX UINT64_C (18446744073709551615) -#endif - -/* - * Width of hexadecimal for number field. - */ - -#ifndef PRINTF_INT64_HEX_WIDTH -# define PRINTF_INT64_HEX_WIDTH "16" -#endif -#ifndef PRINTF_INT32_HEX_WIDTH -# define PRINTF_INT32_HEX_WIDTH "8" -#endif -#ifndef PRINTF_INT16_HEX_WIDTH -# define PRINTF_INT16_HEX_WIDTH "4" -#endif -#ifndef PRINTF_INT8_HEX_WIDTH -# define PRINTF_INT8_HEX_WIDTH "2" -#endif -#ifndef PRINTF_INT64_DEC_WIDTH -# define PRINTF_INT64_DEC_WIDTH "19" -#endif -#ifndef PRINTF_INT32_DEC_WIDTH -# define PRINTF_INT32_DEC_WIDTH "10" -#endif -#ifndef PRINTF_INT16_DEC_WIDTH -# define PRINTF_INT16_DEC_WIDTH "5" -#endif -#ifndef PRINTF_INT8_DEC_WIDTH -# define PRINTF_INT8_DEC_WIDTH "3" -#endif -#ifndef PRINTF_UINT64_DEC_WIDTH -# define PRINTF_UINT64_DEC_WIDTH "20" -#endif -#ifndef PRINTF_UINT32_DEC_WIDTH -# define PRINTF_UINT32_DEC_WIDTH "10" -#endif -#ifndef PRINTF_UINT16_DEC_WIDTH -# define PRINTF_UINT16_DEC_WIDTH "5" -#endif -#ifndef PRINTF_UINT8_DEC_WIDTH -# define PRINTF_UINT8_DEC_WIDTH "3" -#endif - -/* - * Ok, lets not worry about 128 bit integers for now. Moore's law says - * we don't need to worry about that until about 2040 at which point - * we'll have bigger things to worry about. 
- */ - -#ifdef stdint_int64_defined - typedef int64_t intmax_t; - typedef uint64_t uintmax_t; -# define INTMAX_MAX INT64_MAX -# define INTMAX_MIN INT64_MIN -# define UINTMAX_MAX UINT64_MAX -# define UINTMAX_C(v) UINT64_C(v) -# define INTMAX_C(v) INT64_C(v) -# ifndef PRINTF_INTMAX_MODIFIER -# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER -# endif -# ifndef PRINTF_INTMAX_HEX_WIDTH -# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT64_HEX_WIDTH -# endif -# ifndef PRINTF_INTMAX_DEC_WIDTH -# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT64_DEC_WIDTH -# endif -#else - typedef int32_t intmax_t; - typedef uint32_t uintmax_t; -# define INTMAX_MAX INT32_MAX -# define UINTMAX_MAX UINT32_MAX -# define UINTMAX_C(v) UINT32_C(v) -# define INTMAX_C(v) INT32_C(v) -# ifndef PRINTF_INTMAX_MODIFIER -# define PRINTF_INTMAX_MODIFIER PRINTF_INT32_MODIFIER -# endif -# ifndef PRINTF_INTMAX_HEX_WIDTH -# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT32_HEX_WIDTH -# endif -# ifndef PRINTF_INTMAX_DEC_WIDTH -# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT32_DEC_WIDTH -# endif -#endif - -/* - * Because this file currently only supports platforms which have - * precise powers of 2 as bit sizes for the default integers, the - * least definitions are all trivial. Its possible that a future - * version of this file could have different definitions. - */ - -#ifndef stdint_least_defined - typedef int8_t int_least8_t; - typedef uint8_t uint_least8_t; - typedef int16_t int_least16_t; - typedef uint16_t uint_least16_t; - typedef int32_t int_least32_t; - typedef uint32_t uint_least32_t; -# define PRINTF_LEAST32_MODIFIER PRINTF_INT32_MODIFIER -# define PRINTF_LEAST16_MODIFIER PRINTF_INT16_MODIFIER -# define UINT_LEAST8_MAX UINT8_MAX -# define INT_LEAST8_MAX INT8_MAX -# define UINT_LEAST16_MAX UINT16_MAX -# define INT_LEAST16_MAX INT16_MAX -# define UINT_LEAST32_MAX UINT32_MAX -# define INT_LEAST32_MAX INT32_MAX -# define INT_LEAST8_MIN INT8_MIN -# define INT_LEAST16_MIN INT16_MIN -# define INT_LEAST32_MIN INT32_MIN -# ifdef stdint_int64_defined - typedef int64_t int_least64_t; - typedef uint64_t uint_least64_t; -# define PRINTF_LEAST64_MODIFIER PRINTF_INT64_MODIFIER -# define UINT_LEAST64_MAX UINT64_MAX -# define INT_LEAST64_MAX INT64_MAX -# define INT_LEAST64_MIN INT64_MIN -# endif -#endif -#undef stdint_least_defined - -/* - * The ANSI C committee pretending to know or specify anything about - * performance is the epitome of misguided arrogance. The mandate of - * this file is to *ONLY* ever support that absolute minimum - * definition of the fast integer types, for compatibility purposes. - * No extensions, and no attempt to suggest what may or may not be a - * faster integer type will ever be made in this file. Developers are - * warned to stay away from these types when using this or any other - * stdint.h. 
- */ - -typedef int_least8_t int_fast8_t; -typedef uint_least8_t uint_fast8_t; -typedef int_least16_t int_fast16_t; -typedef uint_least16_t uint_fast16_t; -typedef int_least32_t int_fast32_t; -typedef uint_least32_t uint_fast32_t; -#define UINT_FAST8_MAX UINT_LEAST8_MAX -#define INT_FAST8_MAX INT_LEAST8_MAX -#define UINT_FAST16_MAX UINT_LEAST16_MAX -#define INT_FAST16_MAX INT_LEAST16_MAX -#define UINT_FAST32_MAX UINT_LEAST32_MAX -#define INT_FAST32_MAX INT_LEAST32_MAX -#define INT_FAST8_MIN INT_LEAST8_MIN -#define INT_FAST16_MIN INT_LEAST16_MIN -#define INT_FAST32_MIN INT_LEAST32_MIN -#ifdef stdint_int64_defined - typedef int_least64_t int_fast64_t; - typedef uint_least64_t uint_fast64_t; -# define UINT_FAST64_MAX UINT_LEAST64_MAX -# define INT_FAST64_MAX INT_LEAST64_MAX -# define INT_FAST64_MIN INT_LEAST64_MIN -#endif - -#undef stdint_int64_defined - -/* - * Whatever piecemeal, per compiler thing we can do about the wchar_t - * type limits. - */ - -#if defined(__WATCOMC__) || defined(_MSC_VER) || defined (__GNUC__) && !defined(vxWorks) -# include <wchar.h> -# ifndef WCHAR_MIN -# define WCHAR_MIN 0 -# endif -# ifndef WCHAR_MAX -# define WCHAR_MAX ((wchar_t)-1) -# endif -#endif - -/* - * Whatever piecemeal, per compiler/platform thing we can do about the - * (u)intptr_t types and limits. - */ - -#if (defined (_MSC_VER) && defined (_UINTPTR_T_DEFINED)) || defined (_UINTPTR_T) -# define STDINT_H_UINTPTR_T_DEFINED -#endif - -#ifndef STDINT_H_UINTPTR_T_DEFINED -# if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) || defined (_WIN64) || defined (__ppc64__) -# define stdint_intptr_bits 64 -# elif defined (__WATCOMC__) || defined (__TURBOC__) -# if defined(__TINY__) || defined(__SMALL__) || defined(__MEDIUM__) -# define stdint_intptr_bits 16 -# else -# define stdint_intptr_bits 32 -# endif -# elif defined (__i386__) || defined (_WIN32) || defined (WIN32) || defined (__ppc64__) -# define stdint_intptr_bits 32 -# elif defined (__INTEL_COMPILER) -/* TODO -- what did Intel do about x86-64? */ -# else -/* #error "This platform might not be supported yet" */ -# endif - -# ifdef stdint_intptr_bits -# define stdint_intptr_glue3_i(a,b,c) a##b##c -# define stdint_intptr_glue3(a,b,c) stdint_intptr_glue3_i(a,b,c) -# ifndef PRINTF_INTPTR_MODIFIER -# define PRINTF_INTPTR_MODIFIER stdint_intptr_glue3(PRINTF_INT,stdint_intptr_bits,_MODIFIER) -# endif -# ifndef PTRDIFF_MAX -# define PTRDIFF_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX) -# endif -# ifndef PTRDIFF_MIN -# define PTRDIFF_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN) -# endif -# ifndef UINTPTR_MAX -# define UINTPTR_MAX stdint_intptr_glue3(UINT,stdint_intptr_bits,_MAX) -# endif -# ifndef INTPTR_MAX -# define INTPTR_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX) -# endif -# ifndef INTPTR_MIN -# define INTPTR_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN) -# endif -# ifndef INTPTR_C -# define INTPTR_C(x) stdint_intptr_glue3(INT,stdint_intptr_bits,_C)(x) -# endif -# ifndef UINTPTR_C -# define UINTPTR_C(x) stdint_intptr_glue3(UINT,stdint_intptr_bits,_C)(x) -# endif - typedef stdint_intptr_glue3(uint,stdint_intptr_bits,_t) uintptr_t; - typedef stdint_intptr_glue3( int,stdint_intptr_bits,_t) intptr_t; -# else -/* TODO -- This following is likely wrong for some platforms, and does - nothing for the definition of uintptr_t. */ - typedef ptrdiff_t intptr_t; -# endif -# define STDINT_H_UINTPTR_T_DEFINED -#endif - -/* - * Assumes sig_atomic_t is signed and we have a 2s complement machine. 
- */ - -#ifndef SIG_ATOMIC_MAX -# define SIG_ATOMIC_MAX ((((sig_atomic_t) 1) << (sizeof (sig_atomic_t)*CHAR_BIT-1)) - 1) -#endif - -#endif - -#if defined (__TEST_PSTDINT_FOR_CORRECTNESS) - -/* - * Please compile with the maximum warning settings to make sure macros are - * not defined more than once. - */ - -#include <stdlib.h> -#include <stdio.h> -#include <string.h> - -#define glue3_aux(x,y,z) x ## y ## z -#define glue3(x,y,z) glue3_aux(x,y,z) - -#define DECLU(bits) glue3(uint,bits,_t) glue3(u,bits,) = glue3(UINT,bits,_C) (0); -#define DECLI(bits) glue3(int,bits,_t) glue3(i,bits,) = glue3(INT,bits,_C) (0); - -#define DECL(us,bits) glue3(DECL,us,) (bits) - -#define TESTUMAX(bits) glue3(u,bits,) = ~glue3(u,bits,); if (glue3(UINT,bits,_MAX) != glue3(u,bits,)) printf ("Something wrong with UINT%d_MAX\n", bits) - -#define REPORTERROR(msg) { err_n++; if (err_first <= 0) err_first = __LINE__; printf msg; } - -int main () { - int err_n = 0; - int err_first = 0; - DECL(I,8) - DECL(U,8) - DECL(I,16) - DECL(U,16) - DECL(I,32) - DECL(U,32) -#ifdef INT64_MAX - DECL(I,64) - DECL(U,64) -#endif - intmax_t imax = INTMAX_C(0); - uintmax_t umax = UINTMAX_C(0); - char str0[256], str1[256]; - - sprintf (str0, "%" PRINTF_INT32_MODIFIER "d", INT32_C(2147483647)); - if (0 != strcmp (str0, "2147483647")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0)); - if (atoi(PRINTF_INT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_INT32_DEC_WIDTH : %s\n", PRINTF_INT32_DEC_WIDTH)); - sprintf (str0, "%" PRINTF_INT32_MODIFIER "u", UINT32_C(4294967295)); - if (0 != strcmp (str0, "4294967295")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0)); - if (atoi(PRINTF_UINT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_UINT32_DEC_WIDTH : %s\n", PRINTF_UINT32_DEC_WIDTH)); -#ifdef INT64_MAX - sprintf (str1, "%" PRINTF_INT64_MODIFIER "d", INT64_C(9223372036854775807)); - if (0 != strcmp (str1, "9223372036854775807")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1)); - if (atoi(PRINTF_INT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_INT64_DEC_WIDTH : %s, %d\n", PRINTF_INT64_DEC_WIDTH, (int) strlen(str1))); - sprintf (str1, "%" PRINTF_INT64_MODIFIER "u", UINT64_C(18446744073709550591)); - if (0 != strcmp (str1, "18446744073709550591")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1)); - if (atoi(PRINTF_UINT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_UINT64_DEC_WIDTH : %s, %d\n", PRINTF_UINT64_DEC_WIDTH, (int) strlen(str1))); -#endif - - sprintf (str0, "%d %x\n", 0, ~0); - - sprintf (str1, "%d %x\n", i8, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i8 : %s\n", str1)); - sprintf (str1, "%u %x\n", u8, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u8 : %s\n", str1)); - sprintf (str1, "%d %x\n", i16, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i16 : %s\n", str1)); - sprintf (str1, "%u %x\n", u16, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u16 : %s\n", str1)); - sprintf (str1, "%" PRINTF_INT32_MODIFIER "d %x\n", i32, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i32 : %s\n", str1)); - sprintf (str1, "%" PRINTF_INT32_MODIFIER "u %x\n", u32, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u32 : %s\n", str1)); -#ifdef INT64_MAX - sprintf (str1, "%" 
PRINTF_INT64_MODIFIER "d %x\n", i64, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i64 : %s\n", str1)); -#endif - sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "d %x\n", imax, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with imax : %s\n", str1)); - sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "u %x\n", umax, ~0); - if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with umax : %s\n", str1)); - - TESTUMAX(8); - TESTUMAX(16); - TESTUMAX(32); -#ifdef INT64_MAX - TESTUMAX(64); -#endif - -#define STR(v) #v -#define Q(v) printf ("sizeof " STR(v) " = %u\n", (unsigned) sizeof (v)); - if (err_n) { - printf ("pstdint.h is not correct. Please use sizes below to correct it:\n"); - } - - Q(int) - Q(unsigned) - Q(long int) - Q(short int) - Q(int8_t) - Q(int16_t) - Q(int32_t) -#ifdef INT64_MAX - Q(int64_t) -#endif - - return EXIT_SUCCESS; -} - -#endif diff --git a/thirdparty/assimp/include/assimp/Compiler/pushpack1.h b/thirdparty/assimp/include/assimp/Compiler/pushpack1.h deleted file mode 100644 index 4c9fbb857f..0000000000 --- a/thirdparty/assimp/include/assimp/Compiler/pushpack1.h +++ /dev/null @@ -1,43 +0,0 @@ - - -// =============================================================================== -// May be included multiple times - sets structure packing to 1 -// for all supported compilers. #include <poppack1.h> reverts the changes. -// -// Currently this works on the following compilers: -// MSVC 7,8,9 -// GCC -// BORLAND (complains about 'pack state changed but not reverted', but works) -// Clang -// -// -// USAGE: -// -// struct StructToBePacked { -// } PACK_STRUCT; -// -// =============================================================================== - -#ifdef AI_PUSHPACK_IS_DEFINED -# error poppack1.h must be included after pushpack1.h -#endif - -#if defined(_MSC_VER) || defined(__BORLANDC__) || defined (__BCPLUSPLUS__) -# pragma pack(push,1) -# define PACK_STRUCT -#elif defined( __GNUC__ ) || defined(__clang__) -# if !defined(HOST_MINGW) -# define PACK_STRUCT __attribute__((__packed__)) -# else -# define PACK_STRUCT __attribute__((gcc_struct, __packed__)) -# endif -#else -# error Compiler not supported -#endif - -#if defined(_MSC_VER) -// C4103: Packing was changed after the inclusion of the header, probably missing #pragma pop -# pragma warning (disable : 4103) -#endif - -#define AI_PUSHPACK_IS_DEFINED diff --git a/thirdparty/assimp/include/assimp/CreateAnimMesh.h b/thirdparty/assimp/include/assimp/CreateAnimMesh.h deleted file mode 100644 index 1266d1de11..0000000000 --- a/thirdparty/assimp/include/assimp/CreateAnimMesh.h +++ /dev/null @@ -1,68 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file CreateAnimMesh.h - * Create AnimMesh from Mesh - */ -#pragma once -#ifndef INCLUDED_AI_CREATE_ANIM_MESH_H -#define INCLUDED_AI_CREATE_ANIM_MESH_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/mesh.h> - -namespace Assimp { - -/** - * Create aiAnimMesh from aiMesh. - * @param mesh The input mesh to create an animated mesh from. - * @return The new created animated mesh. - */ -ASSIMP_API aiAnimMesh *aiCreateAnimMesh(const aiMesh *mesh); - -} // end of namespace Assimp - -#endif // INCLUDED_AI_CREATE_ANIM_MESH_H - diff --git a/thirdparty/assimp/include/assimp/DefaultIOStream.h b/thirdparty/assimp/include/assimp/DefaultIOStream.h deleted file mode 100644 index c6d382c1b5..0000000000 --- a/thirdparty/assimp/include/assimp/DefaultIOStream.h +++ /dev/null @@ -1,139 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file Default file I/O using fXXX()-family of functions */ -#pragma once -#ifndef AI_DEFAULTIOSTREAM_H_INC -#define AI_DEFAULTIOSTREAM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <stdio.h> -#include <assimp/IOStream.hpp> -#include <assimp/importerdesc.h> -#include <assimp/Defines.h> - -namespace Assimp { - -// ---------------------------------------------------------------------------------- -//! @class DefaultIOStream -//! @brief Default IO implementation, use standard IO operations -//! @note An instance of this class can exist without a valid file handle -//! attached to it. All calls fail, but the instance can nevertheless be -//! used with no restrictions. -class ASSIMP_API DefaultIOStream : public IOStream { - friend class DefaultIOSystem; -#if __ANDROID__ -# if __ANDROID_API__ > 9 -# if defined(AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT) - friend class AndroidJNIIOSystem; -# endif // defined(AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT) -# endif // __ANDROID_API__ > 9 -#endif // __ANDROID__ - -protected: - DefaultIOStream() AI_NO_EXCEPT; - DefaultIOStream(FILE* pFile, const std::string &strFilename); - -public: - /** Destructor public to allow simple deletion to close the file. */ - ~DefaultIOStream (); - - // ------------------------------------------------------------------- - /// Read from stream - size_t Read(void* pvBuffer, - size_t pSize, - size_t pCount); - - // ------------------------------------------------------------------- - /// Write to stream - size_t Write(const void* pvBuffer, - size_t pSize, - size_t pCount); - - // ------------------------------------------------------------------- - /// Seek specific position - aiReturn Seek(size_t pOffset, - aiOrigin pOrigin); - - // ------------------------------------------------------------------- - /// Get current seek position - size_t Tell() const; - - // ------------------------------------------------------------------- - /// Get size of file - size_t FileSize() const; - - // ------------------------------------------------------------------- - /// Flush file contents - void Flush(); - -private: - FILE* mFile; - std::string mFilename; - mutable size_t mCachedSize; -}; - -// ---------------------------------------------------------------------------------- -AI_FORCE_INLINE -DefaultIOStream::DefaultIOStream() AI_NO_EXCEPT -: mFile(nullptr) -, mFilename("") -, mCachedSize(SIZE_MAX) { - // empty -} - -// ---------------------------------------------------------------------------------- -AI_FORCE_INLINE -DefaultIOStream::DefaultIOStream (FILE* pFile, const std::string &strFilename) -: mFile(pFile) -, mFilename(strFilename) -, mCachedSize(SIZE_MAX) { - // empty -} -// ---------------------------------------------------------------------------------- - -} // ns assimp - -#endif //!!AI_DEFAULTIOSTREAM_H_INC diff --git a/thirdparty/assimp/include/assimp/DefaultIOSystem.h b/thirdparty/assimp/include/assimp/DefaultIOSystem.h deleted file mode 100644 index 46f6d447c5..0000000000 --- a/thirdparty/assimp/include/assimp/DefaultIOSystem.h +++ /dev/null @@ -1,98 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
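DefaultIOStream, removed just above, is a thin wrapper over a FILE*: Read()/Write() follow the fread()/fwrite() contract and return the number of whole items transferred. A hypothetical helper built only on the interface shown in these hunks (DefaultIOSystem::Open/Close, IOStream::FileSize/Read); it only compiles against a tree that still ships these headers:

    #include <assimp/DefaultIOSystem.h>
    #include <assimp/IOStream.hpp>
    #include <vector>

    // Slurp a whole file through the default IO layer; empty buffer on failure.
    std::vector<char> ReadWholeFile(const char *path) {
        Assimp::DefaultIOSystem io;
        std::vector<char> data;
        if (Assimp::IOStream *f = io.Open(path, "rb")) {
            data.resize(f->FileSize());
            if (!data.empty()) {
                f->Read(data.data(), 1, data.size()); // item size 1 => bytes read
            }
            io.Close(f);
        }
        return data;
    }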
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Default implementation of IOSystem using the standard C file functions */ -#pragma once -#ifndef AI_DEFAULTIOSYSTEM_H_INC -#define AI_DEFAULTIOSYSTEM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/IOSystem.hpp> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** Default implementation of IOSystem using the standard C file functions */ -class ASSIMP_API DefaultIOSystem : public IOSystem { -public: - // ------------------------------------------------------------------- - /** Tests for the existence of a file at the given path. */ - bool Exists( const char* pFile) const; - - // ------------------------------------------------------------------- - /** Returns the directory separator. */ - char getOsSeparator() const; - - // ------------------------------------------------------------------- - /** Open a new file with a given path. */ - IOStream* Open( const char* pFile, const char* pMode = "rb"); - - // ------------------------------------------------------------------- - /** Closes the given file and releases all resources associated with it. 
*/ - void Close( IOStream* pFile); - - // ------------------------------------------------------------------- - /** Compare two paths */ - bool ComparePaths (const char* one, const char* second) const; - - /** @brief get the file name of a full filepath - * example: /tmp/archive.tar.gz -> archive.tar.gz - */ - static std::string fileName( const std::string &path ); - - /** @brief get the complete base name of a full filepath - * example: /tmp/archive.tar.gz -> archive.tar - */ - static std::string completeBaseName( const std::string &path); - - /** @brief get the path of a full filepath - * example: /tmp/archive.tar.gz -> /tmp/ - */ - static std::string absolutePath( const std::string &path); -}; - -} //!ns Assimp - -#endif //AI_DEFAULTIOSYSTEM_H_INC diff --git a/thirdparty/assimp/include/assimp/DefaultLogger.hpp b/thirdparty/assimp/include/assimp/DefaultLogger.hpp deleted file mode 100644 index 1946e250a6..0000000000 --- a/thirdparty/assimp/include/assimp/DefaultLogger.hpp +++ /dev/null @@ -1,188 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -/** @file DefaultLogger.hpp -*/ - -#ifndef INCLUDED_AI_DEFAULTLOGGER -#define INCLUDED_AI_DEFAULTLOGGER - -#include "Logger.hpp" -#include "LogStream.hpp" -#include "NullLogger.hpp" -#include <vector> - -namespace Assimp { -// ------------------------------------------------------------------------------------ -class IOStream; -struct LogStreamInfo; - -/** default name of logfile */ -#define ASSIMP_DEFAULT_LOG_NAME "AssimpLog.txt" - -// ------------------------------------------------------------------------------------ -/** @brief CPP-API: Primary logging facility of Assimp. - * - * The library stores its primary #Logger as a static member of this class. - * #get() returns this primary logger. 
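The three static path helpers at the end of DefaultIOSystem.h are documented only by example (/tmp/archive.tar.gz -> archive.tar.gz, archive.tar, /tmp/). A standalone sketch of that documented behaviour, not the removed implementation itself:

    #include <string>

    // fileName("/tmp/archive.tar.gz")         -> "archive.tar.gz"
    // completeBaseName("/tmp/archive.tar.gz") -> "archive.tar"
    // absolutePath("/tmp/archive.tar.gz")     -> "/tmp/"
    static std::string fileName(const std::string &path) {
        const size_t sep = path.find_last_of("/\\");
        return sep == std::string::npos ? path : path.substr(sep + 1);
    }
    static std::string completeBaseName(const std::string &path) {
        const std::string name = fileName(path);
        const size_t dot = name.rfind('.');
        return dot == std::string::npos ? name : name.substr(0, dot);
    }
    static std::string absolutePath(const std::string &path) {
        const size_t sep = path.find_last_of("/\\");
        return sep == std::string::npos ? std::string() : path.substr(0, sep + 1);
    }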
By default the underlying implementation is - * just a #NullLogger which rejects all log messages. By calling #create(), logging - * is turned on. To capture the log output multiple log streams (#LogStream) can be - * attach to the logger. Some default streams for common streaming locations (such as - * a file, std::cout, OutputDebugString()) are also provided. - * - * If you wish to customize the logging at an even deeper level supply your own - * implementation of #Logger to #set(). - * @note The whole logging stuff causes a small extra overhead for all imports. */ -class ASSIMP_API DefaultLogger : - public Logger { - -public: - - // ---------------------------------------------------------------------- - /** @brief Creates a logging instance. - * @param name Name for log file. Only valid in combination - * with the aiDefaultLogStream_FILE flag. - * @param severity Log severity, VERBOSE turns on debug messages - * @param defStreams Default log streams to be attached. Any bitwise - * combination of the aiDefaultLogStream enumerated values. - * If #aiDefaultLogStream_FILE is specified but an empty string is - * passed for 'name', no log file is created at all. - * @param io IOSystem to be used to open external files (such as the - * log file). Pass NULL to rely on the default implementation. - * This replaces the default #NullLogger with a #DefaultLogger instance. */ - static Logger *create(const char* name = ASSIMP_DEFAULT_LOG_NAME, - LogSeverity severity = NORMAL, - unsigned int defStreams = aiDefaultLogStream_DEBUGGER | aiDefaultLogStream_FILE, - IOSystem* io = NULL); - - // ---------------------------------------------------------------------- - /** @brief Setup a custom #Logger implementation. - * - * Use this if the provided #DefaultLogger class doesn't fit into - * your needs. If the provided message formatting is OK for you, - * it's much easier to use #create() and to attach your own custom - * output streams to it. - * @param logger Pass NULL to setup a default NullLogger*/ - static void set (Logger *logger); - - // ---------------------------------------------------------------------- - /** @brief Getter for singleton instance - * @return Only instance. This is never null, but it could be a - * NullLogger. Use isNullLogger to check this.*/ - static Logger *get(); - - // ---------------------------------------------------------------------- - /** @brief Return whether a #NullLogger is currently active - * @return true if the current logger is a #NullLogger. - * Use create() or set() to setup a logger that does actually do - * something else than just rejecting all log messages. */ - static bool isNullLogger(); - - // ---------------------------------------------------------------------- - /** @brief Kills the current singleton logger and replaces it with a - * #NullLogger instance. */ - static void kill(); - - // ---------------------------------------------------------------------- - /** @copydoc Logger::attachStream */ - bool attachStream(LogStream *pStream, - unsigned int severity); - - // ---------------------------------------------------------------------- - /** @copydoc Logger::detatchStream */ - bool detatchStream(LogStream *pStream, - unsigned int severity); - -private: - // ---------------------------------------------------------------------- - /** @briefPrivate construction for internal use by create(). 
- * @param severity Logging granularity */ - explicit DefaultLogger(LogSeverity severity); - - // ---------------------------------------------------------------------- - /** @briefDestructor */ - ~DefaultLogger(); - - /** @brief Logs debug infos, only been written when severity level VERBOSE is set */ - void OnDebug(const char* message); - - /** @brief Logs an info message */ - void OnInfo(const char* message); - - /** @brief Logs a warning message */ - void OnWarn(const char* message); - - /** @brief Logs an error message */ - void OnError(const char* message); - - // ---------------------------------------------------------------------- - /** @brief Writes a message to all streams */ - void WriteToStreams(const char* message, ErrorSeverity ErrorSev ); - - // ---------------------------------------------------------------------- - /** @brief Returns the thread id. - * @note This is an OS specific feature, if not supported, a - * zero will be returned. - */ - unsigned int GetThreadID(); - -private: - // Aliases for stream container - typedef std::vector<LogStreamInfo*> StreamArray; - typedef std::vector<LogStreamInfo*>::iterator StreamIt; - typedef std::vector<LogStreamInfo*>::const_iterator ConstStreamIt; - - //! only logging instance - static Logger *m_pLogger; - static NullLogger s_pNullLogger; - - //! Attached streams - StreamArray m_StreamArray; - - bool noRepeatMsg; - char lastMsg[MAX_LOG_MESSAGE_LENGTH*2]; - size_t lastLen; -}; -// ------------------------------------------------------------------------------------ - -} // Namespace Assimp - -#endif // !! INCLUDED_AI_DEFAULTLOGGER diff --git a/thirdparty/assimp/include/assimp/Exceptional.h b/thirdparty/assimp/include/assimp/Exceptional.h deleted file mode 100644 index 6bb6ce1e39..0000000000 --- a/thirdparty/assimp/include/assimp/Exceptional.h +++ /dev/null @@ -1,128 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2008, assimp team -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
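DefaultLogger.hpp, also dropped here, is a classic singleton: create() replaces the silent NullLogger with a real logger, get() hands it out, kill() restores the NullLogger. A hypothetical use against the pre-patch headers; the Logger::VERBOSE constant and the info() call assume the companion Logger.hpp interface, which this hunk does not show:

    #include <assimp/DefaultLogger.hpp>

    void EnableAssimpLogging() {
        using namespace Assimp;
        // Verbose logging to stdout and to the default "AssimpLog.txt" file.
        DefaultLogger::create(ASSIMP_DEFAULT_LOG_NAME, Logger::VERBOSE,
                              aiDefaultLogStream_STDOUT | aiDefaultLogStream_FILE);
        DefaultLogger::get()->info("assimp logging enabled");  // assumed Logger::info()
    }

    void DisableAssimpLogging() {
        Assimp::DefaultLogger::kill();   // back to the NullLogger
    }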
- ----------------------------------------------------------------------- -*/ - -#pragma once -#ifndef AI_INCLUDED_EXCEPTIONAL_H -#define AI_INCLUDED_EXCEPTIONAL_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <stdexcept> -#include <assimp/DefaultIOStream.h> - -using std::runtime_error; - -#ifdef _MSC_VER -# pragma warning(disable : 4275) -#endif - -// --------------------------------------------------------------------------- -/** FOR IMPORTER PLUGINS ONLY: Simple exception class to be thrown if an - * unrecoverable error occurs while importing. Loading APIs return - * NULL instead of a valid aiScene then. */ -class DeadlyImportError : public runtime_error { -public: - /** Constructor with arguments */ - explicit DeadlyImportError( const std::string& errorText) - : runtime_error(errorText) { - // empty - } - -}; - -typedef DeadlyImportError DeadlyExportError; - -#ifdef _MSC_VER -# pragma warning(default : 4275) -#endif - -// --------------------------------------------------------------------------- -template <typename T> -struct ExceptionSwallower { - T operator ()() const { - return T(); - } -}; - -// --------------------------------------------------------------------------- -template <typename T> -struct ExceptionSwallower<T*> { - T* operator ()() const { - return nullptr; - } -}; - -// --------------------------------------------------------------------------- -template <> -struct ExceptionSwallower<aiReturn> { - aiReturn operator ()() const { - try { - throw; - } - catch (std::bad_alloc&) { - return aiReturn_OUTOFMEMORY; - } - catch (...) { - return aiReturn_FAILURE; - } - } -}; - -// --------------------------------------------------------------------------- -template <> -struct ExceptionSwallower<void> { - void operator ()() const { - return; - } -}; - -#define ASSIMP_BEGIN_EXCEPTION_REGION()\ -{\ - try { - -#define ASSIMP_END_EXCEPTION_REGION(type)\ - } catch(...) {\ - return ExceptionSwallower<type>()();\ - }\ -} - -#endif // AI_INCLUDED_EXCEPTIONAL_H diff --git a/thirdparty/assimp/include/assimp/Exporter.hpp b/thirdparty/assimp/include/assimp/Exporter.hpp deleted file mode 100644 index 2612e1f9d2..0000000000 --- a/thirdparty/assimp/include/assimp/Exporter.hpp +++ /dev/null @@ -1,509 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
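Exceptional.h, whose removal ends above, pairs DeadlyImportError with the ExceptionSwallower machinery: the BEGIN/END macros wrap a region in try/catch and convert any escaping exception into a default value of the stated return type (for aiReturn, aiReturn_OUTOFMEMORY or aiReturn_FAILURE). A rough sketch of the intended use against the pre-patch headers; the function and its failure flag are invented for illustration:

    #include <assimp/Exceptional.h>
    #include <assimp/types.h>        // aiReturn

    // Any exception thrown inside the region is swallowed and mapped to an
    // error code instead of propagating across the C API boundary.
    aiReturn WriteSomething(bool fail) {
        ASSIMP_BEGIN_EXCEPTION_REGION()
            if (fail) {
                throw DeadlyExportError("simulated failure");   // hypothetical error
            }
        ASSIMP_END_EXCEPTION_REGION(aiReturn)
        return aiReturn_SUCCESS;
    }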
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Exporter.hpp -* @brief Defines the CPP-API for the Assimp export interface -*/ -#pragma once -#ifndef AI_EXPORT_HPP_INC -#define AI_EXPORT_HPP_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifndef ASSIMP_BUILD_NO_EXPORT - -#include "cexport.h" -#include <map> - -namespace Assimp { - -class ExporterPimpl; -class IOSystem; -class ProgressHandler; - -// ---------------------------------------------------------------------------------- -/** CPP-API: The Exporter class forms an C++ interface to the export functionality - * of the Open Asset Import Library. Note that the export interface is available - * only if Assimp has been built with ASSIMP_BUILD_NO_EXPORT not defined. - * - * The interface is modeled after the importer interface and mostly - * symmetric. The same rules for threading etc. apply. - * - * In a nutshell, there are two export interfaces: #Export, which writes the - * output file(s) either to the regular file system or to a user-supplied - * #IOSystem, and #ExportToBlob which returns a linked list of memory - * buffers (blob), each referring to one output file (in most cases - * there will be only one output file of course, but this extra complexity is - * needed since Assimp aims at supporting a wide range of file formats). - * - * #ExportToBlob is especially useful if you intend to work - * with the data in-memory. -*/ -class ASSIMP_API ExportProperties; - -class ASSIMP_API Exporter { -public: - /** Function pointer type of a Export worker function */ - typedef void (*fpExportFunc)(const char*, IOSystem*, const aiScene*, const ExportProperties*); - - /** Internal description of an Assimp export format option */ - struct ExportFormatEntry { - /// Public description structure to be returned by aiGetExportFormatDescription() - aiExportFormatDesc mDescription; - - // Worker function to do the actual exporting - fpExportFunc mExportFunction; - - // Post-processing steps to be executed PRIOR to invoking mExportFunction - unsigned int mEnforcePP; - - // Constructor to fill all entries - ExportFormatEntry( const char* pId, const char* pDesc, const char* pExtension, fpExportFunc pFunction, unsigned int pEnforcePP = 0u) - { - mDescription.id = pId; - mDescription.description = pDesc; - mDescription.fileExtension = pExtension; - mExportFunction = pFunction; - mEnforcePP = pEnforcePP; - } - - ExportFormatEntry() : - mExportFunction() - , mEnforcePP() - { - mDescription.id = NULL; - mDescription.description = NULL; - mDescription.fileExtension = NULL; - } - }; - - /** - * @brief The class constructor. - */ - Exporter(); - - /** - * @brief The class destructor. - */ - ~Exporter(); - - // ------------------------------------------------------------------- - /** Supplies a custom IO handler to the exporter to use to open and - * access files. 
- * - * If you need #Export to use custom IO logic to access the files, - * you need to supply a custom implementation of IOSystem and - * IOFile to the exporter. - * - * #Exporter takes ownership of the object and will destroy it - * afterwards. The previously assigned handler will be deleted. - * Pass NULL to take again ownership of your IOSystem and reset Assimp - * to use its default implementation, which uses plain file IO. - * - * @param pIOHandler The IO handler to be used in all file accesses - * of the Importer. */ - void SetIOHandler( IOSystem* pIOHandler); - - // ------------------------------------------------------------------- - /** Retrieves the IO handler that is currently set. - * You can use #IsDefaultIOHandler() to check whether the returned - * interface is the default IO handler provided by ASSIMP. The default - * handler is active as long the application doesn't supply its own - * custom IO handler via #SetIOHandler(). - * @return A valid IOSystem interface, never NULL. */ - IOSystem* GetIOHandler() const; - - // ------------------------------------------------------------------- - /** Checks whether a default IO handler is active - * A default handler is active as long the application doesn't - * supply its own custom IO handler via #SetIOHandler(). - * @return true by default */ - bool IsDefaultIOHandler() const; - - // ------------------------------------------------------------------- - /** Supplies a custom progress handler to the exporter. This - * interface exposes an #Update() callback, which is called - * more or less periodically (please don't sue us if it - * isn't as periodically as you'd like it to have ...). - * This can be used to implement progress bars and loading - * timeouts. - * @param pHandler Progress callback interface. Pass nullptr to - * disable progress reporting. - * @note Progress handlers can be used to abort the loading - * at almost any time.*/ - void SetProgressHandler(ProgressHandler* pHandler); - - // ------------------------------------------------------------------- - /** Exports the given scene to a chosen file format. Returns the exported - * data as a binary blob which you can write into a file or something. - * When you're done with the data, simply let the #Exporter instance go - * out of scope to have it released automatically. - * @param pScene The scene to export. Stays in possession of the caller, - * is not changed by the function. - * @param pFormatId ID string to specify to which format you want to - * export to. Use - * #GetExportFormatCount / #GetExportFormatDescription to learn which - * export formats are available. - * @param pPreprocessing See the documentation for #Export - * @return the exported data or NULL in case of error. - * @note If the Exporter instance did already hold a blob from - * a previous call to #ExportToBlob, it will be disposed. - * Any IO handlers set via #SetIOHandler are ignored here. - * @note Use aiCopyScene() to get a modifiable copy of a previously - * imported scene. */ - const aiExportDataBlob* ExportToBlob(const aiScene* pScene, const char* pFormatId, - unsigned int pPreprocessing = 0u, const ExportProperties* pProperties = nullptr); - const aiExportDataBlob* ExportToBlob( const aiScene* pScene, const std::string& pFormatId, - unsigned int pPreprocessing = 0u, const ExportProperties* pProperties = nullptr); - - // ------------------------------------------------------------------- - /** Convenience function to export directly to a file. 
Use - * #SetIOSystem to supply a custom IOSystem to gain fine-grained control - * about the output data flow of the export process. - * @param pBlob A data blob obtained from a previous call to #aiExportScene. Must not be NULL. - * @param pPath Full target file name. Target must be accessible. - * @param pPreprocessing Accepts any choice of the #aiPostProcessSteps enumerated - * flags, but in reality only a subset of them makes sense here. Specifying - * 'preprocessing' flags is useful if the input scene does not conform to - * Assimp's default conventions as specified in the @link data Data Structures Page @endlink. - * In short, this means the geometry data should use a right-handed coordinate systems, face - * winding should be counter-clockwise and the UV coordinate origin is assumed to be in - * the upper left. The #aiProcess_MakeLeftHanded, #aiProcess_FlipUVs and - * #aiProcess_FlipWindingOrder flags are used in the import side to allow users - * to have those defaults automatically adapted to their conventions. Specifying those flags - * for exporting has the opposite effect, respectively. Some other of the - * #aiPostProcessSteps enumerated values may be useful as well, but you'll need - * to try out what their effect on the exported file is. Many formats impose - * their own restrictions on the structure of the geometry stored therein, - * so some preprocessing may have little or no effect at all, or may be - * redundant as exporters would apply them anyhow. A good example - * is triangulation - whilst you can enforce it by specifying - * the #aiProcess_Triangulate flag, most export formats support only - * triangulate data so they would run the step even if it wasn't requested. - * - * If assimp detects that the input scene was directly taken from the importer side of - * the library (i.e. not copied using aiCopyScene and potentially modified afterwards), - * any post-processing steps already applied to the scene will not be applied again, unless - * they show non-idempotent behavior (#aiProcess_MakeLeftHanded, #aiProcess_FlipUVs and - * #aiProcess_FlipWindingOrder). - * @return AI_SUCCESS if everything was fine. - * @note Use aiCopyScene() to get a modifiable copy of a previously - * imported scene.*/ - aiReturn Export( const aiScene* pScene, const char* pFormatId, const char* pPath, - unsigned int pPreprocessing = 0u, const ExportProperties* pProperties = nullptr); - aiReturn Export( const aiScene* pScene, const std::string& pFormatId, const std::string& pPath, - unsigned int pPreprocessing = 0u, const ExportProperties* pProperties = nullptr); - - // ------------------------------------------------------------------- - /** Returns an error description of an error that occurred in #Export - * or #ExportToBlob - * - * Returns an empty string if no error occurred. - * @return A description of the last error, an empty string if no - * error occurred. The string is never NULL. - * - * @note The returned function remains valid until one of the - * following methods is called: #Export, #ExportToBlob, #FreeBlob */ - const char* GetErrorString() const; - - // ------------------------------------------------------------------- - /** Return the blob obtained from the last call to #ExportToBlob */ - const aiExportDataBlob* GetBlob() const; - - // ------------------------------------------------------------------- - /** Orphan the blob from the last call to #ExportToBlob. 
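As a rough companion to the Export()/ExportToBlob() documentation above, this is the usual call sequence, assuming a valid aiScene pointer obtained elsewhere ("obj" as the format id and "out.obj" as the target path are purely illustrative):

    #include <assimp/Exporter.hpp>
    #include <assimp/scene.h>
    #include <cstdio>

    void export_scene(const aiScene *scene) {
        Assimp::Exporter exporter;

        // 1) Write straight to disk in the chosen format.
        if (exporter.Export(scene, "obj", "out.obj") != AI_SUCCESS) {
            std::printf("export failed: %s\n", exporter.GetErrorString());
            return;
        }

        // 2) Or keep the result in memory as a blob chain.
        const aiExportDataBlob *blob = exporter.ExportToBlob(scene, "obj");
        if (blob) {
            // blob->data / blob->size describe the first output file;
            // blob->next chains any additional files. The blob is released
            // when the Exporter goes out of scope (or via FreeBlob()).
            std::printf("exported %zu bytes in memory\n", blob->size);
        }
    }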
This means - * the caller takes ownership and is thus responsible for calling - * the C API function #aiReleaseExportBlob to release it. */ - const aiExportDataBlob* GetOrphanedBlob() const; - - // ------------------------------------------------------------------- - /** Frees the current blob. - * - * The function does nothing if no blob has previously been - * previously produced via #ExportToBlob. #FreeBlob is called - * automatically by the destructor. The only reason to call - * it manually would be to reclaim as much storage as possible - * without giving up the #Exporter instance yet. */ - void FreeBlob( ); - - // ------------------------------------------------------------------- - /** Returns the number of export file formats available in the current - * Assimp build. Use #Exporter::GetExportFormatDescription to - * retrieve infos of a specific export format. - * - * This includes built-in exporters as well as exporters registered - * using #RegisterExporter. - **/ - size_t GetExportFormatCount() const; - - // ------------------------------------------------------------------- - /** Returns a description of the nth export file format. Use # - * #Exporter::GetExportFormatCount to learn how many export - * formats are supported. - * - * The returned pointer is of static storage duration if the - * pIndex pertains to a built-in exporter (i.e. one not registered - * via #RegistrerExporter). It is restricted to the life-time of the - * #Exporter instance otherwise. - * - * @param pIndex Index of the export format to retrieve information - * for. Valid range is 0 to #Exporter::GetExportFormatCount - * @return A description of that specific export format. - * NULL if pIndex is out of range. */ - const aiExportFormatDesc* GetExportFormatDescription( size_t pIndex ) const; - - // ------------------------------------------------------------------- - /** Register a custom exporter. Custom export formats are limited to - * to the current #Exporter instance and do not affect the - * library globally. The indexes under which the format's - * export format description can be queried are assigned - * monotonously. - * @param desc Exporter description. - * @return aiReturn_SUCCESS if the export format was successfully - * registered. A common cause that would prevent an exporter - * from being registered is that its format id is already - * occupied by another format. */ - aiReturn RegisterExporter(const ExportFormatEntry& desc); - - // ------------------------------------------------------------------- - /** Remove an export format previously registered with #RegisterExporter - * from the #Exporter instance (this can also be used to drop - * built-in exporters because those are implicitly registered - * using #RegisterExporter). - * @param id Format id to be unregistered, this refers to the - * 'id' field of #aiExportFormatDesc. - * @note Calling this method on a format description not yet registered - * has no effect.*/ - void UnregisterExporter(const char* id); - -protected: - // Just because we don't want you to know how we're hacking around. - ExporterPimpl* pimpl; -}; - -class ASSIMP_API ExportProperties { -public: - // Data type to store the key hash - typedef unsigned int KeyType; - - // typedefs for our four configuration maps. 
- // We don't need more, so there is no need for a generic solution - typedef std::map<KeyType, int> IntPropertyMap; - typedef std::map<KeyType, ai_real> FloatPropertyMap; - typedef std::map<KeyType, std::string> StringPropertyMap; - typedef std::map<KeyType, aiMatrix4x4> MatrixPropertyMap; - -public: - /** Standard constructor - * @see ExportProperties() - */ - ExportProperties(); - - // ------------------------------------------------------------------- - /** Copy constructor. - * - * This copies the configuration properties of another ExportProperties. - * @see ExportProperties(const ExportProperties& other) - */ - ExportProperties(const ExportProperties& other); - - // ------------------------------------------------------------------- - /** Set an integer configuration property. - * @param szName Name of the property. All supported properties - * are defined in the aiConfig.g header (all constants share the - * prefix AI_CONFIG_XXX and are simple strings). - * @param iValue New value of the property - * @return true if the property was set before. The new value replaces - * the previous value in this case. - * @note Property of different types (float, int, string ..) are kept - * on different stacks, so calling SetPropertyInteger() for a - * floating-point property has no effect - the loader will call - * GetPropertyFloat() to read the property, but it won't be there. - */ - bool SetPropertyInteger(const char* szName, int iValue); - - // ------------------------------------------------------------------- - /** Set a boolean configuration property. Boolean properties - * are stored on the integer stack internally so it's possible - * to set them via #SetPropertyBool and query them with - * #GetPropertyBool and vice versa. - * @see SetPropertyInteger() - */ - bool SetPropertyBool(const char* szName, bool value) { - return SetPropertyInteger(szName,value); - } - - // ------------------------------------------------------------------- - /** Set a floating-point configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyFloat(const char* szName, ai_real fValue); - - // ------------------------------------------------------------------- - /** Set a string configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyString(const char* szName, const std::string& sValue); - - // ------------------------------------------------------------------- - /** Set a matrix configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyMatrix(const char* szName, const aiMatrix4x4& sValue); - - // ------------------------------------------------------------------- - /** Get a configuration property. - * @param szName Name of the property. All supported properties - * are defined in the aiConfig.g header (all constants share the - * prefix AI_CONFIG_XXX). - * @param iErrorReturn Value that is returned if the property - * is not found. - * @return Current value of the property - * @note Property of different types (float, int, string ..) are kept - * on different lists, so calling SetPropertyInteger() for a - * floating-point property has no effect - the loader will call - * GetPropertyFloat() to read the property, but it won't be there. - */ - int GetPropertyInteger(const char* szName, - int iErrorReturn = 0xffffffff) const; - - // ------------------------------------------------------------------- - /** Get a boolean configuration property. 
Boolean properties - * are stored on the integer stack internally so it's possible - * to set them via #SetPropertyBool and query them with - * #GetPropertyBool and vice versa. - * @see GetPropertyInteger() - */ - bool GetPropertyBool(const char* szName, bool bErrorReturn = false) const { - return GetPropertyInteger(szName,bErrorReturn)!=0; - } - - // ------------------------------------------------------------------- - /** Get a floating-point configuration property - * @see GetPropertyInteger() - */ - ai_real GetPropertyFloat(const char* szName, - ai_real fErrorReturn = 10e10f) const; - - // ------------------------------------------------------------------- - /** Get a string configuration property - * - * The return value remains valid until the property is modified. - * @see GetPropertyInteger() - */ - const std::string GetPropertyString(const char* szName, - const std::string& sErrorReturn = "") const; - - // ------------------------------------------------------------------- - /** Get a matrix configuration property - * - * The return value remains valid until the property is modified. - * @see GetPropertyInteger() - */ - const aiMatrix4x4 GetPropertyMatrix(const char* szName, - const aiMatrix4x4& sErrorReturn = aiMatrix4x4()) const; - - // ------------------------------------------------------------------- - /** Determine a integer configuration property has been set. - * @see HasPropertyInteger() - */ - bool HasPropertyInteger(const char* szName) const; - - /** Determine a boolean configuration property has been set. - * @see HasPropertyBool() - */ - bool HasPropertyBool(const char* szName) const; - - /** Determine a boolean configuration property has been set. - * @see HasPropertyFloat() - */ - bool HasPropertyFloat(const char* szName) const; - - /** Determine a String configuration property has been set. - * @see HasPropertyString() - */ - bool HasPropertyString(const char* szName) const; - - /** Determine a Matrix configuration property has been set. 
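A short sketch of how an ExportProperties bundle is filled and handed to Export(); the "my.plugin.*" property names are placeholders, the real ones are the AI_CONFIG_* strings from aiConfig.h:

    #include <assimp/Exporter.hpp>
    #include <assimp/scene.h>

    aiReturn export_with_props(const aiScene *scene) {
        Assimp::ExportProperties props;

        // Names are plain strings; these keys are purely illustrative.
        props.SetPropertyBool("my.plugin.flag", true);
        props.SetPropertyFloat("my.plugin.scale", 0.01f);
        props.SetPropertyString("my.plugin.author", "editor");

        // Reads mirror the writes, with a fallback for unknown names.
        const bool flag = props.GetPropertyBool("my.plugin.flag", false);
        (void)flag;

        Assimp::Exporter exporter;
        return exporter.Export(scene, "obj", "out.obj", 0u, &props);
    }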
- * @see HasPropertyMatrix() - */ - bool HasPropertyMatrix(const char* szName) const; - -protected: - - /** List of integer properties */ - IntPropertyMap mIntProperties; - - /** List of floating-point properties */ - FloatPropertyMap mFloatProperties; - - /** List of string properties */ - StringPropertyMap mStringProperties; - - /** List of Matrix properties */ - MatrixPropertyMap mMatrixProperties; -}; - -// ---------------------------------------------------------------------------------- -inline -const aiExportDataBlob* Exporter::ExportToBlob( const aiScene* pScene, const std::string& pFormatId, - unsigned int pPreprocessing, const ExportProperties* pProperties) -{ - return ExportToBlob(pScene,pFormatId.c_str(),pPreprocessing, pProperties); -} - -// ---------------------------------------------------------------------------------- -inline -aiReturn Exporter :: Export( const aiScene* pScene, const std::string& pFormatId, - const std::string& pPath, unsigned int pPreprocessing, - const ExportProperties* pProperties) -{ - return Export(pScene,pFormatId.c_str(),pPath.c_str(),pPreprocessing, pProperties); -} - -} // namespace Assimp - -#endif // ASSIMP_BUILD_NO_EXPORT -#endif // AI_EXPORT_HPP_INC diff --git a/thirdparty/assimp/include/assimp/GenericProperty.h b/thirdparty/assimp/include/assimp/GenericProperty.h deleted file mode 100644 index 7796d595b8..0000000000 --- a/thirdparty/assimp/include/assimp/GenericProperty.h +++ /dev/null @@ -1,138 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -#pragma once -#ifndef AI_GENERIC_PROPERTY_H_INCLUDED -#define AI_GENERIC_PROPERTY_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/Importer.hpp> -#include <assimp/ai_assert.h> -#include <assimp/Hash.h> - -#include <map> - -// ------------------------------------------------------------------------------------------------ -template <class T> -inline -bool SetGenericProperty(std::map< unsigned int, T >& list, - const char* szName, const T& value) { - ai_assert(nullptr != szName); - const uint32_t hash = SuperFastHash(szName); - - typename std::map<unsigned int, T>::iterator it = list.find(hash); - if (it == list.end()) { - list.insert(std::pair<unsigned int, T>( hash, value )); - return false; - } - (*it).second = value; - - return true; -} - -// ------------------------------------------------------------------------------------------------ -template <class T> -inline -const T& GetGenericProperty(const std::map< unsigned int, T >& list, - const char* szName, const T& errorReturn) { - ai_assert(nullptr != szName); - const uint32_t hash = SuperFastHash(szName); - - typename std::map<unsigned int, T>::const_iterator it = list.find(hash); - if (it == list.end()) { - return errorReturn; - } - - return (*it).second; -} - -// ------------------------------------------------------------------------------------------------ -// Special version for pointer types - they will be deleted when replaced with another value -// passing NULL removes the whole property -template <class T> -inline -void SetGenericPropertyPtr(std::map< unsigned int, T* >& list, - const char* szName, T* value, bool* bWasExisting = nullptr ) { - ai_assert(nullptr != szName); - const uint32_t hash = SuperFastHash(szName); - - typename std::map<unsigned int, T*>::iterator it = list.find(hash); - if (it == list.end()) { - if (bWasExisting) { - *bWasExisting = false; - } - - list.insert(std::pair<unsigned int,T*>( hash, value )); - return; - } - if ((*it).second != value) { - delete (*it).second; - (*it).second = value; - } - if (!value) { - list.erase(it); - } - if (bWasExisting) { - *bWasExisting = true; - } -} - -// ------------------------------------------------------------------------------------------------ -template <class T> -inline -bool HasGenericProperty(const std::map< unsigned int, T >& list, - const char* szName) { - ai_assert(nullptr != szName); - const uint32_t hash = SuperFastHash(szName); - - typename std::map<unsigned int, T>::const_iterator it = list.find(hash); - if (it == list.end()) { - return false; - } - - return true; -} - -#endif // !! AI_GENERIC_PROPERTY_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/Hash.h b/thirdparty/assimp/include/assimp/Hash.h deleted file mode 100644 index 9056440789..0000000000 --- a/thirdparty/assimp/include/assimp/Hash.h +++ /dev/null @@ -1,122 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
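The GenericProperty.h helpers deleted above are what the Importer and Exporter property tables are built on: the string name is hashed with SuperFastHash and the value stored in a per-type std::map. A minimal standalone use (the key names are arbitrary strings in this sketch), assuming the header is still reachable on the include path:

    #include <assimp/GenericProperty.h>
    #include <cassert>
    #include <map>

    int main() {
        std::map<unsigned int, int> ints;

        // false on first insertion, true when an existing value is replaced.
        bool existed = SetGenericProperty(ints, "pp.limit", 4);
        assert(!existed);
        existed = SetGenericProperty(ints, "pp.limit", 8);
        assert(existed);

        // Lookups take an error fallback for names that were never set.
        assert(GetGenericProperty(ints, "pp.limit", -1) == 8);
        assert(GetGenericProperty(ints, "pp.missing", -1) == -1);
        assert(HasGenericProperty(ints, "pp.limit"));
        return 0;
    }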
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#pragma once -#ifndef AI_HASH_H_INCLUDED -#define AI_HASH_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <stdint.h> -#include <string.h> - -// ------------------------------------------------------------------------------------------------ -// Hashing function taken from -// http://www.azillionmonkeys.com/qed/hash.html -// (incremental version) -// -// This code is Copyright 2004-2008 by Paul Hsieh. It is used here in the belief that -// Assimp's license is considered compatible with Pauls's derivative license as specified -// on his web page. -// -// (stdint.h should have been been included here) -// ------------------------------------------------------------------------------------------------ -#undef get16bits -#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ - || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) -#define get16bits(d) (*((const uint16_t *) (d))) -#endif - -#if !defined (get16bits) -#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\ - +(uint32_t)(((const uint8_t *)(d))[0]) ) -#endif - -// ------------------------------------------------------------------------------------------------ -inline uint32_t SuperFastHash (const char * data, uint32_t len = 0, uint32_t hash = 0) { -uint32_t tmp; -int rem; - - if (!data) return 0; - if (!len)len = (uint32_t)::strlen(data); - - rem = len & 3; - len >>= 2; - - /* Main loop */ - for (;len > 0; len--) { - hash += get16bits (data); - tmp = (get16bits (data+2) << 11) ^ hash; - hash = (hash << 16) ^ tmp; - data += 2*sizeof (uint16_t); - hash += hash >> 11; - } - - /* Handle end cases */ - switch (rem) { - case 3: hash += get16bits (data); - hash ^= hash << 16; - hash ^= data[sizeof (uint16_t)] << 18; - hash += hash >> 11; - break; - case 2: hash += get16bits (data); - hash ^= hash << 11; - hash += hash >> 17; - break; - case 1: hash += *data; - hash ^= hash << 10; - hash += hash >> 1; - } - - /* Force "avalanching" of final 127 bits */ - hash ^= hash << 3; - hash += hash >> 5; - hash ^= hash << 4; - hash += hash >> 17; - hash ^= hash << 25; - hash += hash >> 6; - - return hash; -} - -#endif // !! 
AI_HASH_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/IOStream.hpp b/thirdparty/assimp/include/assimp/IOStream.hpp deleted file mode 100644 index 39932cd949..0000000000 --- a/thirdparty/assimp/include/assimp/IOStream.hpp +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file IOStream.hpp - * @brief File I/O wrappers for C++. - */ - -#pragma once -#ifndef AI_IOSTREAM_H_INC -#define AI_IOSTREAM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -#ifndef __cplusplus -# error This header requires C++ to be used. aiFileIO.h is the \ - corresponding C interface. -#endif - -namespace Assimp { - -// ---------------------------------------------------------------------------------- -/** @brief CPP-API: Class to handle file I/O for C++ - * - * Derive an own implementation from this interface to provide custom IO handling - * to the Importer. If you implement this interface, be sure to also provide an - * implementation for IOSystem that creates instances of your custom IO class. -*/ -class ASSIMP_API IOStream -#ifndef SWIG - : public Intern::AllocateFromAssimpHeap -#endif -{ -protected: - /** Constructor protected, use IOSystem::Open() to create an instance. */ - IOStream() AI_NO_EXCEPT; - -public: - // ------------------------------------------------------------------- - /** @brief Destructor. Deleting the object closes the underlying file, - * alternatively you may use IOSystem::Close() to release the file. 
- */ - virtual ~IOStream(); - - // ------------------------------------------------------------------- - /** @brief Read from the file - * - * See fread() for more details - * This fails for write-only files */ - virtual size_t Read(void* pvBuffer, - size_t pSize, - size_t pCount) = 0; - - // ------------------------------------------------------------------- - /** @brief Write to the file - * - * See fwrite() for more details - * This fails for read-only files */ - virtual size_t Write(const void* pvBuffer, - size_t pSize, - size_t pCount) = 0; - - // ------------------------------------------------------------------- - /** @brief Set the read/write cursor of the file - * - * Note that the offset is _negative_ for aiOrigin_END. - * See fseek() for more details */ - virtual aiReturn Seek(size_t pOffset, - aiOrigin pOrigin) = 0; - - // ------------------------------------------------------------------- - /** @brief Get the current position of the read/write cursor - * - * See ftell() for more details */ - virtual size_t Tell() const = 0; - - // ------------------------------------------------------------------- - /** @brief Returns filesize - * Returns the filesize. */ - virtual size_t FileSize() const = 0; - - // ------------------------------------------------------------------- - /** @brief Flush the contents of the file buffer (for writers) - * See fflush() for more details. - */ - virtual void Flush() = 0; -}; //! class IOStream - -// ---------------------------------------------------------------------------------- -AI_FORCE_INLINE -IOStream::IOStream() AI_NO_EXCEPT { - // empty -} - -// ---------------------------------------------------------------------------------- -AI_FORCE_INLINE -IOStream::~IOStream() { - // empty -} -// ---------------------------------------------------------------------------------- - -} //!namespace Assimp - -#endif //!!AI_IOSTREAM_H_INC diff --git a/thirdparty/assimp/include/assimp/IOStreamBuffer.h b/thirdparty/assimp/include/assimp/IOStreamBuffer.h deleted file mode 100644 index 97c84b23e2..0000000000 --- a/thirdparty/assimp/include/assimp/IOStreamBuffer.h +++ /dev/null @@ -1,362 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
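The IOStream interface removed above is the per-file half of custom IO: an IOSystem hands out concrete subclasses of it. A bare-bones, read-only wrapper over a memory buffer, shown only to illustrate the override surface:

    #include <assimp/IOStream.hpp>
    #include <algorithm>
    #include <cstring>

    class MemoryReadStream : public Assimp::IOStream {
    public:
        MemoryReadStream(const char *data, size_t size)
            : m_data(data), m_size(size), m_pos(0) {}

        size_t Read(void *buf, size_t size, size_t count) override {
            const size_t want  = size * count;
            const size_t avail = std::min(want, m_size - m_pos);
            std::memcpy(buf, m_data + m_pos, avail);
            m_pos += avail;
            return size ? avail / size : 0; // complete items actually read
        }
        size_t Write(const void *, size_t, size_t) override { return 0; } // read-only
        aiReturn Seek(size_t off, aiOrigin origin) override {
            if (origin == aiOrigin_SET) {
                m_pos = off;
            } else if (origin == aiOrigin_CUR) {
                m_pos += off;
            } else {
                return aiReturn_FAILURE; // aiOrigin_END not handled in this sketch
            }
            return m_pos <= m_size ? aiReturn_SUCCESS : aiReturn_FAILURE;
        }
        size_t Tell() const override { return m_pos; }
        size_t FileSize() const override { return m_size; }
        void Flush() override {} // nothing to flush for a reader

    private:
        const char *m_data;
        size_t m_size;
        size_t m_pos;
    };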
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -#pragma once -#ifndef AI_IOSTREAMBUFFER_H_INC -#define AI_IOSTREAMBUFFER_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <assimp/IOStream.hpp> -#include <assimp/ParsingUtils.h> - -#include <vector> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** - * Implementation of a cached stream buffer. - */ -template<class T> -class IOStreamBuffer { -public: - /// @brief The class constructor. - IOStreamBuffer( size_t cache = 4096 * 4096 ); - - /// @brief The class destructor. - ~IOStreamBuffer(); - - /// @brief Will open the cached access for a given stream. - /// @param stream The stream to cache. - /// @return true if successful. - bool open( IOStream *stream ); - - /// @brief Will close the cached access. - /// @return true if successful. - bool close(); - - /// @brief Returns the file-size. - /// @return The file-size. - size_t size() const; - - /// @brief Returns the cache size. - /// @return The cache size. - size_t cacheSize() const; - - /// @brief Will read the next block. - /// @return true if successful. - bool readNextBlock(); - - /// @brief Returns the number of blocks to read. - /// @return The number of blocks. - size_t getNumBlocks() const; - - /// @brief Returns the current block index. - /// @return The current block index. - size_t getCurrentBlockIndex() const; - - /// @brief Returns the current file pos. - /// @return The current file pos. - size_t getFilePos() const; - - /// @brief Will read the next line. - /// @param buffer The buffer for the next line. - /// @return true if successful. - bool getNextDataLine( std::vector<T> &buffer, T continuationToken ); - - /// @brief Will read the next line ascii or binary end line char. - /// @param buffer The buffer for the next line. - /// @return true if successful. - bool getNextLine(std::vector<T> &buffer); - - /// @brief Will read the next block. - /// @param buffer The buffer for the next block. - /// @return true if successful. - bool getNextBlock( std::vector<T> &buffer ); - -private: - IOStream *m_stream; - size_t m_filesize; - size_t m_cacheSize; - size_t m_numBlocks; - size_t m_blockIdx; - std::vector<T> m_cache; - size_t m_cachePos; - size_t m_filePos; -}; - -template<class T> -AI_FORCE_INLINE -IOStreamBuffer<T>::IOStreamBuffer( size_t cache ) -: m_stream( nullptr ) -, m_filesize( 0 ) -, m_cacheSize( cache ) -, m_numBlocks( 0 ) -, m_blockIdx( 0 ) -, m_cachePos( 0 ) -, m_filePos( 0 ) { - m_cache.resize( cache ); - std::fill( m_cache.begin(), m_cache.end(), '\n' ); -} - -template<class T> -AI_FORCE_INLINE -IOStreamBuffer<T>::~IOStreamBuffer() { - // empty -} - -template<class T> -AI_FORCE_INLINE -bool IOStreamBuffer<T>::open( IOStream *stream ) { - // file still opened! 
- if ( nullptr != m_stream ) { - return false; - } - - // Invalid stream pointer - if ( nullptr == stream ) { - return false; - } - - m_stream = stream; - m_filesize = m_stream->FileSize(); - if ( m_filesize == 0 ) { - return false; - } - if ( m_filesize < m_cacheSize ) { - m_cacheSize = m_filesize; - } - - m_numBlocks = m_filesize / m_cacheSize; - if ( ( m_filesize % m_cacheSize ) > 0 ) { - m_numBlocks++; - } - - return true; -} - -template<class T> -AI_FORCE_INLINE -bool IOStreamBuffer<T>::close() { - if ( nullptr == m_stream ) { - return false; - } - - // init counters and state vars - m_stream = nullptr; - m_filesize = 0; - m_numBlocks = 0; - m_blockIdx = 0; - m_cachePos = 0; - m_filePos = 0; - - return true; -} - -template<class T> -AI_FORCE_INLINE -size_t IOStreamBuffer<T>::size() const { - return m_filesize; -} - -template<class T> -AI_FORCE_INLINE -size_t IOStreamBuffer<T>::cacheSize() const { - return m_cacheSize; -} - -template<class T> -AI_FORCE_INLINE -bool IOStreamBuffer<T>::readNextBlock() { - m_stream->Seek( m_filePos, aiOrigin_SET ); - size_t readLen = m_stream->Read( &m_cache[ 0 ], sizeof( T ), m_cacheSize ); - if ( readLen == 0 ) { - return false; - } - if ( readLen < m_cacheSize ) { - m_cacheSize = readLen; - } - m_filePos += m_cacheSize; - m_cachePos = 0; - m_blockIdx++; - - return true; -} - -template<class T> -AI_FORCE_INLINE -size_t IOStreamBuffer<T>::getNumBlocks() const { - return m_numBlocks; -} - -template<class T> -AI_FORCE_INLINE -size_t IOStreamBuffer<T>::getCurrentBlockIndex() const { - return m_blockIdx; -} - -template<class T> -AI_FORCE_INLINE -size_t IOStreamBuffer<T>::getFilePos() const { - return m_filePos; -} - -template<class T> -AI_FORCE_INLINE -bool IOStreamBuffer<T>::getNextDataLine( std::vector<T> &buffer, T continuationToken ) { - buffer.resize( m_cacheSize ); - if ( m_cachePos >= m_cacheSize || 0 == m_filePos ) { - if ( !readNextBlock() ) { - return false; - } - } - - bool continuationFound( false ); - size_t i = 0; - for( ;; ) { - if ( continuationToken == m_cache[ m_cachePos ] ) { - continuationFound = true; - ++m_cachePos; - } - if ( IsLineEnd( m_cache[ m_cachePos ] ) ) { - if ( !continuationFound ) { - // the end of the data line - break; - } else { - // skip line end - while ( m_cache[m_cachePos] != '\n') { - ++m_cachePos; - } - ++m_cachePos; - continuationFound = false; - } - } - - buffer[ i ] = m_cache[ m_cachePos ]; - ++m_cachePos; - ++i; - if (m_cachePos >= size()) { - break; - } - if ( m_cachePos >= m_cacheSize ) { - if ( !readNextBlock() ) { - return false; - } - } - } - - buffer[ i ] = '\n'; - ++m_cachePos; - - return true; -} - -static AI_FORCE_INLINE -bool isEndOfCache( size_t pos, size_t cacheSize ) { - return ( pos == cacheSize ); -} - -template<class T> -AI_FORCE_INLINE -bool IOStreamBuffer<T>::getNextLine(std::vector<T> &buffer) { - buffer.resize(m_cacheSize); - if ( isEndOfCache( m_cachePos, m_cacheSize ) || 0 == m_filePos) { - if (!readNextBlock()) { - return false; - } - } - - if (IsLineEnd(m_cache[m_cachePos])) { - // skip line end - while (m_cache[m_cachePos] != '\n') { - ++m_cachePos; - } - ++m_cachePos; - if ( isEndOfCache( m_cachePos, m_cacheSize ) ) { - if ( !readNextBlock() ) { - return false; - } - } - } - - size_t i( 0 ); - while (!IsLineEnd(m_cache[ m_cachePos ])) { - buffer[i] = m_cache[ m_cachePos ]; - ++m_cachePos; - ++i; - if (m_cachePos >= m_cacheSize) { - if (!readNextBlock()) { - return false; - } - } - } - buffer[i] = '\n'; - ++m_cachePos; - - return true; -} - -template<class T> -AI_FORCE_INLINE -bool 
IOStreamBuffer<T>::getNextBlock( std::vector<T> &buffer) { - // Return the last block-value if getNextLine was used before - if ( 0 != m_cachePos ) { - buffer = std::vector<T>( m_cache.begin() + m_cachePos, m_cache.end() ); - m_cachePos = 0; - } else { - if ( !readNextBlock() ) { - return false; - } - - buffer = std::vector<T>(m_cache.begin(), m_cache.end()); - } - - return true; -} - -} // !ns Assimp - -#endif // AI_IOSTREAMBUFFER_H_INC diff --git a/thirdparty/assimp/include/assimp/IOSystem.hpp b/thirdparty/assimp/include/assimp/IOSystem.hpp deleted file mode 100644 index f1fb3b0c27..0000000000 --- a/thirdparty/assimp/include/assimp/IOSystem.hpp +++ /dev/null @@ -1,361 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file IOSystem.hpp - * @brief File system wrapper for C++. Inherit this class to supply - * custom file handling logic to the Import library. -*/ - -#pragma once -#ifndef AI_IOSYSTEM_H_INC -#define AI_IOSYSTEM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifndef __cplusplus -# error This header requires C++ to be used. aiFileIO.h is the \ - corresponding C interface. -#endif - -#include "types.h" - -#ifdef _WIN32 -# include <direct.h> -# include <stdlib.h> -# include <stdio.h> -#else -# include <sys/stat.h> -# include <sys/types.h> -# include <unistd.h> -#endif // _WIN32 - -#include <vector> - -namespace Assimp { - - class IOStream; - -// --------------------------------------------------------------------------- -/** @brief CPP-API: Interface to the file system. - * - * Derive an own implementation from this interface to supply custom file handling - * to the importer library. If you implement this interface, you also want to - * supply a custom implementation for IOStream. 
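IOStreamBuffer, deleted just above, is the cached line reader the text importers (OBJ and friends) sit on top of. A sketch of the open/getNextLine loop; the stream is assumed to come from some IOSystem::Open() call elsewhere:

    #include <assimp/IOStream.hpp>
    #include <assimp/IOStreamBuffer.h>
    #include <vector>

    // Counts the lines of an already opened stream.
    size_t count_lines(Assimp::IOStream *stream) {
        Assimp::IOStreamBuffer<char> buffer(4096); // small cache instead of the default
        if (!buffer.open(stream)) {
            return 0;
        }

        size_t lines = 0;
        std::vector<char> line;
        while (buffer.getNextLine(line)) { // each line comes back '\n'-terminated
            ++lines;
        }
        buffer.close();
        return lines;
    }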
- * - * @see Importer::SetIOHandler() - */ -class ASSIMP_API IOSystem -#ifndef SWIG - : public Intern::AllocateFromAssimpHeap -#endif -{ -public: - - // ------------------------------------------------------------------- - /** @brief Default constructor. - * - * Create an instance of your derived class and assign it to an - * #Assimp::Importer instance by calling Importer::SetIOHandler(). - */ - IOSystem() AI_NO_EXCEPT; - - // ------------------------------------------------------------------- - /** @brief Virtual destructor. - * - * It is safe to be called from within DLL Assimp, we're constructed - * on Assimp's heap. - */ - virtual ~IOSystem(); - - // ------------------------------------------------------------------- - /** @brief For backward compatibility - * @see Exists(const char*) - */ - AI_FORCE_INLINE bool Exists( const std::string& pFile) const; - - // ------------------------------------------------------------------- - /** @brief Tests for the existence of a file at the given path. - * - * @param pFile Path to the file - * @return true if there is a file with this path, else false. - */ - virtual bool Exists( const char* pFile) const = 0; - - // ------------------------------------------------------------------- - /** @brief Returns the system specific directory separator - * @return System specific directory separator - */ - virtual char getOsSeparator() const = 0; - - // ------------------------------------------------------------------- - /** @brief Open a new file with a given path. - * - * When the access to the file is finished, call Close() to release - * all associated resources (or the virtual dtor of the IOStream). - * - * @param pFile Path to the file - * @param pMode Desired file I/O mode. Required are: "wb", "w", "wt", - * "rb", "r", "rt". - * - * @return New IOStream interface allowing the lib to access - * the underlying file. - * @note When implementing this class to provide custom IO handling, - * you probably have to supply an own implementation of IOStream as well. - */ - virtual IOStream* Open(const char* pFile, - const char* pMode = "rb") = 0; - - // ------------------------------------------------------------------- - /** @brief For backward compatibility - * @see Open(const char*, const char*) - */ - inline IOStream* Open(const std::string& pFile, - const std::string& pMode = std::string("rb")); - - // ------------------------------------------------------------------- - /** @brief Closes the given file and releases all resources - * associated with it. - * @param pFile The file instance previously created by Open(). - */ - virtual void Close( IOStream* pFile) = 0; - - // ------------------------------------------------------------------- - /** @brief Compares two paths and check whether the point to - * identical files. - * - * The dummy implementation of this virtual member performs a - * case-insensitive comparison of the given strings. The default IO - * system implementation uses OS mechanisms to convert relative into - * absolute paths, so the result can be trusted. - * @param one First file - * @param second Second file - * @return true if the paths point to the same file. The file needn't - * be existing, however. 
- */ - virtual bool ComparePaths (const char* one, - const char* second) const; - - // ------------------------------------------------------------------- - /** @brief For backward compatibility - * @see ComparePaths(const char*, const char*) - */ - inline bool ComparePaths (const std::string& one, - const std::string& second) const; - - // ------------------------------------------------------------------- - /** @brief Pushes a new directory onto the directory stack. - * @param path Path to push onto the stack. - * @return True, when push was successful, false if path is empty. - */ - virtual bool PushDirectory( const std::string &path ); - - // ------------------------------------------------------------------- - /** @brief Returns the top directory from the stack. - * @return The directory on the top of the stack. - * Returns empty when no directory was pushed to the stack. - */ - virtual const std::string &CurrentDirectory() const; - - // ------------------------------------------------------------------- - /** @brief Returns the number of directories stored on the stack. - * @return The number of directories of the stack. - */ - virtual size_t StackSize() const; - - // ------------------------------------------------------------------- - /** @brief Pops the top directory from the stack. - * @return True, when a directory was on the stack. False if no - * directory was on the stack. - */ - virtual bool PopDirectory(); - - // ------------------------------------------------------------------- - /** @brief CReates an new directory at the given path. - * @param path [in] The path to create. - * @return True, when a directory was created. False if the directory - * cannot be created. - */ - virtual bool CreateDirectory( const std::string &path ); - - // ------------------------------------------------------------------- - /** @brief Will change the current directory to the given path. - * @param path [in] The path to change to. - * @return True, when the directory has changed successfully. - */ - virtual bool ChangeDirectory( const std::string &path ); - - virtual bool DeleteFile( const std::string &file ); - -private: - std::vector<std::string> m_pathStack; -}; - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -IOSystem::IOSystem() AI_NO_EXCEPT -: m_pathStack() { - // empty -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -IOSystem::~IOSystem() { - // empty -} - -// ---------------------------------------------------------------------------- -// For compatibility, the interface of some functions taking a std::string was -// changed to const char* to avoid crashes between binary incompatible STL -// versions. This code her is inlined, so it shouldn't cause any problems. 
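Deriving from IOSystem only strictly requires Exists(), getOsSeparator(), Open() and Close(); the directory-stack helpers above already have working default bodies. A small illustrative wrapper (not an assimp class) that restricts an existing IOSystem to a single file extension:

    #include <assimp/IOStream.hpp>
    #include <assimp/IOSystem.hpp>
    #include <string>

    class FilteredIOSystem : public Assimp::IOSystem {
    public:
        // The wrapped system is not owned here; lifetime is left to the caller.
        FilteredIOSystem(Assimp::IOSystem *inner, std::string ext)
            : m_inner(inner), m_ext(std::move(ext)) {}

        using Assimp::IOSystem::Exists; // keep the std::string convenience overloads
        using Assimp::IOSystem::Open;

        bool Exists(const char *file) const override {
            return allowed(file) && m_inner->Exists(file);
        }
        char getOsSeparator() const override {
            return m_inner->getOsSeparator();
        }
        Assimp::IOStream *Open(const char *file, const char *mode = "rb") override {
            return allowed(file) ? m_inner->Open(file, mode) : nullptr;
        }
        void Close(Assimp::IOStream *file) override {
            m_inner->Close(file);
        }

    private:
        bool allowed(const char *file) const {
            const std::string name(file);
            return name.size() >= m_ext.size() &&
                    name.compare(name.size() - m_ext.size(), m_ext.size(), m_ext) == 0;
        }

        Assimp::IOSystem *m_inner;
        std::string m_ext;
    };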
-// ---------------------------------------------------------------------------- - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -IOStream* IOSystem::Open(const std::string& pFile, const std::string& pMode) { - // NOTE: - // For compatibility, interface was changed to const char* to - // avoid crashes between binary incompatible STL versions - return Open(pFile.c_str(),pMode.c_str()); -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::Exists( const std::string& pFile) const { - // NOTE: - // For compatibility, interface was changed to const char* to - // avoid crashes between binary incompatible STL versions - return Exists(pFile.c_str()); -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::ComparePaths (const std::string& one, const std::string& second) const { - // NOTE: - // For compatibility, interface was changed to const char* to - // avoid crashes between binary incompatible STL versions - return ComparePaths(one.c_str(),second.c_str()); -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::PushDirectory( const std::string &path ) { - if ( path.empty() ) { - return false; - } - - m_pathStack.push_back( path ); - - return true; -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -const std::string &IOSystem::CurrentDirectory() const { - if ( m_pathStack.empty() ) { - static const std::string Dummy(""); - return Dummy; - } - return m_pathStack[ m_pathStack.size()-1 ]; -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -size_t IOSystem::StackSize() const { - return m_pathStack.size(); -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::PopDirectory() { - if ( m_pathStack.empty() ) { - return false; - } - - m_pathStack.pop_back(); - - return true; -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::CreateDirectory( const std::string &path ) { - if ( path.empty() ) { - return false; - } - -#ifdef _WIN32 - return 0 != ::_mkdir( path.c_str() ); -#else - return 0 != ::mkdir( path.c_str(), 0777 ); -#endif // _WIN32 -} - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::ChangeDirectory( const std::string &path ) { - if ( path.empty() ) { - return false; - } - -#ifdef _WIN32 - return 0 != ::_chdir( path.c_str() ); -#else - return 0 != ::chdir( path.c_str() ); -#endif // _WIN32 -} - - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE -bool IOSystem::DeleteFile( const std::string &file ) { - if ( file.empty() ) { - return false; - } - const int retCode( ::remove( file.c_str() ) ); - return ( 0 == retCode ); -} -} //!ns Assimp - -#endif //AI_IOSYSTEM_H_INC diff --git a/thirdparty/assimp/include/assimp/Importer.hpp b/thirdparty/assimp/include/assimp/Importer.hpp deleted file mode 100644 index bf449a9a25..0000000000 --- a/thirdparty/assimp/include/assimp/Importer.hpp +++ /dev/null @@ -1,663 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 
2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Importer.hpp - * @brief Defines the C++-API to the Open Asset Import Library. - */ -#pragma once -#ifndef AI_ASSIMP_HPP_INC -#define AI_ASSIMP_HPP_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifndef __cplusplus -# error This header requires C++ to be used. Use assimp.h for plain C. -#endif // __cplusplus - -// Public ASSIMP data structures -#include <assimp/types.h> - -namespace Assimp { - // ======================================================================= - // Public interface to Assimp - class Importer; - class IOStream; - class IOSystem; - class ProgressHandler; - - // ======================================================================= - // Plugin development - // - // Include the following headers for the declarations: - // BaseImporter.h - // BaseProcess.h - class BaseImporter; - class BaseProcess; - class SharedPostProcessInfo; - class BatchLoader; - - // ======================================================================= - // Holy stuff, only for members of the high council of the Jedi. - class ImporterPimpl; -} //! namespace Assimp - -#define AI_PROPERTY_WAS_NOT_EXISTING 0xffffffff - -struct aiScene; - -// importerdesc.h -struct aiImporterDesc; - -/** @namespace Assimp Assimp's CPP-API and all internal APIs */ -namespace Assimp { - -// ---------------------------------------------------------------------------------- -/** CPP-API: The Importer class forms an C++ interface to the functionality of the -* Open Asset Import Library. -* -* Create an object of this class and call ReadFile() to import a file. -* If the import succeeds, the function returns a pointer to the imported data. -* The data remains property of the object, it is intended to be accessed -* read-only. The imported data will be destroyed along with the Importer -* object. If the import fails, ReadFile() returns a NULL pointer. 
In this -* case you can retrieve a human-readable error description be calling -* GetErrorString(). You can call ReadFile() multiple times with a single Importer -* instance. Actually, constructing Importer objects involves quite many -* allocations and may take some time, so it's better to reuse them as often as -* possible. -* -* If you need the Importer to do custom file handling to access the files, -* implement IOSystem and IOStream and supply an instance of your custom -* IOSystem implementation by calling SetIOHandler() before calling ReadFile(). -* If you do not assign a custion IO handler, a default handler using the -* standard C++ IO logic will be used. -* -* @note One Importer instance is not thread-safe. If you use multiple -* threads for loading, each thread should maintain its own Importer instance. -*/ -class ASSIMP_API Importer { -public: - /** - * @brief The upper limit for hints. - */ - static const unsigned int MaxLenHint = 200; - -public: - - // ------------------------------------------------------------------- - /** Constructor. Creates an empty importer object. - * - * Call ReadFile() to start the import process. The configuration - * property table is initially empty. - */ - Importer(); - - // ------------------------------------------------------------------- - /** Copy constructor. - * - * This copies the configuration properties of another Importer. - * If this Importer owns a scene it won't be copied. - * Call ReadFile() to start the import process. - */ - Importer(const Importer& other)=delete; - - // ------------------------------------------------------------------- - /** Assignment operator has been deleted - */ - Importer &operator=(const Importer &) = delete; - - // ------------------------------------------------------------------- - /** Destructor. The object kept ownership of the imported data, - * which now will be destroyed along with the object. - */ - ~Importer(); - - - // ------------------------------------------------------------------- - /** Registers a new loader. - * - * @param pImp Importer to be added. The Importer instance takes - * ownership of the pointer, so it will be automatically deleted - * with the Importer instance. - * @return AI_SUCCESS if the loader has been added. The registration - * fails if there is already a loader for a specific file extension. - */ - aiReturn RegisterLoader(BaseImporter* pImp); - - // ------------------------------------------------------------------- - /** Unregisters a loader. - * - * @param pImp Importer to be unregistered. - * @return AI_SUCCESS if the loader has been removed. The function - * fails if the loader is currently in use (this could happen - * if the #Importer instance is used by more than one thread) or - * if it has not yet been registered. - */ - aiReturn UnregisterLoader(BaseImporter* pImp); - - // ------------------------------------------------------------------- - /** Registers a new post-process step. - * - * At the moment, there's a small limitation: new post processing - * steps are added to end of the list, or in other words, executed - * last, after all built-in steps. - * @param pImp Post-process step to be added. The Importer instance - * takes ownership of the pointer, so it will be automatically - * deleted with the Importer instance. - * @return AI_SUCCESS if the step has been added correctly. - */ - aiReturn RegisterPPStep(BaseProcess* pImp); - - // ------------------------------------------------------------------- - /** Unregisters a post-process step. 
- * - * @param pImp Step to be unregistered. - * @return AI_SUCCESS if the step has been removed. The function - * fails if the step is currently in use (this could happen - * if the #Importer instance is used by more than one thread) or - * if it has not yet been registered. - */ - aiReturn UnregisterPPStep(BaseProcess* pImp); - - // ------------------------------------------------------------------- - /** Set an integer configuration property. - * @param szName Name of the property. All supported properties - * are defined in the aiConfig.g header (all constants share the - * prefix AI_CONFIG_XXX and are simple strings). - * @param iValue New value of the property - * @return true if the property was set before. The new value replaces - * the previous value in this case. - * @note Property of different types (float, int, string ..) are kept - * on different stacks, so calling SetPropertyInteger() for a - * floating-point property has no effect - the loader will call - * GetPropertyFloat() to read the property, but it won't be there. - */ - bool SetPropertyInteger(const char* szName, int iValue); - - // ------------------------------------------------------------------- - /** Set a boolean configuration property. Boolean properties - * are stored on the integer stack internally so it's possible - * to set them via #SetPropertyBool and query them with - * #GetPropertyBool and vice versa. - * @see SetPropertyInteger() - */ - bool SetPropertyBool(const char* szName, bool value) { - return SetPropertyInteger(szName,value); - } - - // ------------------------------------------------------------------- - /** Set a floating-point configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyFloat(const char* szName, ai_real fValue); - - // ------------------------------------------------------------------- - /** Set a string configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyString(const char* szName, const std::string& sValue); - - // ------------------------------------------------------------------- - /** Set a matrix configuration property. - * @see SetPropertyInteger() - */ - bool SetPropertyMatrix(const char* szName, const aiMatrix4x4& sValue); - - // ------------------------------------------------------------------- - /** Get a configuration property. - * @param szName Name of the property. All supported properties - * are defined in the aiConfig.g header (all constants share the - * prefix AI_CONFIG_XXX). - * @param iErrorReturn Value that is returned if the property - * is not found. - * @return Current value of the property - * @note Property of different types (float, int, string ..) are kept - * on different lists, so calling SetPropertyInteger() for a - * floating-point property has no effect - the loader will call - * GetPropertyFloat() to read the property, but it won't be there. - */ - int GetPropertyInteger(const char* szName, - int iErrorReturn = 0xffffffff) const; - - // ------------------------------------------------------------------- - /** Get a boolean configuration property. Boolean properties - * are stored on the integer stack internally so it's possible - * to set them via #SetPropertyBool and query them with - * #GetPropertyBool and vice versa. 
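A short sketch of the per-type property stacks described above: each value must be set with the setter matching its type. The `AI_CONFIG_*` keys and `aiProcess_*` flags are assumed from `<assimp/config.h>` and `<assimp/postprocess.h>`, which are not in this hunk.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/config.h>       // AI_CONFIG_* keys (assumed, not part of this hunk)
#include <assimp/postprocess.h>  // aiProcess_* flags (assumed, not part of this hunk)
#include <assimp/scene.h>

const aiScene *import_with_properties(Assimp::Importer &importer, const char *path) {
    // Integer, float and boolean properties live on separate stacks, so each
    // value has to go through the matching setter (see the docs above).
    importer.SetPropertyInteger(AI_CONFIG_PP_SLM_VERTEX_LIMIT, 500000);
    importer.SetPropertyFloat(AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE, 80.0f);
    importer.SetPropertyBool(AI_CONFIG_IMPORT_FBX_READ_MATERIALS, true);

    // A property only has an effect if some loader or post-process step reads
    // it; a key nothing consults is simply never looked up.
    return importer.ReadFile(path,
            aiProcess_SplitLargeMeshes | aiProcess_GenSmoothNormals);
}
```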
- * @see GetPropertyInteger() - */ - bool GetPropertyBool(const char* szName, bool bErrorReturn = false) const { - return GetPropertyInteger(szName,bErrorReturn)!=0; - } - - // ------------------------------------------------------------------- - /** Get a floating-point configuration property - * @see GetPropertyInteger() - */ - ai_real GetPropertyFloat(const char* szName, - ai_real fErrorReturn = 10e10) const; - - // ------------------------------------------------------------------- - /** Get a string configuration property - * - * The return value remains valid until the property is modified. - * @see GetPropertyInteger() - */ - const std::string GetPropertyString(const char* szName, - const std::string& sErrorReturn = "") const; - - // ------------------------------------------------------------------- - /** Get a matrix configuration property - * - * The return value remains valid until the property is modified. - * @see GetPropertyInteger() - */ - const aiMatrix4x4 GetPropertyMatrix(const char* szName, - const aiMatrix4x4& sErrorReturn = aiMatrix4x4()) const; - - // ------------------------------------------------------------------- - /** Supplies a custom IO handler to the importer to use to open and - * access files. If you need the importer to use custom IO logic to - * access the files, you need to provide a custom implementation of - * IOSystem and IOFile to the importer. Then create an instance of - * your custom IOSystem implementation and supply it by this function. - * - * The Importer takes ownership of the object and will destroy it - * afterwards. The previously assigned handler will be deleted. - * Pass NULL to take again ownership of your IOSystem and reset Assimp - * to use its default implementation. - * - * @param pIOHandler The IO handler to be used in all file accesses - * of the Importer. - */ - void SetIOHandler( IOSystem* pIOHandler); - - // ------------------------------------------------------------------- - /** Retrieves the IO handler that is currently set. - * You can use #IsDefaultIOHandler() to check whether the returned - * interface is the default IO handler provided by ASSIMP. The default - * handler is active as long the application doesn't supply its own - * custom IO handler via #SetIOHandler(). - * @return A valid IOSystem interface, never NULL. - */ - IOSystem* GetIOHandler() const; - - // ------------------------------------------------------------------- - /** Checks whether a default IO handler is active - * A default handler is active as long the application doesn't - * supply its own custom IO handler via #SetIOHandler(). - * @return true by default - */ - bool IsDefaultIOHandler() const; - - // ------------------------------------------------------------------- - /** Supplies a custom progress handler to the importer. This - * interface exposes an #Update() callback, which is called - * more or less periodically (please don't sue us if it - * isn't as periodically as you'd like it to have ...). - * This can be used to implement progress bars and loading - * timeouts. - * @param pHandler Progress callback interface. Pass NULL to - * disable progress reporting. - * @note Progress handlers can be used to abort the loading - * at almost any time.*/ - void SetProgressHandler ( ProgressHandler* pHandler ); - - // ------------------------------------------------------------------- - /** Retrieves the progress handler that is currently set. 
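The SetProgressHandler() documentation above only says the interface exposes an Update() callback. ProgressHandler itself is declared in `<assimp/ProgressHandler.hpp>`, which is not part of this hunk, so the `Update(float)` signature and the lifetime handling below are assumptions based on that header.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/ProgressHandler.hpp>  // not in this hunk; Update(float) signature assumed
#include <assimp/postprocess.h>
#include <cstdio>

// Reports coarse loading progress; returning false is how a handler can ask
// for the import to be aborted.
class ConsoleProgress : public Assimp::ProgressHandler {
public:
    bool Update(float percentage = -1.0f) override {
        std::printf("loading... %.0f%%\n", percentage * 100.0f);
        return true; // keep going
    }
};

void import_with_progress(const char *path) {
    Assimp::Importer importer;
    // Assumption: the Importer manages the handler pointer much like the IO
    // handler set via SetIOHandler(); check the implementation before relying
    // on this in production code.
    importer.SetProgressHandler(new ConsoleProgress);
    importer.ReadFile(path, aiProcess_Triangulate);
}
```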
- * You can use #IsDefaultProgressHandler() to check whether the returned - * interface is the default handler provided by ASSIMP. The default - * handler is active as long the application doesn't supply its own - * custom handler via #SetProgressHandler(). - * @return A valid ProgressHandler interface, never NULL. - */ - ProgressHandler* GetProgressHandler() const; - - // ------------------------------------------------------------------- - /** Checks whether a default progress handler is active - * A default handler is active as long the application doesn't - * supply its own custom progress handler via #SetProgressHandler(). - * @return true by default - */ - bool IsDefaultProgressHandler() const; - - // ------------------------------------------------------------------- - /** @brief Check whether a given set of post-processing flags - * is supported. - * - * Some flags are mutually exclusive, others are probably - * not available because your excluded them from your - * Assimp builds. Calling this function is recommended if - * you're unsure. - * - * @param pFlags Bitwise combination of the aiPostProcess flags. - * @return true if this flag combination is fine. - */ - bool ValidateFlags(unsigned int pFlags) const; - - // ------------------------------------------------------------------- - /** Reads the given file and returns its contents if successful. - * - * If the call succeeds, the contents of the file are returned as a - * pointer to an aiScene object. The returned data is intended to be - * read-only, the importer object keeps ownership of the data and will - * destroy it upon destruction. If the import fails, NULL is returned. - * A human-readable error description can be retrieved by calling - * GetErrorString(). The previous scene will be deleted during this call. - * @param pFile Path and filename to the file to be imported. - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. If you wish to inspect the imported - * scene first in order to fine-tune your post-processing setup, - * consider to use #ApplyPostProcessing(). - * @return A pointer to the imported data, NULL if the import failed. - * The pointer to the scene remains in possession of the Importer - * instance. Use GetOrphanedScene() to take ownership of it. - * - * @note Assimp is able to determine the file format of a file - * automatically. - */ - const aiScene* ReadFile( - const char* pFile, - unsigned int pFlags); - - // ------------------------------------------------------------------- - /** Reads the given file from a memory buffer and returns its - * contents if successful. - * - * If the call succeeds, the contents of the file are returned as a - * pointer to an aiScene object. The returned data is intended to be - * read-only, the importer object keeps ownership of the data and will - * destroy it upon destruction. If the import fails, NULL is returned. - * A human-readable error description can be retrieved by calling - * GetErrorString(). The previous scene will be deleted during this call. - * Calling this method doesn't affect the active IOSystem. - * @param pBuffer Pointer to the file data - * @param pLength Length of pBuffer, in bytes - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. 
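Since ValidateFlags() and ReadFile() above are the core of the API this PR drops, a small sketch of checking a flag combination before importing. The `aiProcess_*` constants come from `<assimp/postprocess.h>`, not from this hunk.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>  // aiProcess_* flags (not part of this hunk)
#include <assimp/scene.h>
#include <cstdio>

const aiScene *import_checked(Assimp::Importer &importer, const char *path) {
    const unsigned int flags = aiProcess_Triangulate |
                               aiProcess_CalcTangentSpace |
                               aiProcess_ValidateDataStructure;

    // Some steps are mutually exclusive or may have been compiled out of the
    // library; ValidateFlags() catches that before any parsing happens.
    if (!importer.ValidateFlags(flags)) {
        std::fprintf(stderr, "unsupported post-processing combination\n");
        return nullptr;
    }
    return importer.ReadFile(path, flags);
}
```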
If you wish to inspect the imported - * scene first in order to fine-tune your post-processing setup, - * consider to use #ApplyPostProcessing(). - * @param pHint An additional hint to the library. If this is a non - * empty string, the library looks for a loader to support - * the file extension specified by pHint and passes the file to - * the first matching loader. If this loader is unable to completely - * the request, the library continues and tries to determine the - * file format on its own, a task that may or may not be successful. - * Check the return value, and you'll know ... - * @return A pointer to the imported data, NULL if the import failed. - * The pointer to the scene remains in possession of the Importer - * instance. Use GetOrphanedScene() to take ownership of it. - * - * @note This is a straightforward way to decode models from memory - * buffers, but it doesn't handle model formats that spread their - * data across multiple files or even directories. Examples include - * OBJ or MD3, which outsource parts of their material info into - * external scripts. If you need full functionality, provide - * a custom IOSystem to make Assimp find these files and use - * the regular ReadFile() API. - */ - const aiScene* ReadFileFromMemory( - const void* pBuffer, - size_t pLength, - unsigned int pFlags, - const char* pHint = ""); - - // ------------------------------------------------------------------- - /** Apply post-processing to an already-imported scene. - * - * This is strictly equivalent to calling #ReadFile() with the same - * flags. However, you can use this separate function to inspect - * the imported scene first to fine-tune your post-processing setup. - * @param pFlags Provide a bitwise combination of the - * #aiPostProcessSteps flags. - * @return A pointer to the post-processed data. This is still the - * same as the pointer returned by #ReadFile(). However, if - * post-processing fails, the scene could now be NULL. - * That's quite a rare case, post processing steps are not really - * designed to 'fail'. To be exact, the #aiProcess_ValidateDS - * flag is currently the only post processing step which can actually - * cause the scene to be reset to NULL. - * - * @note The method does nothing if no scene is currently bound - * to the #Importer instance. */ - const aiScene* ApplyPostProcessing(unsigned int pFlags); - - const aiScene* ApplyCustomizedPostProcessing( BaseProcess *rootProcess, bool requestValidation ); - - // ------------------------------------------------------------------- - /** @brief Reads the given file and returns its contents if successful. - * - * This function is provided for backward compatibility. - * See the const char* version for detailed docs. - * @see ReadFile(const char*, pFlags) */ - const aiScene* ReadFile( - const std::string& pFile, - unsigned int pFlags); - - // ------------------------------------------------------------------- - /** Frees the current scene. - * - * The function does nothing if no scene has previously been - * read via ReadFile(). FreeScene() is called automatically by the - * destructor and ReadFile() itself. */ - void FreeScene( ); - - // ------------------------------------------------------------------- - /** Returns an error description of an error that occurred in ReadFile(). - * - * Returns an empty string if no error occurred. - * @return A description of the last error, an empty string if no - * error occurred. The string is never NULL. 
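A sketch of the in-memory path described above: ReadFileFromMemory() with an extension hint, followed by a separate ApplyPostProcessing() pass once the raw scene has been inspected. The "glb" hint and the `aiProcess_*` flags are assumptions from outside this hunk; the buffer is a placeholder.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>  // aiProcess_* flags (not part of this hunk)
#include <assimp/scene.h>
#include <vector>

// Decode a file that already sits in a memory buffer, then post-process it as
// a second step once the unprocessed scene has been inspected.
const aiScene *import_from_buffer(Assimp::Importer &importer,
        const std::vector<unsigned char> &buffer) {
    // The hint is a file extension; the matching loader gets first try and
    // Assimp falls back to auto-detection if that loader rejects the data.
    const aiScene *scene = importer.ReadFileFromMemory(
            buffer.data(), buffer.size(), 0 /* no post-processing yet */, "glb");
    if (!scene) {
        return nullptr;
    }

    // ... inspect the unprocessed scene here to pick the steps to run ...

    // Per the docs above, this is equivalent to passing the flags directly.
    return importer.ApplyPostProcessing(
            aiProcess_Triangulate | aiProcess_FlipUVs);
}
```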
- * - * @note The returned function remains valid until one of the - * following methods is called: #ReadFile(), #FreeScene(). */ - const char* GetErrorString() const; - - // ------------------------------------------------------------------- - /** Returns the scene loaded by the last successful call to ReadFile() - * - * @return Current scene or NULL if there is currently no scene loaded */ - const aiScene* GetScene() const; - - // ------------------------------------------------------------------- - /** Returns the scene loaded by the last successful call to ReadFile() - * and releases the scene from the ownership of the Importer - * instance. The application is now responsible for deleting the - * scene. Any further calls to GetScene() or GetOrphanedScene() - * will return NULL - until a new scene has been loaded via ReadFile(). - * - * @return Current scene or NULL if there is currently no scene loaded - * @note Use this method with maximal caution, and only if you have to. - * By design, aiScene's are exclusively maintained, allocated and - * deallocated by Assimp and no one else. The reasoning behind this - * is the golden rule that deallocations should always be done - * by the module that did the original allocation because heaps - * are not necessarily shared. GetOrphanedScene() enforces you - * to delete the returned scene by yourself, but this will only - * be fine if and only if you're using the same heap as assimp. - * On Windows, it's typically fine provided everything is linked - * against the multithreaded-dll version of the runtime library. - * It will work as well for static linkage with Assimp.*/ - aiScene* GetOrphanedScene(); - - // ------------------------------------------------------------------- - /** Returns whether a given file extension is supported by ASSIMP. - * - * @param szExtension Extension to be checked. - * Must include a trailing dot '.'. Example: ".3ds", ".md3". - * Cases-insensitive. - * @return true if the extension is supported, false otherwise */ - bool IsExtensionSupported(const char* szExtension) const; - - // ------------------------------------------------------------------- - /** @brief Returns whether a given file extension is supported by ASSIMP. - * - * This function is provided for backward compatibility. - * See the const char* version for detailed and up-to-date docs. - * @see IsExtensionSupported(const char*) */ - inline bool IsExtensionSupported(const std::string& szExtension) const; - - // ------------------------------------------------------------------- - /** Get a full list of all file extensions supported by ASSIMP. - * - * If a file extension is contained in the list this does of course not - * mean that ASSIMP is able to load all files with this extension --- - * it simply means there is an importer loaded which claims to handle - * files with this file extension. - * @param szOut String to receive the extension list. - * Format of the list: "*.3ds;*.obj;*.dae". This is useful for - * use with the WinAPI call GetOpenFileName(Ex). */ - void GetExtensionList(aiString& szOut) const; - - // ------------------------------------------------------------------- - /** @brief Get a full list of all file extensions supported by ASSIMP. - * - * This function is provided for backward compatibility. - * See the aiString version for detailed and up-to-date docs. 
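The GetOrphanedScene() contract above (caller takes ownership, same-heap caveat) in a small sketch; only members shown in the deleted header plus `<assimp/postprocess.h>` are used.

```cpp
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>  // aiProcess_* flags (not part of this hunk)
#include <assimp/scene.h>

// Import a file and detach the scene from the Importer so it outlives it.
aiScene *import_and_take_ownership(const char *path) {
    Assimp::Importer importer;
    if (!importer.ReadFile(path, aiProcess_Triangulate)) {
        return nullptr;
    }

    // After this call the Importer no longer owns the scene; the caller must
    // delete it, and (per the docs above) must share a heap with Assimp for
    // that to be safe.
    return importer.GetOrphanedScene();
}
```

The caller is then responsible for `delete`-ing the returned pointer; any later GetScene()/GetOrphanedScene() call on the same Importer returns NULL until the next ReadFile().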
- * @see GetExtensionList(aiString&)*/ - inline void GetExtensionList(std::string& szOut) const; - - // ------------------------------------------------------------------- - /** Get the number of importers currently registered with Assimp. */ - size_t GetImporterCount() const; - - // ------------------------------------------------------------------- - /** Get meta data for the importer corresponding to a specific index.. - * - * For the declaration of #aiImporterDesc, include <assimp/importerdesc.h>. - * @param index Index to query, must be within [0,GetImporterCount()) - * @return Importer meta data structure, NULL if the index does not - * exist or if the importer doesn't offer meta information ( - * importers may do this at the cost of being hated by their peers).*/ - const aiImporterDesc* GetImporterInfo(size_t index) const; - - // ------------------------------------------------------------------- - /** Find the importer corresponding to a specific index. - * - * @param index Index to query, must be within [0,GetImporterCount()) - * @return Importer instance. NULL if the index does not - * exist. */ - BaseImporter* GetImporter(size_t index) const; - - // ------------------------------------------------------------------- - /** Find the importer corresponding to a specific file extension. - * - * This is quite similar to #IsExtensionSupported except a - * BaseImporter instance is returned. - * @param szExtension Extension to check for. The following formats - * are recognized (BAH being the file extension): "BAH" (comparison - * is case-insensitive), ".bah", "*.bah" (wild card and dot - * characters at the beginning of the extension are skipped). - * @return NULL if no importer is found*/ - BaseImporter* GetImporter (const char* szExtension) const; - - // ------------------------------------------------------------------- - /** Find the importer index corresponding to a specific file extension. - * - * @param szExtension Extension to check for. The following formats - * are recognized (BAH being the file extension): "BAH" (comparison - * is case-insensitive), ".bah", "*.bah" (wild card and dot - * characters at the beginning of the extension are skipped). - * @return (size_t)-1 if no importer is found */ - size_t GetImporterIndex (const char* szExtension) const; - - // ------------------------------------------------------------------- - /** Returns the storage allocated by ASSIMP to hold the scene data - * in memory. - * - * This refers to the currently loaded file, see #ReadFile(). - * @param in Data structure to be filled. - * @note The returned memory statistics refer to the actual - * size of the use data of the aiScene. Heap-related overhead - * is (naturally) not included.*/ - void GetMemoryRequirements(aiMemoryInfo& in) const; - - // ------------------------------------------------------------------- - /** Enables "extra verbose" mode. - * - * 'Extra verbose' means the data structure is validated after *every* - * single post processing step to make sure everyone modifies the data - * structure in a well-defined manner. This is a debug feature and not - * intended for use in production environments. */ - void SetExtraVerbose(bool bDo); - - // ------------------------------------------------------------------- - /** Private, do not use. */ - ImporterPimpl* Pimpl() { return pimpl; } - const ImporterPimpl* Pimpl() const { return pimpl; } - -protected: - - // Just because we don't want you to know how we're hacking around. - ImporterPimpl* pimpl; -}; //! 
class Importer - - -// ---------------------------------------------------------------------------- -// For compatibility, the interface of some functions taking a std::string was -// changed to const char* to avoid crashes between binary incompatible STL -// versions. This code her is inlined, so it shouldn't cause any problems. -// ---------------------------------------------------------------------------- - -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE const aiScene* Importer::ReadFile( const std::string& pFile,unsigned int pFlags){ - return ReadFile(pFile.c_str(),pFlags); -} -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE void Importer::GetExtensionList(std::string& szOut) const { - aiString s; - GetExtensionList(s); - szOut = s.data; -} -// ---------------------------------------------------------------------------- -AI_FORCE_INLINE bool Importer::IsExtensionSupported(const std::string& szExtension) const { - return IsExtensionSupported(szExtension.c_str()); -} - -} // !namespace Assimp - -#endif // AI_ASSIMP_HPP_INC diff --git a/thirdparty/assimp/include/assimp/LineSplitter.h b/thirdparty/assimp/include/assimp/LineSplitter.h deleted file mode 100644 index 6c1097bb6d..0000000000 --- a/thirdparty/assimp/include/assimp/LineSplitter.h +++ /dev/null @@ -1,289 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file LineSplitter.h - * @brief LineSplitter, a helper class to iterate through all lines - * of a file easily. Works with StreamReader. 
- */ -#pragma once -#ifndef INCLUDED_LINE_SPLITTER_H -#define INCLUDED_LINE_SPLITTER_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <stdexcept> -#include <assimp/StreamReader.h> -#include <assimp/ParsingUtils.h> - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -/** Usage: -@code -for(LineSplitter splitter(stream);splitter;++splitter) { - - if (*splitter == "hi!") { - ... - } - else if (splitter->substr(0,5) == "hello") { - ... - // access the third token in the line (tokens are space-separated) - if (strtol(splitter[2]) > 5) { .. } - } - - std::cout << "Current line is: " << splitter.get_index() << std::endl; -} -@endcode -*/ -// ------------------------------------------------------------------------------------------------ -class LineSplitter { -public: - typedef size_t line_idx; - - // ----------------------------------------- - /** construct from existing stream reader - note: trim is *always* assumed true if skyp_empty_lines==true - */ - LineSplitter(StreamReaderLE& stream, bool skip_empty_lines = true, bool trim = true); - - ~LineSplitter(); - - // ----------------------------------------- - /** pseudo-iterator increment */ - LineSplitter& operator++(); - - // ----------------------------------------- - LineSplitter& operator++(int); - - // ----------------------------------------- - /** get a pointer to the beginning of a particular token */ - const char* operator[] (size_t idx) const; - - // ----------------------------------------- - /** extract the start positions of N tokens from the current line*/ - template <size_t N> - void get_tokens(const char* (&tokens)[N]) const; - - // ----------------------------------------- - /** member access */ - const std::string* operator -> () const; - - std::string operator* () const; - - // ----------------------------------------- - /** boolean context */ - operator bool() const; - - // ----------------------------------------- - /** line indices are zero-based, empty lines are included */ - operator line_idx() const; - - line_idx get_index() const; - - // ----------------------------------------- - /** access the underlying stream object */ - StreamReaderLE& get_stream(); - - // ----------------------------------------- - /** !strcmp((*this)->substr(0,strlen(check)),check) */ - bool match_start(const char* check); - - // ----------------------------------------- - /** swallow the next call to ++, return the previous value. 
*/ - void swallow_next_increment(); - - LineSplitter( const LineSplitter & ) = delete; - LineSplitter(LineSplitter &&) = delete; - LineSplitter &operator = ( const LineSplitter & ) = delete; - -private: - line_idx mIdx; - std::string mCur; - StreamReaderLE& mStream; - bool mSwallow, mSkip_empty_lines, mTrim; -}; - -AI_FORCE_INLINE -LineSplitter::LineSplitter(StreamReaderLE& stream, bool skip_empty_lines, bool trim ) -: mIdx(0) -, mCur() -, mStream(stream) -, mSwallow() -, mSkip_empty_lines(skip_empty_lines) -, mTrim(trim) { - mCur.reserve(1024); - operator++(); - mIdx = 0; -} - -AI_FORCE_INLINE -LineSplitter::~LineSplitter() { - // empty -} - -AI_FORCE_INLINE -LineSplitter& LineSplitter::operator++() { - if (mSwallow) { - mSwallow = false; - return *this; - } - - if (!*this) { - throw std::logic_error("End of file, no more lines to be retrieved."); - } - - char s; - mCur.clear(); - while (mStream.GetRemainingSize() && (s = mStream.GetI1(), 1)) { - if (s == '\n' || s == '\r') { - if (mSkip_empty_lines) { - while (mStream.GetRemainingSize() && ((s = mStream.GetI1()) == ' ' || s == '\r' || s == '\n')); - if (mStream.GetRemainingSize()) { - mStream.IncPtr(-1); - } - } else { - // skip both potential line terminators but don't read past this line. - if (mStream.GetRemainingSize() && (s == '\r' && mStream.GetI1() != '\n')) { - mStream.IncPtr(-1); - } - if (mTrim) { - while (mStream.GetRemainingSize() && ((s = mStream.GetI1()) == ' ' || s == '\t')); - if (mStream.GetRemainingSize()) { - mStream.IncPtr(-1); - } - } - } - break; - } - mCur += s; - } - ++mIdx; - - return *this; -} - -AI_FORCE_INLINE -LineSplitter &LineSplitter::operator++(int) { - return ++(*this); -} - -AI_FORCE_INLINE -const char *LineSplitter::operator[] (size_t idx) const { - const char* s = operator->()->c_str(); - - SkipSpaces(&s); - for (size_t i = 0; i < idx; ++i) { - - for (; !IsSpace(*s); ++s) { - if (IsLineEnd(*s)) { - throw std::range_error("Token index out of range, EOL reached"); - } - } - SkipSpaces(&s); - } - return s; -} - -template <size_t N> -AI_FORCE_INLINE -void LineSplitter::get_tokens(const char* (&tokens)[N]) const { - const char* s = operator->()->c_str(); - - SkipSpaces(&s); - for (size_t i = 0; i < N; ++i) { - if (IsLineEnd(*s)) { - throw std::range_error("Token count out of range, EOL reached"); - } - tokens[i] = s; - - for (; *s && !IsSpace(*s); ++s); - SkipSpaces(&s); - } -} - -AI_FORCE_INLINE -const std::string* LineSplitter::operator -> () const { - return &mCur; -} - -AI_FORCE_INLINE -std::string LineSplitter::operator* () const { - return mCur; -} - -AI_FORCE_INLINE -LineSplitter::operator bool() const { - return mStream.GetRemainingSize() > 0; -} - -AI_FORCE_INLINE -LineSplitter::operator line_idx() const { - return mIdx; -} - -AI_FORCE_INLINE -LineSplitter::line_idx LineSplitter::get_index() const { - return mIdx; -} - -AI_FORCE_INLINE -StreamReaderLE &LineSplitter::get_stream() { - return mStream; -} - -AI_FORCE_INLINE -bool LineSplitter::match_start(const char* check) { - const size_t len = ::strlen(check); - - return len <= mCur.length() && std::equal(check, check + len, mCur.begin()); -} - -AI_FORCE_INLINE -void LineSplitter::swallow_next_increment() { - mSwallow = true; -} - -} // Namespace Assimp - -#endif // INCLUDED_LINE_SPLITTER_H diff --git a/thirdparty/assimp/include/assimp/LogAux.h b/thirdparty/assimp/include/assimp/LogAux.h deleted file mode 100644 index bcead78dd3..0000000000 --- a/thirdparty/assimp/include/assimp/LogAux.h +++ /dev/null @@ -1,136 +0,0 @@ -/* -Open Asset Import 
Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file LogAux.h - * @brief Common logging usage patterns for importer implementations - */ -#pragma once -#ifndef INCLUDED_AI_LOGAUX_H -#define INCLUDED_AI_LOGAUX_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/TinyFormatter.h> -#include <assimp/Exceptional.h> -#include <assimp/DefaultLogger.hpp> - -namespace Assimp { - -template<class TDeriving> -class LogFunctions { -public: - // ------------------------------------------------------------------------------------------------ - static void ThrowException(const std::string& msg) - { - throw DeadlyImportError(Prefix()+msg); - } - - // ------------------------------------------------------------------------------------------------ - static void LogWarn(const Formatter::format& message) { - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_WARN(Prefix()+(std::string)message); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogError(const Formatter::format& message) { - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_ERROR(Prefix()+(std::string)message); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogInfo(const Formatter::format& message) { - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_INFO(Prefix()+(std::string)message); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogDebug(const Formatter::format& message) { - if (!DefaultLogger::isNullLogger()) { - ASSIMP_LOG_DEBUG(Prefix()+(std::string)message); - } - } - - // https://sourceforge.net/tracker/?func=detail&atid=1067632&aid=3358562&group_id=226462 -#if !defined(__GNUC__) || !defined(__APPLE__) || __GNUC__ > 4 || 
(__GNUC__ == 4 && __GNUC_MINOR__ >= 6) - - // ------------------------------------------------------------------------------------------------ - static void LogWarn (const char* message) { - if (!DefaultLogger::isNullLogger()) { - LogWarn(Formatter::format(message)); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogError (const char* message) { - if (!DefaultLogger::isNullLogger()) { - LogError(Formatter::format(message)); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogInfo (const char* message) { - if (!DefaultLogger::isNullLogger()) { - LogInfo(Formatter::format(message)); - } - } - - // ------------------------------------------------------------------------------------------------ - static void LogDebug (const char* message) { - if (!DefaultLogger::isNullLogger()) { - LogDebug(Formatter::format(message)); - } - } - -#endif - -private: - static const char* Prefix(); - -}; -} // ! Assimp - -#endif diff --git a/thirdparty/assimp/include/assimp/LogStream.hpp b/thirdparty/assimp/include/assimp/LogStream.hpp deleted file mode 100644 index d0281e2d02..0000000000 --- a/thirdparty/assimp/include/assimp/LogStream.hpp +++ /dev/null @@ -1,111 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file LogStream.hpp - * @brief Abstract base class 'LogStream', representing an output log stream. - */ -#ifndef INCLUDED_AI_LOGSTREAM_H -#define INCLUDED_AI_LOGSTREAM_H - -#include "types.h" - -namespace Assimp { - -class IOSystem; - -// ------------------------------------------------------------------------------------ -/** @brief CPP-API: Abstract interface for log stream implementations. 
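LogAux.h, deleted just above, is an internal CRTP helper used by the individual loaders. A sketch of the intended usage follows: a loader derives from `LogFunctions<Self>` and supplies the `Prefix()` definition that the template only declares. `MyLoader` and its prefix string are hypothetical names for illustration.

```cpp
#include <assimp/LogAux.h>  // the header deleted above

class MyLoader;  // hypothetical loader; declared so Prefix() can be specialized first

namespace Assimp {
// Prefix() is only declared by the template; each deriving class supplies a
// definition, which becomes the prefix of every message it logs.
template <>
const char *LogFunctions<MyLoader>::Prefix() {
    static const char prefix[] = "MyLoader: ";
    return prefix;
}
} // namespace Assimp

// Deriving gives the loader prefixed LogWarn/LogError/LogInfo/LogDebug helpers
// plus ThrowException(), which raises DeadlyImportError with the same prefix.
class MyLoader : public Assimp::LogFunctions<MyLoader> {
public:
    void parse_chunk(bool ok) {
        if (!ok) {
            LogWarn("skipping malformed chunk");  // -> "MyLoader: skipping malformed chunk"
        }
    }
};
```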
- * - * Several default implementations are provided, see #aiDefaultLogStream for more - * details. Writing your own implementation of LogStream is just necessary if these - * are not enough for your purpose. */ -class ASSIMP_API LogStream -#ifndef SWIG - : public Intern::AllocateFromAssimpHeap -#endif -{ -protected: - /** @brief Default constructor */ - LogStream() AI_NO_EXCEPT; - -public: - /** @brief Virtual destructor */ - virtual ~LogStream(); - - // ------------------------------------------------------------------- - /** @brief Overwrite this for your own output methods - * - * Log messages *may* consist of multiple lines and you shouldn't - * expect a consistent formatting. If you want custom formatting - * (e.g. generate HTML), supply a custom instance of Logger to - * #DefaultLogger:set(). Usually you can *expect* that a log message - * is exactly one line and terminated with a single \n character. - * @param message Message to be written */ - virtual void write(const char* message) = 0; - - // ------------------------------------------------------------------- - /** @brief Creates a default log stream - * @param streams Type of the default stream - * @param name For aiDefaultLogStream_FILE: name of the output file - * @param io For aiDefaultLogStream_FILE: IOSystem to be used to open the output - * file. Pass NULL for the default implementation. - * @return New LogStream instance. */ - static LogStream* createDefaultStream(aiDefaultLogStream stream, - const char* name = "AssimpLog.txt", - IOSystem* io = nullptr ); - -}; // !class LogStream - -inline -LogStream::LogStream() AI_NO_EXCEPT { - // empty -} - -inline -LogStream::~LogStream() { - // empty -} - -// ------------------------------------------------------------------------------------ -} // Namespace Assimp - -#endif diff --git a/thirdparty/assimp/include/assimp/Logger.hpp b/thirdparty/assimp/include/assimp/Logger.hpp deleted file mode 100644 index 89cade6c33..0000000000 --- a/thirdparty/assimp/include/assimp/Logger.hpp +++ /dev/null @@ -1,305 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Logger.hpp - * @brief Abstract base class 'Logger', base of the logging system. - */ -#ifndef INCLUDED_AI_LOGGER_H -#define INCLUDED_AI_LOGGER_H - -#include <assimp/types.h> -#include <assimp/TinyFormatter.h> - -namespace Assimp { - -class LogStream; - -// Maximum length of a log message. Longer messages are rejected. -#define MAX_LOG_MESSAGE_LENGTH 1024u - -// ---------------------------------------------------------------------------------- -/** @brief CPP-API: Abstract interface for logger implementations. - * Assimp provides a default implementation and uses it for almost all - * logging stuff ('DefaultLogger'). This class defines just basic logging - * behavior and is not of interest for you. Instead, take a look at #DefaultLogger. */ -class ASSIMP_API Logger -#ifndef SWIG - : public Intern::AllocateFromAssimpHeap -#endif -{ -public: - - // ---------------------------------------------------------------------- - /** @enum LogSeverity - * @brief Log severity to describe the granularity of logging. - */ - enum LogSeverity { - NORMAL, //!< Normal granularity of logging - VERBOSE //!< Debug infos will be logged, too - }; - - // ---------------------------------------------------------------------- - /** @enum ErrorSeverity - * @brief Description for severity of a log message. - * - * Every LogStream has a bitwise combination of these flags. - * A LogStream doesn't receive any messages of a specific type - * if it doesn't specify the corresponding ErrorSeverity flag. - */ - enum ErrorSeverity { - Debugging = 1, //!< Debug log message - Info = 2, //!< Info log message - Warn = 4, //!< Warn log message - Err = 8 //!< Error log message - }; - -public: - - /** @brief Virtual destructor */ - virtual ~Logger(); - - // ---------------------------------------------------------------------- - /** @brief Writes a debug message - * @param message Debug message*/ - void debug(const char* message); - void debug(const std::string &message); - - // ---------------------------------------------------------------------- - /** @brief Writes a info message - * @param message Info message*/ - void info(const char* message); - void info(const std::string &message); - - // ---------------------------------------------------------------------- - /** @brief Writes a warning message - * @param message Warn message*/ - void warn(const char* message); - void warn(const std::string &message); - - // ---------------------------------------------------------------------- - /** @brief Writes an error message - * @param message Error message*/ - void error(const char* message); - void error(const std::string &message); - - // ---------------------------------------------------------------------- - /** @brief Set a new log severity. 
- * @param log_severity New severity for logging*/ - void setLogSeverity(LogSeverity log_severity); - - // ---------------------------------------------------------------------- - /** @brief Get the current log severity*/ - LogSeverity getLogSeverity() const; - - // ---------------------------------------------------------------------- - /** @brief Attach a new log-stream - * - * The logger takes ownership of the stream and is responsible - * for its destruction (which is done using ::delete when the logger - * itself is destroyed). Call detachStream to detach a stream and to - * gain ownership of it again. - * @param pStream Log-stream to attach - * @param severity Message filter, specified which types of log - * messages are dispatched to the stream. Provide a bitwise - * combination of the ErrorSeverity flags. - * @return true if the stream has been attached, false otherwise.*/ - virtual bool attachStream(LogStream *pStream, - unsigned int severity = Debugging | Err | Warn | Info) = 0; - - // ---------------------------------------------------------------------- - /** @brief Detach a still attached stream from the logger (or - * modify the filter flags bits) - * @param pStream Log-stream instance for detaching - * @param severity Provide a bitwise combination of the ErrorSeverity - * flags. This value is &~ed with the current flags of the stream, - * if the result is 0 the stream is detached from the Logger and - * the caller retakes the possession of the stream. - * @return true if the stream has been detached, false otherwise.*/ - virtual bool detatchStream(LogStream *pStream, - unsigned int severity = Debugging | Err | Warn | Info) = 0; - -protected: - /** - * Default constructor - */ - Logger() AI_NO_EXCEPT; - - /** - * Construction with a given log severity - */ - explicit Logger(LogSeverity severity); - - // ---------------------------------------------------------------------- - /** - * @brief Called as a request to write a specific debug message - * @param message Debug message. Never longer than - * MAX_LOG_MESSAGE_LENGTH characters (excluding the '0'). - * @note The message string is only valid until the scope of - * the function is left. - */ - virtual void OnDebug(const char* message)= 0; - - // ---------------------------------------------------------------------- - /** - * @brief Called as a request to write a specific info message - * @param message Info message. Never longer than - * MAX_LOG_MESSAGE_LENGTH characters (ecxluding the '0'). - * @note The message string is only valid until the scope of - * the function is left. - */ - virtual void OnInfo(const char* message) = 0; - - // ---------------------------------------------------------------------- - /** - * @brief Called as a request to write a specific warn message - * @param message Warn message. Never longer than - * MAX_LOG_MESSAGE_LENGTH characters (exluding the '0'). - * @note The message string is only valid until the scope of - * the function is left. - */ - virtual void OnWarn(const char* essage) = 0; - - // ---------------------------------------------------------------------- - /** - * @brief Called as a request to write a specific error message - * @param message Error message. Never longer than - * MAX_LOG_MESSAGE_LENGTH characters (exluding the '0'). - * @note The message string is only valid until the scope of - * the function is left. 
- */ - virtual void OnError(const char* message) = 0; - -protected: - LogSeverity m_Severity; -}; - -// ---------------------------------------------------------------------------------- -// Default constructor -inline -Logger::Logger() AI_NO_EXCEPT -: m_Severity(NORMAL) { - // empty -} - -// ---------------------------------------------------------------------------------- -// Virtual destructor -inline -Logger::~Logger() { - // empty -} - -// ---------------------------------------------------------------------------------- -// Construction with given logging severity -inline -Logger::Logger(LogSeverity severity) -: m_Severity(severity) { - // empty -} - -// ---------------------------------------------------------------------------------- -// Log severity setter -inline -void Logger::setLogSeverity(LogSeverity log_severity){ - m_Severity = log_severity; -} - -// ---------------------------------------------------------------------------------- -// Log severity getter -inline -Logger::LogSeverity Logger::getLogSeverity() const { - return m_Severity; -} - -// ---------------------------------------------------------------------------------- -inline -void Logger::debug(const std::string &message) { - return debug(message.c_str()); -} - -// ---------------------------------------------------------------------------------- -inline -void Logger::error(const std::string &message) { - return error(message.c_str()); -} - -// ---------------------------------------------------------------------------------- -inline -void Logger::warn(const std::string &message) { - return warn(message.c_str()); -} - -// ---------------------------------------------------------------------------------- -inline -void Logger::info(const std::string &message) { - return info(message.c_str()); -} - -// ------------------------------------------------------------------------------------------------ -#define ASSIMP_LOG_WARN_F(string,...)\ - DefaultLogger::get()->warn((Formatter::format(string),__VA_ARGS__)) - -#define ASSIMP_LOG_ERROR_F(string,...)\ - DefaultLogger::get()->error((Formatter::format(string),__VA_ARGS__)) - -#define ASSIMP_LOG_DEBUG_F(string,...)\ - DefaultLogger::get()->debug((Formatter::format(string),__VA_ARGS__)) - -#define ASSIMP_LOG_INFO_F(string,...)\ - DefaultLogger::get()->info((Formatter::format(string),__VA_ARGS__)) - - -#define ASSIMP_LOG_WARN(string)\ - DefaultLogger::get()->warn(string) - -#define ASSIMP_LOG_ERROR(string)\ - DefaultLogger::get()->error(string) - -#define ASSIMP_LOG_DEBUG(string)\ - DefaultLogger::get()->debug(string) - -#define ASSIMP_LOG_INFO(string)\ - DefaultLogger::get()->info(string) - - -} // Namespace Assimp - -#endif // !! INCLUDED_AI_LOGGER_H diff --git a/thirdparty/assimp/include/assimp/MathFunctions.h b/thirdparty/assimp/include/assimp/MathFunctions.h deleted file mode 100644 index b6c5872a72..0000000000 --- a/thirdparty/assimp/include/assimp/MathFunctions.h +++ /dev/null @@ -1,90 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2016, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
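Logger.hpp above defines the stream-attachment API and the ASSIMP_LOG_* macros. DefaultLogger itself lives in `<assimp/DefaultLogger.hpp>`, which is not part of this hunk, so the create()/get()/kill() calls below are assumptions based on that header; the custom stream only relies on the `write()` hook shown in LogStream.hpp above.

```cpp
#include <assimp/DefaultLogger.hpp>  // not in this hunk; create()/get()/kill() assumed
#include <assimp/LogStream.hpp>
#include <assimp/Logger.hpp>
#include <cstdio>

// Minimal LogStream: the logger hands every formatted message to write().
class StderrStream : public Assimp::LogStream {
public:
    void write(const char *message) override {
        std::fputs(message, stderr);
    }
};

void setup_logging() {
    using namespace Assimp;

    // Replace the default null logger with a verbose logger instance.
    DefaultLogger::create("", Logger::VERBOSE);

    // The logger takes ownership of the stream; the severity mask routes only
    // warnings and errors to it (see the ErrorSeverity flags above).
    DefaultLogger::get()->attachStream(new StderrStream, Logger::Warn | Logger::Err);

    // Info messages never reach StderrStream because of the mask above.
    ASSIMP_LOG_INFO("logging initialised");
}

void shutdown_logging() {
    Assimp::DefaultLogger::kill();  // deletes the logger and its attached streams
}
```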
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -#pragma once - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -/** @file MathFunctions.h -* @brief Implementation of math utility functions. - * -*/ - -#include <limits> - -namespace Assimp { -namespace Math { - -// TODO: use binary GCD for unsigned integers .... -template < typename IntegerType > -inline -IntegerType gcd( IntegerType a, IntegerType b ) { - const IntegerType zero = (IntegerType)0; - while ( true ) { - if ( a == zero ) - return b; - b %= a; - - if ( b == zero ) - return a; - a %= b; - } -} - -template < typename IntegerType > -inline -IntegerType lcm( IntegerType a, IntegerType b ) { - const IntegerType t = gcd (a,b); - if (!t) - return t; - return a / t * b; -} - -template<class T> -inline -T getEpsilon() { - return std::numeric_limits<T>::epsilon(); -} - -} -} diff --git a/thirdparty/assimp/include/assimp/MemoryIOWrapper.h b/thirdparty/assimp/include/assimp/MemoryIOWrapper.h deleted file mode 100644 index 5598d4fc5f..0000000000 --- a/thirdparty/assimp/include/assimp/MemoryIOWrapper.h +++ /dev/null @@ -1,250 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
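A trivial sketch of the math helpers deleted above (gcd, lcm, getEpsilon), using only what the header itself defines.

```cpp
#include <assimp/MathFunctions.h>  // the header deleted above
#include <cassert>

void math_helpers_example() {
    using namespace Assimp::Math;

    assert(gcd(12, 18) == 6);  // greatest common divisor
    assert(lcm(4, 6) == 12);   // least common multiple: a / gcd(a,b) * b

    // getEpsilon<T>() just forwards to std::numeric_limits<T>::epsilon(),
    // which the library uses for floating-point comparisons.
    const float eps = getEpsilon<float>();
    assert(eps > 0.0f);
}
```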
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file MemoryIOWrapper.h - * Handy IOStream/IOSystem implemetation to read directly from a memory buffer */ -#pragma once -#ifndef AI_MEMORYIOSTREAM_H_INC -#define AI_MEMORYIOSTREAM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/IOStream.hpp> -#include <assimp/IOSystem.hpp> -#include <assimp/ai_assert.h> - -#include <stdint.h> - -namespace Assimp { - -#define AI_MEMORYIO_MAGIC_FILENAME "$$$___magic___$$$" -#define AI_MEMORYIO_MAGIC_FILENAME_LENGTH 17 - -// ---------------------------------------------------------------------------------- -/** Implementation of IOStream to read directly from a memory buffer */ -// ---------------------------------------------------------------------------------- -class MemoryIOStream : public IOStream { -public: - MemoryIOStream (const uint8_t* buff, size_t len, bool own = false) - : buffer (buff) - , length(len) - , pos((size_t)0) - , own(own) { - // empty - } - - ~MemoryIOStream () { - if(own) { - delete[] buffer; - } - } - - // ------------------------------------------------------------------- - // Read from stream - size_t Read(void* pvBuffer, size_t pSize, size_t pCount) { - ai_assert(nullptr != pvBuffer); - ai_assert(0 != pSize); - - const size_t cnt = std::min( pCount, (length-pos) / pSize); - const size_t ofs = pSize * cnt; - - ::memcpy(pvBuffer,buffer+pos,ofs); - pos += ofs; - - return cnt; - } - - // ------------------------------------------------------------------- - // Write to stream - size_t Write(const void* /*pvBuffer*/, size_t /*pSize*/,size_t /*pCount*/) { - ai_assert(false); // won't be needed - return 0; - } - - // ------------------------------------------------------------------- - // Seek specific position - aiReturn Seek(size_t pOffset, aiOrigin pOrigin) { - if (aiOrigin_SET == pOrigin) { - if (pOffset > length) { - return AI_FAILURE; - } - pos = pOffset; - } else if (aiOrigin_END == pOrigin) { - if (pOffset > length) { - return AI_FAILURE; - } - pos = length-pOffset; - } else { - if (pOffset+pos > length) { - return AI_FAILURE; - } - pos += pOffset; - } - return AI_SUCCESS; - } - - // ------------------------------------------------------------------- - // Get current seek position - size_t Tell() const { - return pos; - } - - // ------------------------------------------------------------------- - // Get size of file - size_t FileSize() const { - return length; - } - - // ------------------------------------------------------------------- - // Flush file contents - void Flush() { - ai_assert(false); // won't be needed - } - -private: - const uint8_t* buffer; - size_t length,pos; - bool own; -}; - -// --------------------------------------------------------------------------- -/** Dummy IO system to read from a memory buffer */ -class MemoryIOSystem : public IOSystem { -public: - /** Constructor. 
*/ - MemoryIOSystem(const uint8_t* buff, size_t len, IOSystem* io) - : buffer(buff) - , length(len) - , existing_io(io) - , created_streams() { - // empty - } - - /** Destructor. */ - ~MemoryIOSystem() { - } - - // ------------------------------------------------------------------- - /** Tests for the existence of a file at the given path. */ - bool Exists(const char* pFile) const override { - if (0 == strncmp( pFile, AI_MEMORYIO_MAGIC_FILENAME, AI_MEMORYIO_MAGIC_FILENAME_LENGTH ) ) { - return true; - } - return existing_io ? existing_io->Exists(pFile) : false; - } - - // ------------------------------------------------------------------- - /** Returns the directory separator. */ - char getOsSeparator() const override { - return existing_io ? existing_io->getOsSeparator() - : '/'; // why not? it doesn't care - } - - // ------------------------------------------------------------------- - /** Open a new file with a given path. */ - IOStream* Open(const char* pFile, const char* pMode = "rb") override { - if ( 0 == strncmp( pFile, AI_MEMORYIO_MAGIC_FILENAME, AI_MEMORYIO_MAGIC_FILENAME_LENGTH ) ) { - created_streams.emplace_back(new MemoryIOStream(buffer, length)); - return created_streams.back(); - } - return existing_io ? existing_io->Open(pFile, pMode) : NULL; - } - - // ------------------------------------------------------------------- - /** Closes the given file and releases all resources associated with it. */ - void Close( IOStream* pFile) override { - auto it = std::find(created_streams.begin(), created_streams.end(), pFile); - if (it != created_streams.end()) { - delete pFile; - created_streams.erase(it); - } else if (existing_io) { - existing_io->Close(pFile); - } - } - - // ------------------------------------------------------------------- - /** Compare two paths */ - bool ComparePaths(const char* one, const char* second) const override { - return existing_io ? existing_io->ComparePaths(one, second) : false; - } - - bool PushDirectory( const std::string &path ) override { - return existing_io ? existing_io->PushDirectory(path) : false; - } - - const std::string &CurrentDirectory() const override { - static std::string empty; - return existing_io ? existing_io->CurrentDirectory() : empty; - } - - size_t StackSize() const override { - return existing_io ? existing_io->StackSize() : 0; - } - - bool PopDirectory() override { - return existing_io ? existing_io->PopDirectory() : false; - } - - bool CreateDirectory( const std::string &path ) override { - return existing_io ? existing_io->CreateDirectory(path) : false; - } - - bool ChangeDirectory( const std::string &path ) override { - return existing_io ? existing_io->ChangeDirectory(path) : false; - } - - bool DeleteFile( const std::string &file ) override { - return existing_io ? existing_io->DeleteFile(file) : false; - } - -private: - const uint8_t* buffer; - size_t length; - IOSystem* existing_io; - std::vector<IOStream*> created_streams; -}; - -} // end namespace Assimp - -#endif diff --git a/thirdparty/assimp/include/assimp/NullLogger.hpp b/thirdparty/assimp/include/assimp/NullLogger.hpp deleted file mode 100644 index c45d01bd48..0000000000 --- a/thirdparty/assimp/include/assimp/NullLogger.hpp +++ /dev/null @@ -1,99 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
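A hedged caller sketch for the MemoryIOStream removed above (the function name is made up; ownership stays with the caller because own defaults to false):

    #include <assimp/MemoryIOWrapper.h>

    // Copies up to 16 bytes out of an in-memory "file"; Read() returns the number
    // of whole elements delivered, mirroring fread() semantics.
    static size_t peekHeader(const uint8_t *data, size_t len, char out[16]) {
        Assimp::MemoryIOStream stream(data, len);  // own = false, so no delete[]
        return stream.Read(out, 1, 16);
    }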
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file NullLogger.hpp - * @brief Dummy logger -*/ - -#ifndef INCLUDED_AI_NULLLOGGER_H -#define INCLUDED_AI_NULLLOGGER_H - -#include "Logger.hpp" - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** @brief CPP-API: Empty logging implementation. - * - * Does nothing! Used by default if the application hasn't requested a - * custom logger via #DefaultLogger::set() or #DefaultLogger::create(); */ -class ASSIMP_API NullLogger - : public Logger { - -public: - - /** @brief Logs a debug message */ - void OnDebug(const char* message) { - (void)message; //this avoids compiler warnings - } - - /** @brief Logs an info message */ - void OnInfo(const char* message) { - (void)message; //this avoids compiler warnings - } - - /** @brief Logs a warning message */ - void OnWarn(const char* message) { - (void)message; //this avoids compiler warnings - } - - /** @brief Logs an error message */ - void OnError(const char* message) { - (void)message; //this avoids compiler warnings - } - - /** @brief Detach a still attached stream from logger */ - bool attachStream(LogStream *pStream, unsigned int severity) { - (void)pStream; (void)severity; //this avoids compiler warnings - return false; - } - - /** @brief Detach a still attached stream from logger */ - bool detatchStream(LogStream *pStream, unsigned int severity) { - (void)pStream; (void)severity; //this avoids compiler warnings - return false; - } - -private: -}; -} -#endif // !! AI_NULLLOGGER_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/ParsingUtils.h b/thirdparty/assimp/include/assimp/ParsingUtils.h deleted file mode 100644 index 3025601246..0000000000 --- a/thirdparty/assimp/include/assimp/ParsingUtils.h +++ /dev/null @@ -1,264 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
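The NullLogger above is the stock way to mute Assimp's logging; a hedged sketch, assuming DefaultLogger::set() (declared in DefaultLogger.hpp, outside this hunk) accepts a raw Logger pointer:

    #include <assimp/DefaultLogger.hpp>
    #include <assimp/NullLogger.hpp>

    void muteAssimpLogging() {
        static Assimp::NullLogger sink;      // every On*() override above is a no-op
        Assimp::DefaultLogger::set(&sink);   // assumption: set() takes a raw Logger*
    }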
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - - -/** @file ParsingUtils.h - * @brief Defines helper functions for text parsing - */ -#pragma once -#ifndef AI_PARSING_UTILS_H_INC -#define AI_PARSING_UTILS_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/StringComparison.h> -#include <assimp/StringUtils.h> -#include <assimp/defs.h> - -namespace Assimp { - -// NOTE: the functions below are mostly intended as replacement for -// std::upper, std::lower, std::isupper, std::islower, std::isspace. -// we don't bother of locales. We don't want them. We want reliable -// (i.e. identical) results across all locales. - -// The functions below accept any character type, but know only -// about ASCII. However, UTF-32 is the only safe ASCII superset to -// use since it doesn't have multi-byte sequences. - -static const unsigned int BufferSize = 4096; - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -char_t ToLower( char_t in ) { - return (in >= (char_t)'A' && in <= (char_t)'Z') ? (char_t)(in+0x20) : in; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -char_t ToUpper( char_t in) { - return (in >= (char_t)'a' && in <= (char_t)'z') ? 
(char_t)(in-0x20) : in; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool IsUpper( char_t in) { - return (in >= (char_t)'A' && in <= (char_t)'Z'); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool IsLower( char_t in) { - return (in >= (char_t)'a' && in <= (char_t)'z'); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool IsSpace( char_t in) { - return (in == (char_t)' ' || in == (char_t)'\t'); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool IsLineEnd( char_t in) { - return (in==(char_t)'\r'||in==(char_t)'\n'||in==(char_t)'\0'||in==(char_t)'\f'); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool IsSpaceOrNewLine( char_t in) { - return IsSpace<char_t>(in) || IsLineEnd<char_t>(in); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipSpaces( const char_t* in, const char_t** out) { - while( *in == ( char_t )' ' || *in == ( char_t )'\t' ) { - ++in; - } - *out = in; - return !IsLineEnd<char_t>(*in); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipSpaces( const char_t** inout) { - return SkipSpaces<char_t>(*inout,inout); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipLine( const char_t* in, const char_t** out) { - while( *in != ( char_t )'\r' && *in != ( char_t )'\n' && *in != ( char_t )'\0' ) { - ++in; - } - - // files are opened in binary mode. 
Ergo there are both NL and CR - while( *in == ( char_t )'\r' || *in == ( char_t )'\n' ) { - ++in; - } - *out = in; - return *in != (char_t)'\0'; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipLine( const char_t** inout) { - return SkipLine<char_t>(*inout,inout); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipSpacesAndLineEnd( const char_t* in, const char_t** out) { - while( *in == ( char_t )' ' || *in == ( char_t )'\t' || *in == ( char_t )'\r' || *in == ( char_t )'\n' ) { - ++in; - } - *out = in; - return *in != '\0'; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool SkipSpacesAndLineEnd( const char_t** inout) { - return SkipSpacesAndLineEnd<char_t>(*inout,inout); -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool GetNextLine( const char_t*& buffer, char_t out[ BufferSize ] ) { - if( ( char_t )'\0' == *buffer ) { - return false; - } - - char* _out = out; - char* const end = _out + BufferSize; - while( !IsLineEnd( *buffer ) && _out < end ) { - *_out++ = *buffer++; - } - *_out = (char_t)'\0'; - - while( IsLineEnd( *buffer ) && '\0' != *buffer ) { - ++buffer; - } - - return true; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE bool IsNumeric( char_t in) { - return ( in >= '0' && in <= '9' ) || '-' == in || '+' == in; -} - -// --------------------------------------------------------------------------------- -template <class char_t> -AI_FORCE_INLINE -bool TokenMatch(char_t*& in, const char* token, unsigned int len) -{ - if (!::strncmp(token,in,len) && IsSpaceOrNewLine(in[len])) { - if (in[len] != '\0') { - in += len+1; - } else { - // If EOF after the token make sure we don't go past end of buffer - in += len; - } - return true; - } - - return false; -} -// --------------------------------------------------------------------------------- -/** @brief Case-ignoring version of TokenMatch - * @param in Input - * @param token Token to check for - * @param len Number of characters to check - */ -AI_FORCE_INLINE -bool TokenMatchI(const char*& in, const char* token, unsigned int len) { - if (!ASSIMP_strincmp(token,in,len) && IsSpaceOrNewLine(in[len])) { - in += len+1; - return true; - } - return false; -} - -// --------------------------------------------------------------------------------- -AI_FORCE_INLINE -void SkipToken(const char*& in) { - SkipSpaces(&in); - while ( !IsSpaceOrNewLine( *in ) ) { - ++in; - } -} - -// --------------------------------------------------------------------------------- -AI_FORCE_INLINE -std::string GetNextToken(const char*& in) { - SkipSpacesAndLineEnd(&in); - const char* cur = in; - while ( !IsSpaceOrNewLine( *in ) ) { - ++in; - } - return std::string(cur,(size_t)(in-cur)); -} - -// --------------------------------------------------------------------------------- - -} // ! namespace Assimp - -#endif // ! 
AI_PARSING_UTILS_H_INC diff --git a/thirdparty/assimp/include/assimp/Profiler.h b/thirdparty/assimp/include/assimp/Profiler.h deleted file mode 100644 index 624029be99..0000000000 --- a/thirdparty/assimp/include/assimp/Profiler.h +++ /dev/null @@ -1,103 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Profiler.h - * @brief Utility to measure the respective runtime of each import step - */ -#pragma once -#ifndef AI_INCLUDED_PROFILER_H -#define AI_INCLUDED_PROFILER_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <chrono> -#include <assimp/DefaultLogger.hpp> -#include <assimp/TinyFormatter.h> - -#include <map> - -namespace Assimp { -namespace Profiling { - -using namespace Formatter; - -// ------------------------------------------------------------------------------------------------ -/** Simple wrapper around boost::timer to simplify reporting. Timings are automatically - * dumped to the log file. 
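Stepping back briefly: a hedged caller sketch for the ParsingUtils helpers removed above (the input line is hypothetical):

    #include <assimp/ParsingUtils.h>
    #include <string>

    // Skips leading blanks and line ends, reads one token, then lower-cases it with
    // the locale-independent ASCII helpers declared above.
    static std::string firstTokenLower(const char *line) {
        std::string token = Assimp::GetNextToken(line);
        for (char &c : token) {
            c = Assimp::ToLower(c);
        }
        return token;
    }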
- */ -class Profiler { -public: - Profiler() { - // empty - } - - - /** Start a named timer */ - void BeginRegion(const std::string& region) { - regions[region] = std::chrono::system_clock::now(); - ASSIMP_LOG_DEBUG((format("START `"),region,"`")); - } - - - /** End a specific named timer and write its end time to the log */ - void EndRegion(const std::string& region) { - RegionMap::const_iterator it = regions.find(region); - if (it == regions.end()) { - return; - } - - std::chrono::duration<double> elapsedSeconds = std::chrono::system_clock::now() - regions[region]; - ASSIMP_LOG_DEBUG((format("END `"),region,"`, dt= ", elapsedSeconds.count()," s")); - } - -private: - typedef std::map<std::string,std::chrono::time_point<std::chrono::system_clock>> RegionMap; - RegionMap regions; -}; - -} -} - -#endif // AI_INCLUDED_PROFILER_H - diff --git a/thirdparty/assimp/include/assimp/ProgressHandler.hpp b/thirdparty/assimp/include/assimp/ProgressHandler.hpp deleted file mode 100644 index 8991a64618..0000000000 --- a/thirdparty/assimp/include/assimp/ProgressHandler.hpp +++ /dev/null @@ -1,149 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file ProgressHandler.hpp - * @brief Abstract base class 'ProgressHandler'. - */ -#pragma once -#ifndef AI_PROGRESSHANDLER_H_INC -#define AI_PROGRESSHANDLER_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -namespace Assimp { - -// ------------------------------------------------------------------------------------ -/** @brief CPP-API: Abstract interface for custom progress report receivers. - * - * Each #Importer instance maintains its own #ProgressHandler. The default - * implementation provided by Assimp doesn't do anything at all. 
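Returning to the Profiler removed just above, a hedged usage sketch (the region name is made up; a verbose DefaultLogger is assumed to be installed so the timings actually reach the log):

    #include <assimp/Profiler.h>

    // BeginRegion()/EndRegion() bracket a named step; the elapsed time is written
    // to the debug log by the EndRegion() call.
    static void timeImportStep() {
        Assimp::Profiling::Profiler profiler;
        profiler.BeginRegion("postprocess");
        // ... the timed work would run here ...
        profiler.EndRegion("postprocess");
    }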
*/ -class ASSIMP_API ProgressHandler -#ifndef SWIG - : public Intern::AllocateFromAssimpHeap -#endif -{ -protected: - /// @brief Default constructor - ProgressHandler () AI_NO_EXCEPT { - // empty - } - -public: - /// @brief Virtual destructor. - virtual ~ProgressHandler () { - } - - // ------------------------------------------------------------------- - /** @brief Progress callback. - * @param percentage An estimate of the current loading progress, - * in percent. Or -1.f if such an estimate is not available. - * - * There are restriction on what you may do from within your - * implementation of this method: no exceptions may be thrown and no - * non-const #Importer methods may be called. It is - * not generally possible to predict the number of callbacks - * fired during a single import. - * - * @return Return false to abort loading at the next possible - * occasion (loaders and Assimp are generally allowed to perform - * all needed cleanup tasks prior to returning control to the - * caller). If the loading is aborted, #Importer::ReadFile() - * returns always NULL. - * */ - virtual bool Update(float percentage = -1.f) = 0; - - // ------------------------------------------------------------------- - /** @brief Progress callback for file loading steps - * @param numberOfSteps The number of total post-processing - * steps - * @param currentStep The index of the current post-processing - * step that will run, or equal to numberOfSteps if all of - * them has finished. This number is always strictly monotone - * increasing, although not necessarily linearly. - * - * @note This is currently only used at the start and the end - * of the file parsing. - * */ - virtual void UpdateFileRead(int currentStep /*= 0*/, int numberOfSteps /*= 0*/) { - float f = numberOfSteps ? currentStep / (float)numberOfSteps : 1.0f; - Update( f * 0.5f ); - } - - // ------------------------------------------------------------------- - /** @brief Progress callback for post-processing steps - * @param numberOfSteps The number of total post-processing - * steps - * @param currentStep The index of the current post-processing - * step that will run, or equal to numberOfSteps if all of - * them has finished. This number is always strictly monotone - * increasing, although not necessarily linearly. - * */ - virtual void UpdatePostProcess(int currentStep /*= 0*/, int numberOfSteps /*= 0*/) { - float f = numberOfSteps ? currentStep / (float)numberOfSteps : 1.0f; - Update( f * 0.5f + 0.5f ); - } - - - // ------------------------------------------------------------------- - /** @brief Progress callback for export steps. - * @param numberOfSteps The number of total processing - * steps - * @param currentStep The index of the current post-processing - * step that will run, or equal to numberOfSteps if all of - * them has finished. This number is always strictly monotone - * increasing, although not necessarily linearly. - * */ - virtual void UpdateFileWrite(int currentStep /*= 0*/, int numberOfSteps /*= 0*/) { - float f = numberOfSteps ? 
currentStep / (float)numberOfSteps : 1.0f; - Update(f * 0.5f); - } -}; // !class ProgressHandler - -// ------------------------------------------------------------------------------------ - -} // Namespace Assimp - -#endif // AI_PROGRESSHANDLER_H_INC diff --git a/thirdparty/assimp/include/assimp/RemoveComments.h b/thirdparty/assimp/include/assimp/RemoveComments.h deleted file mode 100644 index f129420535..0000000000 --- a/thirdparty/assimp/include/assimp/RemoveComments.h +++ /dev/null @@ -1,93 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Declares a helper class, "CommentRemover", which can be - * used to remove comments (single and multi line) from a text file. - */ -#pragma once -#ifndef AI_REMOVE_COMMENTS_H_INC -#define AI_REMOVE_COMMENTS_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** \brief Helper class to remove single and multi line comments from a file - * - * Some mesh formats like MD5 have comments that are quite similar - * to those in C or C++ so this code has been moved to a separate - * module. - */ -class ASSIMP_API CommentRemover { - // class cannot be instanced - CommentRemover() {} - -public: - - //! Remove single-line comments. The end of a line is - //! expected to be either NL or CR or NLCR. - //! \param szComment The start sequence of the comment, e.g. "//" - //! \param szBuffer Buffer to work with - //! \param chReplacement Character to be used as replacement - //! for commented lines. By default this is ' ' - static void RemoveLineComments(const char* szComment, - char* szBuffer, char chReplacement = ' '); - - //! Remove multi-line comments. The end of a line is - //! expected to be either NL or CR or NLCR. 
Multi-line comments - //! may not be nested (as in C). - //! \param szCommentStart The start sequence of the comment, e.g. "/*" - //! \param szCommentEnd The end sequence of the comment, e.g. "*/" - //! \param szBuffer Buffer to work with - //! \param chReplacement Character to be used as replacement - //! for commented lines. By default this is ' ' - static void RemoveMultiLineComments(const char* szCommentStart, - const char* szCommentEnd,char* szBuffer, - char chReplacement = ' '); -}; -} // ! Assimp - -#endif // !! AI_REMOVE_COMMENTS_H_INC diff --git a/thirdparty/assimp/include/assimp/SGSpatialSort.h b/thirdparty/assimp/include/assimp/SGSpatialSort.h deleted file mode 100644 index fdb5ce8174..0000000000 --- a/thirdparty/assimp/include/assimp/SGSpatialSort.h +++ /dev/null @@ -1,155 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** Small helper classes to optimize finding vertices close to a given location - */ -#pragma once -#ifndef AI_D3DSSPATIALSORT_H_INC -#define AI_D3DSSPATIALSORT_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <vector> -#include <stdint.h> - -namespace Assimp { - -// ---------------------------------------------------------------------------------- -/** Specialized version of SpatialSort to support smoothing groups - * This is used in by the 3DS, ASE and LWO loaders. 3DS and ASE share their - * normal computation code in SmoothingGroups.inl, the LWO loader has its own - * implementation to handle all details of its file format correctly. 
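Before SGSpatialSort itself, a hedged sketch for the CommentRemover removed just above (the buffer must be writable and NUL-terminated; the function name is made up):

    #include <assimp/RemoveComments.h>

    // Blanks out C/C++-style comments in place so a line-based parser never sees
    // them; the replacement character defaults to ' '.
    static void stripComments(char *buffer) {
        Assimp::CommentRemover::RemoveLineComments("//", buffer);
        Assimp::CommentRemover::RemoveMultiLineComments("/*", "*/", buffer);
    }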
- */ -// ---------------------------------------------------------------------------------- -class ASSIMP_API SGSpatialSort -{ -public: - - SGSpatialSort(); - - // ------------------------------------------------------------------- - /** Construction from a given face array, handling smoothing groups - * properly - */ - explicit SGSpatialSort(const std::vector<aiVector3D>& vPositions); - - // ------------------------------------------------------------------- - /** Add a vertex to the spatial sort - * @param vPosition Vertex position to be added - * @param index Index of the vrtex - * @param smoothingGroup SmoothingGroup for this vertex - */ - void Add(const aiVector3D& vPosition, unsigned int index, - unsigned int smoothingGroup); - - // ------------------------------------------------------------------- - /** Prepare the spatial sorter for use. This step runs in O(logn) - */ - void Prepare(); - - /** Destructor */ - ~SGSpatialSort(); - - // ------------------------------------------------------------------- - /** Returns an iterator for all positions close to the given position. - * @param pPosition The position to look for vertices. - * @param pSG Only included vertices with at least one shared smooth group - * @param pRadius Maximal distance from the position a vertex may have - * to be counted in. - * @param poResults The container to store the indices of the found - * positions. Will be emptied by the call so it may contain anything. - * @param exactMatch Specifies whether smoothing groups are bit masks - * (false) or integral values (true). In the latter case, a vertex - * cannot belong to more than one smoothing group. - * @return An iterator to iterate over all vertices in the given area. - */ - // ------------------------------------------------------------------- - void FindPositions( const aiVector3D& pPosition, uint32_t pSG, - float pRadius, std::vector<unsigned int>& poResults, - bool exactMatch = false) const; - -protected: - /** Normal of the sorting plane, normalized. The center is always at (0, 0, 0) */ - aiVector3D mPlaneNormal; - - // ------------------------------------------------------------------- - /** An entry in a spatially sorted position array. 
Consists of a - * vertex index, its position and its pre-calculated distance from - * the reference plane */ - // ------------------------------------------------------------------- - struct Entry { - unsigned int mIndex; ///< The vertex referred by this entry - aiVector3D mPosition; ///< Position - uint32_t mSmoothGroups; - float mDistance; ///< Distance of this vertex to the sorting plane - - Entry() AI_NO_EXCEPT - : mIndex(0) - , mPosition() - , mSmoothGroups(0) - , mDistance(0.0f) { - // empty - } - - Entry( unsigned int pIndex, const aiVector3D& pPosition, float pDistance,uint32_t pSG) - : mIndex( pIndex) - , mPosition( pPosition) - , mSmoothGroups(pSG) - , mDistance( pDistance) { - // empty - } - - bool operator < (const Entry& e) const { - return mDistance < e.mDistance; - } - }; - - // all positions, sorted by distance to the sorting plane - std::vector<Entry> mPositions; -}; - -} // end of namespace Assimp - -#endif // AI_SPATIALSORT_H_INC diff --git a/thirdparty/assimp/include/assimp/SceneCombiner.h b/thirdparty/assimp/include/assimp/SceneCombiner.h deleted file mode 100644 index 0683c1e052..0000000000 --- a/thirdparty/assimp/include/assimp/SceneCombiner.h +++ /dev/null @@ -1,410 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Declares a helper class, "SceneCombiner" providing various - * utilities to merge scenes. 
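Looking back at SGSpatialSort before the SceneCombiner declarations: a hedged caller sketch for the Add()/Prepare()/FindPositions() interface removed above (positions, radius and smoothing-group values are illustrative):

    #include <assimp/SGSpatialSort.h>
    #include <vector>

    // Collects the indices of all vertices within `radius` of `query` that share
    // at least one smoothing-group bit with `group`.
    static void neighboursInGroup(const std::vector<aiVector3D> &positions,
                                  const std::vector<uint32_t> &groups,
                                  const aiVector3D &query, uint32_t group,
                                  float radius, std::vector<unsigned int> &out) {
        Assimp::SGSpatialSort sorter;
        for (unsigned int i = 0; i < positions.size(); ++i) {
            sorter.Add(positions[i], i, groups[i]);
        }
        sorter.Prepare();                          // sorts along the internal plane normal
        sorter.FindPositions(query, group, radius, out);
    }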
- */ -#pragma once -#ifndef AI_SCENE_COMBINER_H_INC -#define AI_SCENE_COMBINER_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/ai_assert.h> -#include <assimp/types.h> -#include <assimp/Defines.h> - -#include <stddef.h> -#include <set> -#include <list> -#include <stdint.h> -#include <vector> - -struct aiScene; -struct aiNode; -struct aiMaterial; -struct aiTexture; -struct aiCamera; -struct aiLight; -struct aiMetadata; -struct aiBone; -struct aiMesh; -struct aiAnimMesh; -struct aiAnimation; -struct aiNodeAnim; -struct aiMeshMorphAnim; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** \brief Helper data structure for SceneCombiner. - * - * Describes to which node a scene must be attached to. - */ -struct AttachmentInfo -{ - AttachmentInfo() - : scene (NULL) - , attachToNode (NULL) - {} - - AttachmentInfo(aiScene* _scene, aiNode* _attachToNode) - : scene (_scene) - , attachToNode (_attachToNode) - {} - - aiScene* scene; - aiNode* attachToNode; -}; - -// --------------------------------------------------------------------------- -struct NodeAttachmentInfo -{ - NodeAttachmentInfo() - : node (NULL) - , attachToNode (NULL) - , resolved (false) - , src_idx (SIZE_MAX) - {} - - NodeAttachmentInfo(aiNode* _scene, aiNode* _attachToNode,size_t idx) - : node (_scene) - , attachToNode (_attachToNode) - , resolved (false) - , src_idx (idx) - {} - - aiNode* node; - aiNode* attachToNode; - bool resolved; - size_t src_idx; -}; - -// --------------------------------------------------------------------------- -/** @def AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES - * Generate unique names for all named scene items - */ -#define AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES 0x1 - -/** @def AI_INT_MERGE_SCENE_GEN_UNIQUE_MATNAMES - * Generate unique names for materials, too. - * This is not absolutely required to pass the validation. - */ -#define AI_INT_MERGE_SCENE_GEN_UNIQUE_MATNAMES 0x2 - -/** @def AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY - * Use deep copies of duplicate scenes - */ -#define AI_INT_MERGE_SCENE_DUPLICATES_DEEP_CPY 0x4 - -/** @def AI_INT_MERGE_SCENE_RESOLVE_CROSS_ATTACHMENTS - * If attachment nodes are not found in the given master scene, - * search the other imported scenes for them in an any order. - */ -#define AI_INT_MERGE_SCENE_RESOLVE_CROSS_ATTACHMENTS 0x8 - -/** @def AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY - * Can be combined with AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES. - * Unique names are generated, but only if this is absolutely - * required to avoid name conflicts. - */ -#define AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY 0x10 - -typedef std::pair<aiBone*,unsigned int> BoneSrcIndex; - -// --------------------------------------------------------------------------- -/** @brief Helper data structure for SceneCombiner::MergeBones. - */ -struct BoneWithHash : public std::pair<uint32_t,aiString*> { - std::vector<BoneSrcIndex> pSrcBones; -}; - -// --------------------------------------------------------------------------- -/** @brief Utility for SceneCombiner - */ -struct SceneHelper -{ - SceneHelper () - : scene (NULL) - , idlen (0) - { - id[0] = 0; - } - - explicit SceneHelper (aiScene* _scene) - : scene (_scene) - , idlen (0) - { - id[0] = 0; - } - - AI_FORCE_INLINE aiScene* operator-> () const - { - return scene; - } - - // scene we're working on - aiScene* scene; - - // prefix to be added to all identifiers in the scene ... 
- char id [32]; - - // and its strlen() - unsigned int idlen; - - // hash table to quickly check whether a name is contained in the scene - std::set<unsigned int> hashes; -}; - -// --------------------------------------------------------------------------- -/** \brief Static helper class providing various utilities to merge two - * scenes. It is intended as internal utility and NOT for use by - * applications. - * - * The class is currently being used by various postprocessing steps - * and loaders (ie. LWS). - */ -class ASSIMP_API SceneCombiner { - // class cannot be instanced - SceneCombiner() { - // empty - } - - ~SceneCombiner() { - // empty - } - -public: - // ------------------------------------------------------------------- - /** Merges two or more scenes. - * - * @param dest Receives a pointer to the destination scene. If the - * pointer doesn't point to NULL when the function is called, the - * existing scene is cleared and refilled. - * @param src Non-empty list of scenes to be merged. The function - * deletes the input scenes afterwards. There may be duplicate scenes. - * @param flags Combination of the AI_INT_MERGE_SCENE flags defined above - */ - static void MergeScenes(aiScene** dest,std::vector<aiScene*>& src, - unsigned int flags = 0); - - // ------------------------------------------------------------------- - /** Merges two or more scenes and attaches all scenes to a specific - * position in the node graph of the master scene. - * - * @param dest Receives a pointer to the destination scene. If the - * pointer doesn't point to NULL when the function is called, the - * existing scene is cleared and refilled. - * @param master Master scene. It will be deleted afterwards. All - * other scenes will be inserted in its node graph. - * @param src Non-empty list of scenes to be merged along with their - * corresponding attachment points in the master scene. The function - * deletes the input scenes afterwards. There may be duplicate scenes. - * @param flags Combination of the AI_INT_MERGE_SCENE flags defined above - */ - static void MergeScenes(aiScene** dest, aiScene* master, - std::vector<AttachmentInfo>& src, - unsigned int flags = 0); - - // ------------------------------------------------------------------- - /** Merges two or more meshes - * - * The meshes should have equal vertex formats. Only components - * that are provided by ALL meshes will be present in the output mesh. - * An exception is made for VColors - they are set to black. The - * meshes should have the same material indices, too. The output - * material index is always the material index of the first mesh. - * - * @param dest Destination mesh. Must be empty. 
- * @param flags Currently no parameters - * @param begin First mesh to be processed - * @param end Points to the mesh after the last mesh to be processed - */ - static void MergeMeshes(aiMesh** dest,unsigned int flags, - std::vector<aiMesh*>::const_iterator begin, - std::vector<aiMesh*>::const_iterator end); - - // ------------------------------------------------------------------- - /** Merges two or more bones - * - * @param out Mesh to receive the output bone list - * @param flags Currently no parameters - * @param begin First mesh to be processed - * @param end Points to the mesh after the last mesh to be processed - */ - static void MergeBones(aiMesh* out,std::vector<aiMesh*>::const_iterator it, - std::vector<aiMesh*>::const_iterator end); - - // ------------------------------------------------------------------- - /** Merges two or more materials - * - * The materials should be complementary as much as possible. In case - * of a property present in different materials, the first occurrence - * is used. - * - * @param dest Destination material. Must be empty. - * @param begin First material to be processed - * @param end Points to the material after the last material to be processed - */ - static void MergeMaterials(aiMaterial** dest, - std::vector<aiMaterial*>::const_iterator begin, - std::vector<aiMaterial*>::const_iterator end); - - // ------------------------------------------------------------------- - /** Builds a list of uniquely named bones in a mesh list - * - * @param asBones Receives the output list - * @param it First mesh to be processed - * @param end Last mesh to be processed - */ - static void BuildUniqueBoneList(std::list<BoneWithHash>& asBones, - std::vector<aiMesh*>::const_iterator it, - std::vector<aiMesh*>::const_iterator end); - - // ------------------------------------------------------------------- - /** Add a name prefix to all nodes in a scene. - * - * @param Current node. This function is called recursively. - * @param prefix Prefix to be added to all nodes - * @param len STring length - */ - static void AddNodePrefixes(aiNode* node, const char* prefix, - unsigned int len); - - // ------------------------------------------------------------------- - /** Add an offset to all mesh indices in a node graph - * - * @param Current node. This function is called recursively. - * @param offset Offset to be added to all mesh indices - */ - static void OffsetNodeMeshIndices (aiNode* node, unsigned int offset); - - // ------------------------------------------------------------------- - /** Attach a list of node graphs to well-defined nodes in a master - * graph. This is a helper for MergeScenes() - * - * @param master Master scene - * @param srcList List of source scenes along with their attachment - * points. If an attachment point is NULL (or does not exist in - * the master graph), a scene is attached to the root of the master - * graph (as an additional child node) - * @duplicates List of duplicates. If elem[n] == n the scene is not - * a duplicate. Otherwise elem[n] links scene n to its first occurrence. - */ - static void AttachToGraph ( aiScene* master, - std::vector<NodeAttachmentInfo>& srcList); - - static void AttachToGraph (aiNode* attach, - std::vector<NodeAttachmentInfo>& srcList); - - - // ------------------------------------------------------------------- - /** Get a deep copy of a scene - * - * @param dest Receives a pointer to the destination scene - * @param src Source scene - remains unmodified. 
- */ - static void CopyScene(aiScene** dest,const aiScene* source,bool allocate = true); - - - // ------------------------------------------------------------------- - /** Get a flat copy of a scene - * - * Only the first hierarchy layer is copied. All pointer members of - * aiScene are shared by source and destination scene. If the - * pointer doesn't point to NULL when the function is called, the - * existing scene is cleared and refilled. - * @param dest Receives a pointer to the destination scene - * @param src Source scene - remains unmodified. - */ - static void CopySceneFlat(aiScene** dest,const aiScene* source); - - - // ------------------------------------------------------------------- - /** Get a deep copy of a mesh - * - * @param dest Receives a pointer to the destination mesh - * @param src Source mesh - remains unmodified. - */ - static void Copy (aiMesh** dest, const aiMesh* src); - - // similar to Copy(): - static void Copy (aiAnimMesh** dest, const aiAnimMesh* src); - static void Copy (aiMaterial** dest, const aiMaterial* src); - static void Copy (aiTexture** dest, const aiTexture* src); - static void Copy (aiAnimation** dest, const aiAnimation* src); - static void Copy (aiCamera** dest, const aiCamera* src); - static void Copy (aiBone** dest, const aiBone* src); - static void Copy (aiLight** dest, const aiLight* src); - static void Copy (aiNodeAnim** dest, const aiNodeAnim* src); - static void Copy (aiMeshMorphAnim** dest, const aiMeshMorphAnim* src); - static void Copy (aiMetadata** dest, const aiMetadata* src); - - // recursive, of course - static void Copy (aiNode** dest, const aiNode* src); - - -private: - - // ------------------------------------------------------------------- - // Same as AddNodePrefixes, but with an additional check - static void AddNodePrefixesChecked(aiNode* node, const char* prefix, - unsigned int len, - std::vector<SceneHelper>& input, - unsigned int cur); - - // ------------------------------------------------------------------- - // Add node identifiers to a hashing set - static void AddNodeHashes(aiNode* node, std::set<unsigned int>& hashes); - - - // ------------------------------------------------------------------- - // Search for duplicate names - static bool FindNameMatch(const aiString& name, - std::vector<SceneHelper>& input, unsigned int cur); -}; - -} - -#endif // !! AI_SCENE_COMBINER_H_INC diff --git a/thirdparty/assimp/include/assimp/SkeletonMeshBuilder.h b/thirdparty/assimp/include/assimp/SkeletonMeshBuilder.h deleted file mode 100644 index ad979a33fa..0000000000 --- a/thirdparty/assimp/include/assimp/SkeletonMeshBuilder.h +++ /dev/null @@ -1,130 +0,0 @@ -/** Helper class to construct a dummy mesh for file formats containing only motion data */ - -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. 
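A hedged sketch of the typical SceneCombiner::MergeScenes() call removed above (the function name is made up; the input scenes are deleted by the call and the caller owns the result):

    #include <assimp/SceneCombiner.h>
    #include <vector>

    // Merges several imported scenes into one; MergeScenes() deletes the inputs
    // and hands ownership of the combined scene back through `merged`.
    static aiScene *mergeImports(std::vector<aiScene *> &sources) {
        aiScene *merged = nullptr;
        Assimp::SceneCombiner::MergeScenes(&merged, sources,
                AI_INT_MERGE_SCENE_GEN_UNIQUE_NAMES_IF_NECESSARY);
        return merged;
    }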
- -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file SkeletonMeshBuilder.h - * Declares SkeletonMeshBuilder, a tiny utility to build dummy meshes - * for animation skeletons. - */ - -#pragma once -#ifndef AI_SKELETONMESHBUILDER_H_INC -#define AI_SKELETONMESHBUILDER_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <vector> -#include <assimp/mesh.h> - -struct aiMaterial; -struct aiScene; -struct aiNode; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** - * This little helper class constructs a dummy mesh for a given scene - * the resembles the node hierarchy. This is useful for file formats - * that don't carry any mesh data but only animation data. - */ -class ASSIMP_API SkeletonMeshBuilder -{ -public: - - // ------------------------------------------------------------------- - /** The constructor processes the given scene and adds a mesh there. - * - * Does nothing if the scene already has mesh data. - * @param pScene The scene for which a skeleton mesh should be constructed. - * @param root The node to start with. NULL is the scene root - * @param bKnobsOnly Set this to true if you don't want the connectors - * between the knobs representing the nodes. - */ - SkeletonMeshBuilder( aiScene* pScene, aiNode* root = NULL, - bool bKnobsOnly = false); - -protected: - - // ------------------------------------------------------------------- - /** Recursively builds a simple mesh representation for the given node - * and also creates a joint for the node that affects this part of - * the mesh. - * @param pNode The node to build geometry for. - */ - void CreateGeometry( const aiNode* pNode); - - // ------------------------------------------------------------------- - /** Creates the mesh from the internally accumulated stuff and returns it. - */ - aiMesh* CreateMesh(); - - // ------------------------------------------------------------------- - /** Creates a dummy material and returns it. 
*/ - aiMaterial* CreateMaterial(); - -protected: - /** space to assemble the mesh data: points */ - std::vector<aiVector3D> mVertices; - - /** faces */ - struct Face - { - unsigned int mIndices[3]; - Face(); - Face( unsigned int p0, unsigned int p1, unsigned int p2) - { mIndices[0] = p0; mIndices[1] = p1; mIndices[2] = p2; } - }; - std::vector<Face> mFaces; - - /** bones */ - std::vector<aiBone*> mBones; - - bool mKnobsOnly; -}; - -} // end of namespace Assimp - -#endif // AI_SKELETONMESHBUILDER_H_INC diff --git a/thirdparty/assimp/include/assimp/SmoothingGroups.h b/thirdparty/assimp/include/assimp/SmoothingGroups.h deleted file mode 100644 index c1a93947f1..0000000000 --- a/thirdparty/assimp/include/assimp/SmoothingGroups.h +++ /dev/null @@ -1,114 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines the helper data structures for importing 3DS files. -http://www.jalix.org/ressources/graphics/3DS/_unofficials/3ds-unofficial.txt */ - -#pragma once -#ifndef AI_SMOOTHINGGROUPS_H_INC -#define AI_SMOOTHINGGROUPS_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/vector3.h> - -#include <stdint.h> -#include <vector> - -// --------------------------------------------------------------------------- -/** Helper structure representing a face with smoothing groups assigned */ -struct FaceWithSmoothingGroup { - FaceWithSmoothingGroup() AI_NO_EXCEPT - : mIndices() - , iSmoothGroup(0) { - // in debug builds set all indices to a common magic value -#ifdef ASSIMP_BUILD_DEBUG - this->mIndices[0] = 0xffffffff; - this->mIndices[1] = 0xffffffff; - this->mIndices[2] = 0xffffffff; -#endif - } - - - //! Indices. .3ds is using uint16. However, after - //! an unique vertex set has been generated, - //! individual index values might exceed 2^16 - uint32_t mIndices[3]; - - //! 
specifies to which smoothing group the face belongs to - uint32_t iSmoothGroup; -}; - -// --------------------------------------------------------------------------- -/** Helper structure representing a mesh whose faces have smoothing - groups assigned. This allows us to reuse the code for normal computations - from smoothings groups for several loaders (3DS, ASE). All of them - use face structures which inherit from #FaceWithSmoothingGroup, - but as they add extra members and need to be copied by value we - need to use a template here. - */ -template <class T> -struct MeshWithSmoothingGroups -{ - //! Vertex positions - std::vector<aiVector3D> mPositions; - - //! Face lists - std::vector<T> mFaces; - - //! List of normal vectors - std::vector<aiVector3D> mNormals; -}; - -// --------------------------------------------------------------------------- -/** Computes normal vectors for the mesh - */ -template <class T> -void ComputeNormalsWithSmoothingsGroups(MeshWithSmoothingGroups<T>& sMesh); - - -// include implementations -#include "SmoothingGroups.inl" - -#endif // !! AI_SMOOTHINGGROUPS_H_INC diff --git a/thirdparty/assimp/include/assimp/SmoothingGroups.inl b/thirdparty/assimp/include/assimp/SmoothingGroups.inl deleted file mode 100644 index 37ea083dbe..0000000000 --- a/thirdparty/assimp/include/assimp/SmoothingGroups.inl +++ /dev/null @@ -1,141 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2012, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file Generation of normal vectors basing on smoothing groups */ - -#pragma once -#ifndef AI_SMOOTHINGGROUPS_INL_INCLUDED -#define AI_SMOOTHINGGROUPS_INL_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/SGSpatialSort.h> - -#include <algorithm> - -using namespace Assimp; - -// ------------------------------------------------------------------------------------------------ -template <class T> -void ComputeNormalsWithSmoothingsGroups(MeshWithSmoothingGroups<T>& sMesh) -{ - // First generate face normals - sMesh.mNormals.resize(sMesh.mPositions.size(),aiVector3D()); - for( unsigned int a = 0; a < sMesh.mFaces.size(); a++) - { - T& face = sMesh.mFaces[a]; - - aiVector3D* pV1 = &sMesh.mPositions[face.mIndices[0]]; - aiVector3D* pV2 = &sMesh.mPositions[face.mIndices[1]]; - aiVector3D* pV3 = &sMesh.mPositions[face.mIndices[2]]; - - aiVector3D pDelta1 = *pV2 - *pV1; - aiVector3D pDelta2 = *pV3 - *pV1; - aiVector3D vNor = pDelta1 ^ pDelta2; - - for (unsigned int c = 0; c < 3;++c) - sMesh.mNormals[face.mIndices[c]] = vNor; - } - - // calculate the position bounds so we have a reliable epsilon to check position differences against - aiVector3D minVec( 1e10f, 1e10f, 1e10f), maxVec( -1e10f, -1e10f, -1e10f); - for( unsigned int a = 0; a < sMesh.mPositions.size(); a++) - { - minVec.x = std::min( minVec.x, sMesh.mPositions[a].x); - minVec.y = std::min( minVec.y, sMesh.mPositions[a].y); - minVec.z = std::min( minVec.z, sMesh.mPositions[a].z); - maxVec.x = std::max( maxVec.x, sMesh.mPositions[a].x); - maxVec.y = std::max( maxVec.y, sMesh.mPositions[a].y); - maxVec.z = std::max( maxVec.z, sMesh.mPositions[a].z); - } - const float posEpsilon = (maxVec - minVec).Length() * 1e-5f; - std::vector<aiVector3D> avNormals; - avNormals.resize(sMesh.mNormals.size()); - - // now generate the spatial sort tree - SGSpatialSort sSort; - for( typename std::vector<T>::iterator i = sMesh.mFaces.begin(); - i != sMesh.mFaces.end();++i) - { - for (unsigned int c = 0; c < 3;++c) - sSort.Add(sMesh.mPositions[(*i).mIndices[c]],(*i).mIndices[c],(*i).iSmoothGroup); - } - sSort.Prepare(); - - std::vector<bool> vertexDone(sMesh.mPositions.size(),false); - for( typename std::vector<T>::iterator i = sMesh.mFaces.begin(); - i != sMesh.mFaces.end();++i) - { - std::vector<unsigned int> poResult; - for (unsigned int c = 0; c < 3;++c) - { - unsigned int idx = (*i).mIndices[c]; - if (vertexDone[idx])continue; - - sSort.FindPositions(sMesh.mPositions[idx],(*i).iSmoothGroup, - posEpsilon,poResult); - - aiVector3D vNormals; - for (std::vector<unsigned int>::const_iterator - a = poResult.begin(); - a != poResult.end();++a) - { - vNormals += sMesh.mNormals[(*a)]; - } - vNormals.NormalizeSafe(); - - // write back into all affected normals - for (std::vector<unsigned int>::const_iterator - a = poResult.begin(); - a != poResult.end();++a) - { - idx = *a; - avNormals [idx] = vNormals; - vertexDone[idx] = true; - } - } - } - sMesh.mNormals = avNormals; -} - -#endif // !! AI_SMOOTHINGGROUPS_INL_INCLUDED diff --git a/thirdparty/assimp/include/assimp/SpatialSort.h b/thirdparty/assimp/include/assimp/SpatialSort.h deleted file mode 100644 index 9f93543150..0000000000 --- a/thirdparty/assimp/include/assimp/SpatialSort.h +++ /dev/null @@ -1,179 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** Small helper classes to optimise finding vertizes close to a given location */ -#pragma once -#ifndef AI_SPATIALSORT_H_INC -#define AI_SPATIALSORT_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <vector> -#include <assimp/types.h> - -namespace Assimp { - -// ------------------------------------------------------------------------------------------------ -/** A little helper class to quickly find all vertices in the epsilon environment of a given - * position. Construct an instance with an array of positions. The class stores the given positions - * by their indices and sorts them by their distance to an arbitrary chosen plane. - * You can then query the instance for all vertices close to a given position in an average O(log n) - * time, with O(n) worst case complexity when all vertices lay on the plane. The plane is chosen - * so that it avoids common planes in usual data sets. */ -// ------------------------------------------------------------------------------------------------ -class ASSIMP_API SpatialSort -{ -public: - - SpatialSort(); - - // ------------------------------------------------------------------------------------ - /** Constructs a spatially sorted representation from the given position array. - * Supply the positions in its layout in memory, the class will only refer to them - * by index. - * @param pPositions Pointer to the first position vector of the array. - * @param pNumPositions Number of vectors to expect in that array. - * @param pElementOffset Offset in bytes from the beginning of one vector in memory - * to the beginning of the next vector. */ - SpatialSort( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset); - - /** Destructor */ - ~SpatialSort(); - -public: - - // ------------------------------------------------------------------------------------ - /** Sets the input data for the SpatialSort. This replaces existing data, if any. 
- * The new data receives new indices in ascending order. - * - * @param pPositions Pointer to the first position vector of the array. - * @param pNumPositions Number of vectors to expect in that array. - * @param pElementOffset Offset in bytes from the beginning of one vector in memory - * to the beginning of the next vector. - * @param pFinalize Specifies whether the SpatialSort's internal representation - * is finalized after the new data has been added. Finalization is - * required in order to use #FindPosition() or #GenerateMappingTable(). - * If you don't finalize yet, you can use #Append() to add data from - * other sources.*/ - void Fill( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset, - bool pFinalize = true); - - - // ------------------------------------------------------------------------------------ - /** Same as #Fill(), except the method appends to existing data in the #SpatialSort. */ - void Append( const aiVector3D* pPositions, unsigned int pNumPositions, - unsigned int pElementOffset, - bool pFinalize = true); - - - // ------------------------------------------------------------------------------------ - /** Finalize the spatial hash data structure. This can be useful after - * multiple calls to #Append() with the pFinalize parameter set to false. - * This is finally required before one of #FindPositions() and #GenerateMappingTable() - * can be called to query the spatial sort.*/ - void Finalize(); - - // ------------------------------------------------------------------------------------ - /** Returns an iterator for all positions close to the given position. - * @param pPosition The position to look for vertices. - * @param pRadius Maximal distance from the position a vertex may have to be counted in. - * @param poResults The container to store the indices of the found positions. - * Will be emptied by the call so it may contain anything. - * @return An iterator to iterate over all vertices in the given area.*/ - void FindPositions( const aiVector3D& pPosition, ai_real pRadius, - std::vector<unsigned int>& poResults) const; - - // ------------------------------------------------------------------------------------ - /** Fills an array with indices of all positions identical to the given position. In - * opposite to FindPositions(), not an epsilon is used but a (very low) tolerance of - * four floating-point units. - * @param pPosition The position to look for vertices. - * @param poResults The container to store the indices of the found positions. - * Will be emptied by the call so it may contain anything.*/ - void FindIdenticalPositions( const aiVector3D& pPosition, - std::vector<unsigned int>& poResults) const; - - // ------------------------------------------------------------------------------------ - /** Compute a table that maps each vertex ID referring to a spatially close - * enough position to the same output ID. Output IDs are assigned in ascending order - * from 0...n. - * @param fill Will be filled with numPositions entries. - * @param pRadius Maximal distance from the position a vertex may have to - * be counted in. - * @return Number of unique vertices (n). */ - unsigned int GenerateMappingTable(std::vector<unsigned int>& fill, - ai_real pRadius) const; - -protected: - /** Normal of the sorting plane, normalized. The center is always at (0, 0, 0) */ - aiVector3D mPlaneNormal; - - /** An entry in a spatially sorted position array. 
Consists of a vertex index, - * its position and its pre-calculated distance from the reference plane */ - struct Entry { - unsigned int mIndex; ///< The vertex referred by this entry - aiVector3D mPosition; ///< Position - ai_real mDistance; ///< Distance of this vertex to the sorting plane - - Entry() AI_NO_EXCEPT - : mIndex( 999999999 ), mPosition(), mDistance( 99999. ) { - // empty - } - Entry( unsigned int pIndex, const aiVector3D& pPosition, ai_real pDistance) - : mIndex( pIndex), mPosition( pPosition), mDistance( pDistance) { - // empty - } - - bool operator < (const Entry& e) const { return mDistance < e.mDistance; } - }; - - // all positions, sorted by distance to the sorting plane - std::vector<Entry> mPositions; -}; - -} // end of namespace Assimp - -#endif // AI_SPATIALSORT_H_INC diff --git a/thirdparty/assimp/include/assimp/StandardShapes.h b/thirdparty/assimp/include/assimp/StandardShapes.h deleted file mode 100644 index c594cb63f4..0000000000 --- a/thirdparty/assimp/include/assimp/StandardShapes.h +++ /dev/null @@ -1,205 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Declares a helper class, "StandardShapes" which generates - * vertices for standard shapes, such as cylinders, cones, spheres .. - */ -#pragma once -#ifndef AI_STANDARD_SHAPES_H_INC -#define AI_STANDARD_SHAPES_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/vector3.h> -#include <vector> - -struct aiMesh; - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** \brief Helper class to generate vertex buffers for standard geometric - * shapes, such as cylinders, cones, boxes, spheres, elipsoids ... . 
- */ -class ASSIMP_API StandardShapes -{ - // class cannot be instanced - StandardShapes() {} - -public: - - - // ---------------------------------------------------------------- - /** Generates a mesh from an array of vertex positions. - * - * @param positions List of vertex positions - * @param numIndices Number of indices per primitive - * @return Output mesh - */ - static aiMesh* MakeMesh(const std::vector<aiVector3D>& positions, - unsigned int numIndices); - - - static aiMesh* MakeMesh ( unsigned int (*GenerateFunc) - (std::vector<aiVector3D>&)); - - static aiMesh* MakeMesh ( unsigned int (*GenerateFunc) - (std::vector<aiVector3D>&, bool)); - - static aiMesh* MakeMesh ( unsigned int n, void (*GenerateFunc) - (unsigned int,std::vector<aiVector3D>&)); - - // ---------------------------------------------------------------- - /** @brief Generates a hexahedron (cube) - * - * Hexahedrons can be scaled on all axes. - * @param positions Receives output triangles. - * @param polygons If you pass true here quads will be returned - * @return Number of vertices per face - */ - static unsigned int MakeHexahedron( - std::vector<aiVector3D>& positions, - bool polygons = false); - - // ---------------------------------------------------------------- - /** @brief Generates an icosahedron - * - * @param positions Receives output triangles. - * @return Number of vertices per face - */ - static unsigned int MakeIcosahedron( - std::vector<aiVector3D>& positions); - - - // ---------------------------------------------------------------- - /** @brief Generates a dodecahedron - * - * @param positions Receives output triangles - * @param polygons If you pass true here pentagons will be returned - * @return Number of vertices per face - */ - static unsigned int MakeDodecahedron( - std::vector<aiVector3D>& positions, - bool polygons = false); - - - // ---------------------------------------------------------------- - /** @brief Generates an octahedron - * - * @param positions Receives output triangles. - * @return Number of vertices per face - */ - static unsigned int MakeOctahedron( - std::vector<aiVector3D>& positions); - - - // ---------------------------------------------------------------- - /** @brief Generates a tetrahedron - * - * @param positions Receives output triangles. - * @return Number of vertices per face - */ - static unsigned int MakeTetrahedron( - std::vector<aiVector3D>& positions); - - - - // ---------------------------------------------------------------- - /** @brief Generates a sphere - * - * @param tess Number of subdivions - 0 generates a octahedron - * @param positions Receives output triangles. - */ - static void MakeSphere(unsigned int tess, - std::vector<aiVector3D>& positions); - - - // ---------------------------------------------------------------- - /** @brief Generates a cone or a cylinder, either open or closed. - * - * @code - * - * |-----| <- radius 1 - * - * __x__ <- ] ^ - * / \ | height | - * / \ | Y - * / \ | - * / \ | - * /______x______\ <- ] <- end cap - * - * |-------------| <- radius 2 - * - * @endcode - * - * @param height Height of the cone - * @param radius1 First radius - * @param radius2 Second radius - * @param tess Number of triangles. - * @param bOpened true for an open cone/cylinder. 
An open shape has - * no 'end caps' - * @param positions Receives output triangles - */ - static void MakeCone(ai_real height,ai_real radius1, - ai_real radius2,unsigned int tess, - std::vector<aiVector3D>& positions,bool bOpen= false); - - - // ---------------------------------------------------------------- - /** @brief Generates a flat circle - * - * The circle is constructed in the planned formed by the x,z - * axes of the cartesian coordinate system. - * - * @param radius Radius of the circle - * @param tess Number of segments. - * @param positions Receives output triangles. - */ - static void MakeCircle(ai_real radius, unsigned int tess, - std::vector<aiVector3D>& positions); - -}; -} // ! Assimp - -#endif // !! AI_STANDARD_SHAPES_H_INC diff --git a/thirdparty/assimp/include/assimp/StreamReader.h b/thirdparty/assimp/include/assimp/StreamReader.h deleted file mode 100644 index cb24f1595b..0000000000 --- a/thirdparty/assimp/include/assimp/StreamReader.h +++ /dev/null @@ -1,347 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Defines the StreamReader class which reads data from - * a binary stream with a well-defined endianness. - */ -#pragma once -#ifndef AI_STREAMREADER_H_INCLUDED -#define AI_STREAMREADER_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/IOStream.hpp> -#include <assimp/Defines.h> -#include <assimp/ByteSwapper.h> -#include <assimp/Exceptional.h> - -#include <memory> - -namespace Assimp { - -// -------------------------------------------------------------------------------------------- -/** Wrapper class around IOStream to allow for consistent reading of binary data in both - * little and big endian format. Don't attempt to instance the template directly. 
Use - * StreamReaderLE to read from a little-endian stream and StreamReaderBE to read from a - * BE stream. The class expects that the endianness of any input data is known at - * compile-time, which should usually be true (#BaseImporter::ConvertToUTF8 implements - * runtime endianness conversions for text files). - * - * XXX switch from unsigned int for size types to size_t? or ptrdiff_t?*/ -// -------------------------------------------------------------------------------------------- -template <bool SwapEndianess = false, bool RuntimeSwitch = false> -class StreamReader { -public: - // FIXME: use these data types throughout the whole library, - // then change them to 64 bit values :-) - using diff = int; - using pos = unsigned int; - - // --------------------------------------------------------------------- - /** Construction from a given stream with a well-defined endianness. - * - * The StreamReader holds a permanent strong reference to the - * stream, which is released upon destruction. - * @param stream Input stream. The stream is not restarted if - * its file pointer is not at 0. Instead, the stream reader - * reads from the current position to the end of the stream. - * @param le If @c RuntimeSwitch is true: specifies whether the - * stream is in little endian byte order. Otherwise the - * endianness information is contained in the @c SwapEndianess - * template parameter and this parameter is meaningless. */ - StreamReader(std::shared_ptr<IOStream> stream, bool le = false) - : stream(stream) - , le(le) - { - ai_assert(stream); - InternBegin(); - } - - // --------------------------------------------------------------------- - StreamReader(IOStream* stream, bool le = false) - : stream(std::shared_ptr<IOStream>(stream)) - , le(le) - { - ai_assert(stream); - InternBegin(); - } - - // --------------------------------------------------------------------- - ~StreamReader() { - delete[] buffer; - } - - // deprecated, use overloaded operator>> instead - - // --------------------------------------------------------------------- - /** Read a float from the stream */ - float GetF4() - { - return Get<float>(); - } - - // --------------------------------------------------------------------- - /** Read a double from the stream */ - double GetF8() { - return Get<double>(); - } - - // --------------------------------------------------------------------- - /** Read a signed 16 bit integer from the stream */ - int16_t GetI2() { - return Get<int16_t>(); - } - - // --------------------------------------------------------------------- - /** Read a signed 8 bit integer from the stream */ - int8_t GetI1() { - return Get<int8_t>(); - } - - // --------------------------------------------------------------------- - /** Read an signed 32 bit integer from the stream */ - int32_t GetI4() { - return Get<int32_t>(); - } - - // --------------------------------------------------------------------- - /** Read a signed 64 bit integer from the stream */ - int64_t GetI8() { - return Get<int64_t>(); - } - - // --------------------------------------------------------------------- - /** Read a unsigned 16 bit integer from the stream */ - uint16_t GetU2() { - return Get<uint16_t>(); - } - - // --------------------------------------------------------------------- - /** Read a unsigned 8 bit integer from the stream */ - uint8_t GetU1() { - return Get<uint8_t>(); - } - - // --------------------------------------------------------------------- - /** Read an unsigned 32 bit integer from the stream */ - uint32_t GetU4() { - 
return Get<uint32_t>(); - } - - // --------------------------------------------------------------------- - /** Read a unsigned 64 bit integer from the stream */ - uint64_t GetU8() { - return Get<uint64_t>(); - } - - // --------------------------------------------------------------------- - /** Get the remaining stream size (to the end of the stream) */ - unsigned int GetRemainingSize() const { - return (unsigned int)(end - current); - } - - // --------------------------------------------------------------------- - /** Get the remaining stream size (to the current read limit). The - * return value is the remaining size of the stream if no custom - * read limit has been set. */ - unsigned int GetRemainingSizeToLimit() const { - return (unsigned int)(limit - current); - } - - // --------------------------------------------------------------------- - /** Increase the file pointer (relative seeking) */ - void IncPtr(intptr_t plus) { - current += plus; - if (current > limit) { - throw DeadlyImportError("End of file or read limit was reached"); - } - } - - // --------------------------------------------------------------------- - /** Get the current file pointer */ - int8_t* GetPtr() const { - return current; - } - - // --------------------------------------------------------------------- - /** Set current file pointer (Get it from #GetPtr). This is if you - * prefer to do pointer arithmetics on your own or want to copy - * large chunks of data at once. - * @param p The new pointer, which is validated against the size - * limit and buffer boundaries. */ - void SetPtr(int8_t* p) { - current = p; - if (current > limit || current < buffer) { - throw DeadlyImportError("End of file or read limit was reached"); - } - } - - // --------------------------------------------------------------------- - /** Copy n bytes to an external buffer - * @param out Destination for copying - * @param bytes Number of bytes to copy */ - void CopyAndAdvance(void* out, size_t bytes) { - int8_t* ur = GetPtr(); - SetPtr(ur+bytes); // fire exception if eof - - ::memcpy(out,ur,bytes); - } - - // --------------------------------------------------------------------- - /** Get the current offset from the beginning of the file */ - int GetCurrentPos() const { - return (unsigned int)(current - buffer); - } - - void SetCurrentPos(size_t pos) { - SetPtr(buffer + pos); - } - - // --------------------------------------------------------------------- - /** Setup a temporary read limit - * - * @param limit Maximum number of bytes to be read from - * the beginning of the file. Specifying UINT_MAX - * resets the limit to the original end of the stream. - * Returns the previously set limit. */ - unsigned int SetReadLimit(unsigned int _limit) { - unsigned int prev = GetReadLimit(); - if (UINT_MAX == _limit) { - limit = end; - return prev; - } - - limit = buffer + _limit; - if (limit > end) { - throw DeadlyImportError("StreamReader: Invalid read limit"); - } - return prev; - } - - // --------------------------------------------------------------------- - /** Get the current read limit in bytes. Reading over this limit - * accidentally raises an exception. */ - unsigned int GetReadLimit() const { - return (unsigned int)(limit - buffer); - } - - // --------------------------------------------------------------------- - /** Skip to the read limit in bytes. Reading over this limit - * accidentally raises an exception. 
*/ - void SkipToReadLimit() { - current = limit; - } - - // --------------------------------------------------------------------- - /** overload operator>> and allow chaining of >> ops. */ - template <typename T> - StreamReader& operator >> (T& f) { - f = Get<T>(); - return *this; - } - - // --------------------------------------------------------------------- - /** Generic read method. ByteSwap::Swap(T*) *must* be defined */ - template <typename T> - T Get() { - if ( current + sizeof(T) > limit) { - throw DeadlyImportError("End of file or stream limit was reached"); - } - - T f; - ::memcpy (&f, current, sizeof(T)); - Intern::Getter<SwapEndianess,T,RuntimeSwitch>() (&f,le); - current += sizeof(T); - - return f; - } - -private: - // --------------------------------------------------------------------- - void InternBegin() { - if (!stream) { - // in case someone wonders: StreamReader is frequently invoked with - // no prior validation whether the input stream is valid. Since - // no one bothers changing the error message, this message here - // is passed down to the caller and 'unable to open file' - // simply describes best what happened. - throw DeadlyImportError("StreamReader: Unable to open file"); - } - - const size_t s = stream->FileSize() - stream->Tell(); - if (!s) { - throw DeadlyImportError("StreamReader: File is empty or EOF is already reached"); - } - - current = buffer = new int8_t[s]; - const size_t read = stream->Read(current,1,s); - // (read < s) can only happen if the stream was opened in text mode, in which case FileSize() is not reliable - ai_assert(read <= s); - end = limit = &buffer[read-1] + 1; - } - -private: - std::shared_ptr<IOStream> stream; - int8_t *buffer, *current, *end, *limit; - bool le; -}; - -// -------------------------------------------------------------------------------------------- -// `static` StreamReaders. Their byte order is fixed and they might be a little bit faster. -#ifdef AI_BUILD_BIG_ENDIAN - typedef StreamReader<true> StreamReaderLE; - typedef StreamReader<false> StreamReaderBE; -#else - typedef StreamReader<true> StreamReaderBE; - typedef StreamReader<false> StreamReaderLE; -#endif - -// `dynamic` StreamReader. The byte order of the input data is specified in the -// c'tor. This involves runtime branching and might be a little bit slower. -typedef StreamReader<true,true> StreamReaderAny; - -} // end namespace Assimp - -#endif // !! AI_STREAMREADER_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/StreamWriter.h b/thirdparty/assimp/include/assimp/StreamWriter.h deleted file mode 100644 index 489e8adfe3..0000000000 --- a/thirdparty/assimp/include/assimp/StreamWriter.h +++ /dev/null @@ -1,307 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file Defines the StreamWriter class which writes data to - * a binary stream with a well-defined endianness. */ -#pragma once -#ifndef AI_STREAMWRITER_H_INCLUDED -#define AI_STREAMWRITER_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/ByteSwapper.h> -#include <assimp/IOStream.hpp> - -#include <memory> -#include <vector> - -namespace Assimp { - -// -------------------------------------------------------------------------------------------- -/** Wrapper class around IOStream to allow for consistent writing of binary data in both - * little and big endian format. Don't attempt to instance the template directly. Use - * StreamWriterLE to write to a little-endian stream and StreamWriterBE to write to a - * BE stream. Alternatively, there is StreamWriterAny if the endianness of the output - * stream is to be determined at runtime. - */ -// -------------------------------------------------------------------------------------------- -template <bool SwapEndianess = false, bool RuntimeSwitch = false> -class StreamWriter -{ - enum { - INITIAL_CAPACITY = 1024 - }; - -public: - - // --------------------------------------------------------------------- - /** Construction from a given stream with a well-defined endianness. - * - * The StreamReader holds a permanent strong reference to the - * stream, which is released upon destruction. - * @param stream Input stream. The stream is not re-seeked and writing - continues at the current position of the stream cursor. - * @param le If @c RuntimeSwitch is true: specifies whether the - * stream is in little endian byte order. Otherwise the - * endianness information is defined by the @c SwapEndianess - * template parameter and this parameter is meaningless. 
*/ - StreamWriter(std::shared_ptr<IOStream> stream, bool le = false) - : stream(stream) - , le(le) - , cursor() - { - ai_assert(stream); - buffer.reserve(INITIAL_CAPACITY); - } - - // --------------------------------------------------------------------- - StreamWriter(IOStream* stream, bool le = false) - : stream(std::shared_ptr<IOStream>(stream)) - , le(le) - , cursor() - { - ai_assert(stream); - buffer.reserve(INITIAL_CAPACITY); - } - - // --------------------------------------------------------------------- - ~StreamWriter() { - stream->Write(buffer.data(), 1, buffer.size()); - stream->Flush(); - } - -public: - - // --------------------------------------------------------------------- - /** Flush the contents of the internal buffer, and the output IOStream */ - void Flush() - { - stream->Write(buffer.data(), 1, buffer.size()); - stream->Flush(); - buffer.clear(); - cursor = 0; - } - - // --------------------------------------------------------------------- - /** Seek to the given offset / origin in the output IOStream. - * - * Flushes the internal buffer and the output IOStream prior to seeking. */ - aiReturn Seek(size_t pOffset, aiOrigin pOrigin=aiOrigin_SET) - { - Flush(); - return stream->Seek(pOffset, pOrigin); - } - - // --------------------------------------------------------------------- - /** Tell the current position in the output IOStream. - * - * First flushes the internal buffer and the output IOStream. */ - size_t Tell() - { - Flush(); - return stream->Tell(); - } - -public: - - // --------------------------------------------------------------------- - /** Write a float to the stream */ - void PutF4(float f) - { - Put(f); - } - - // --------------------------------------------------------------------- - /** Write a double to the stream */ - void PutF8(double d) { - Put(d); - } - - // --------------------------------------------------------------------- - /** Write a signed 16 bit integer to the stream */ - void PutI2(int16_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a signed 8 bit integer to the stream */ - void PutI1(int8_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write an signed 32 bit integer to the stream */ - void PutI4(int32_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a signed 64 bit integer to the stream */ - void PutI8(int64_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a unsigned 16 bit integer to the stream */ - void PutU2(uint16_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a unsigned 8 bit integer to the stream */ - void PutU1(uint8_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write an unsigned 32 bit integer to the stream */ - void PutU4(uint32_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a unsigned 64 bit integer to the stream */ - void PutU8(uint64_t n) { - Put(n); - } - - // --------------------------------------------------------------------- - /** Write a single character to the stream */ - void PutChar(char c) { - Put(c); - } - - // --------------------------------------------------------------------- - /** Write an aiString to the stream */ - void PutString(const aiString& s) - { - // as Put(T f) below - if (cursor + 
s.length >= buffer.size()) { - buffer.resize(cursor + s.length); - } - void* dest = &buffer[cursor]; - ::memcpy(dest, s.C_Str(), s.length); - cursor += s.length; - } - - // --------------------------------------------------------------------- - /** Write a std::string to the stream */ - void PutString(const std::string& s) - { - // as Put(T f) below - if (cursor + s.size() >= buffer.size()) { - buffer.resize(cursor + s.size()); - } - void* dest = &buffer[cursor]; - ::memcpy(dest, s.c_str(), s.size()); - cursor += s.size(); - } - -public: - - // --------------------------------------------------------------------- - /** overload operator<< and allow chaining of MM ops. */ - template <typename T> - StreamWriter& operator << (T f) { - Put(f); - return *this; - } - - // --------------------------------------------------------------------- - std::size_t GetCurrentPos() const { - return cursor; - } - - // --------------------------------------------------------------------- - void SetCurrentPos(std::size_t new_cursor) { - cursor = new_cursor; - } - - // --------------------------------------------------------------------- - /** Generic write method. ByteSwap::Swap(T*) *must* be defined */ - template <typename T> - void Put(T f) { - Intern :: Getter<SwapEndianess,T,RuntimeSwitch>() (&f, le); - - if (cursor + sizeof(T) >= buffer.size()) { - buffer.resize(cursor + sizeof(T)); - } - - void* dest = &buffer[cursor]; - - // reinterpret_cast + assignment breaks strict aliasing rules - // and generally causes trouble on platforms such as ARM that - // do not silently ignore alignment faults. - ::memcpy(dest, &f, sizeof(T)); - cursor += sizeof(T); - } - -private: - - std::shared_ptr<IOStream> stream; - bool le; - - std::vector<uint8_t> buffer; - std::size_t cursor; -}; - - -// -------------------------------------------------------------------------------------------- -// `static` StreamWriter. Their byte order is fixed and they might be a little bit faster. -#ifdef AI_BUILD_BIG_ENDIAN - typedef StreamWriter<true> StreamWriterLE; - typedef StreamWriter<false> StreamWriterBE; -#else - typedef StreamWriter<true> StreamWriterBE; - typedef StreamWriter<false> StreamWriterLE; -#endif - -// `dynamic` StreamWriter. The byte order of the input data is specified in the -// c'tor. This involves runtime branching and might be a little bit slower. -typedef StreamWriter<true,true> StreamWriterAny; - -} // end namespace Assimp - -#endif // !! AI_STREAMWriter_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/StringComparison.h b/thirdparty/assimp/include/assimp/StringComparison.h deleted file mode 100644 index d3ca3e9714..0000000000 --- a/thirdparty/assimp/include/assimp/StringComparison.h +++ /dev/null @@ -1,238 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Definition of platform independent string workers: - - ASSIMP_itoa10 - ASSIMP_stricmp - ASSIMP_strincmp - - These functions are not consistently available on all platforms, - or the provided implementations behave too differently. -*/ -#pragma once -#ifndef INCLUDED_AI_STRING_WORKERS_H -#define INCLUDED_AI_STRING_WORKERS_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/ai_assert.h> -#include <assimp/defs.h> -#include <assimp/StringComparison.h> - -#include <string.h> -#include <stdint.h> -#include <string> - -namespace Assimp { - -// ------------------------------------------------------------------------------- -/** @brief itoa with a fixed base 10 - * 'itoa' is not consistently available on all platforms so it is quite useful - * to have a small replacement function here. No need to use a full sprintf() - * if we just want to print a number ... - * @param out Output buffer - * @param max Maximum number of characters to be written, including '\0'. - * This parameter may not be 0. - * @param number Number to be written - * @return Length of the output string, excluding the '\0' - */ -AI_FORCE_INLINE -unsigned int ASSIMP_itoa10( char* out, unsigned int max, int32_t number) { - ai_assert(NULL != out); - - // write the unary minus to indicate we have a negative number - unsigned int written = 1u; - if (number < 0 && written < max) { - *out++ = '-'; - ++written; - number = -number; - } - - // We begin with the largest number that is not zero. - int32_t cur = 1000000000; // 2147483648 - bool mustPrint = false; - while (written < max) { - - const unsigned int digit = number / cur; - if (mustPrint || digit > 0 || 1 == cur) { - // print all future zeroe's from now - mustPrint = true; - - *out++ = '0'+static_cast<char>(digit); - - ++written; - number -= digit*cur; - if (1 == cur) { - break; - } - } - cur /= 10; - } - - // append a terminal zero - *out++ = '\0'; - return written-1; -} - -// ------------------------------------------------------------------------------- -/** @brief itoa with a fixed base 10 (Secure template overload) - * The compiler should choose this function if he or she is able to determine the - * size of the array automatically. 
- */ -template <size_t length> -AI_FORCE_INLINE -unsigned int ASSIMP_itoa10( char(& out)[length], int32_t number) { - return ASSIMP_itoa10(out,length,number); -} - -// ------------------------------------------------------------------------------- -/** @brief Helper function to do platform independent string comparison. - * - * This is required since stricmp() is not consistently available on - * all platforms. Some platforms use the '_' prefix, others don't even - * have such a function. - * - * @param s1 First input string - * @param s2 Second input string - * @return 0 if the given strings are identical - */ -AI_FORCE_INLINE -int ASSIMP_stricmp(const char *s1, const char *s2) { - ai_assert( NULL != s1 ); - ai_assert( NULL != s2 ); - -#if (defined _MSC_VER) - - return ::_stricmp(s1,s2); -#elif defined( __GNUC__ ) - - return ::strcasecmp(s1,s2); -#else - - char c1, c2; - do { - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } - while ( c1 && (c1 == c2) ); - return c1 - c2; -#endif -} - -// ------------------------------------------------------------------------------- -/** @brief Case independent comparison of two std::strings - * - * @param a First string - * @param b Second string - * @return 0 if a == b - */ -AI_FORCE_INLINE -int ASSIMP_stricmp(const std::string& a, const std::string& b) { - int i = (int)b.length()-(int)a.length(); - return (i ? i : ASSIMP_stricmp(a.c_str(),b.c_str())); -} - -// ------------------------------------------------------------------------------- -/** @brief Helper function to do platform independent string comparison. - * - * This is required since strincmp() is not consistently available on - * all platforms. Some platforms use the '_' prefix, others don't even - * have such a function. - * - * @param s1 First input string - * @param s2 Second input string - * @param n Macimum number of characters to compare - * @return 0 if the given strings are identical - */ -AI_FORCE_INLINE -int ASSIMP_strincmp(const char *s1, const char *s2, unsigned int n) { - ai_assert( NULL != s1 ); - ai_assert( NULL != s2 ); - if ( !n ) { - return 0; - } - -#if (defined _MSC_VER) - - return ::_strnicmp(s1,s2,n); - -#elif defined( __GNUC__ ) - - return ::strncasecmp(s1,s2, n); - -#else - char c1, c2; - unsigned int p = 0; - do - { - if (p++ >= n)return 0; - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } - while ( c1 && (c1 == c2) ); - - return c1 - c2; -#endif -} - - -// ------------------------------------------------------------------------------- -/** @brief Evaluates an integer power - * - * todo: move somewhere where it fits better in than here - */ -AI_FORCE_INLINE -unsigned int integer_pow( unsigned int base, unsigned int power ) { - unsigned int res = 1; - for ( unsigned int i = 0; i < power; ++i ) { - res *= base; - } - - return res; -} - -} // end of namespace - -#endif // ! AI_STRINGCOMPARISON_H_INC diff --git a/thirdparty/assimp/include/assimp/StringUtils.h b/thirdparty/assimp/include/assimp/StringUtils.h deleted file mode 100644 index af481f819e..0000000000 --- a/thirdparty/assimp/include/assimp/StringUtils.h +++ /dev/null @@ -1,148 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ -#pragma once -#ifndef INCLUDED_AI_STRINGUTILS_H -#define INCLUDED_AI_STRINGUTILS_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -#include <sstream> -#include <stdarg.h> -#include <cstdlib> - -/// @fn ai_snprintf -/// @brief The portable version of the function snprintf ( C99 standard ), which works on visual studio compilers 2013 and earlier. -/// @param outBuf The buffer to write in -/// @param size The buffer size -/// @param format The format string -/// @param ap The additional arguments. -/// @return The number of written characters if the buffer size was big enough. If an encoding error occurs, a negative number is returned. -#if defined(_MSC_VER) && _MSC_VER < 1900 - - AI_FORCE_INLINE - int c99_ai_vsnprintf(char *outBuf, size_t size, const char *format, va_list ap) { - int count(-1); - if (0 != size) { - count = _vsnprintf_s(outBuf, size, _TRUNCATE, format, ap); - } - if (count == -1) { - count = _vscprintf(format, ap); - } - - return count; - } - - AI_FORCE_INLINE - int ai_snprintf(char *outBuf, size_t size, const char *format, ...) { - int count; - va_list ap; - - va_start(ap, format); - count = c99_ai_vsnprintf(outBuf, size, format, ap); - va_end(ap); - - return count; - } - -#else -# define ai_snprintf snprintf -#endif - -/// @fn to_string -/// @brief The portable version of to_string ( some gcc-versions on embedded devices are not supporting this). -/// @param value The value to write into the std::string. -/// @return The value as a std::string -template <typename T> -AI_FORCE_INLINE -std::string to_string( T value ) { - std::ostringstream os; - os << value; - - return os.str(); -} - -/// @fn ai_strtof -/// @brief The portable version of strtof. -/// @param begin The first character of the string. -/// @param end The last character -/// @return The float value, 0.0f in cas of an error. 
-AI_FORCE_INLINE -float ai_strtof( const char *begin, const char *end ) { - if ( nullptr == begin ) { - return 0.0f; - } - float val( 0.0f ); - if ( nullptr == end ) { - val = static_cast< float >( ::atof( begin ) ); - } else { - std::string::size_type len( end - begin ); - std::string token( begin, len ); - val = static_cast< float >( ::atof( token.c_str() ) ); - } - - return val; -} - -/// @fn DecimalToHexa -/// @brief The portable to convert a decimal value into a hexadecimal string. -/// @param toConvert Value to convert -/// @return The hexadecimal string, is empty in case of an error. -template<class T> -AI_FORCE_INLINE -std::string DecimalToHexa( T toConvert ) { - std::string result; - std::stringstream ss; - ss << std::hex << toConvert; - ss >> result; - - for ( size_t i = 0; i < result.size(); ++i ) { - result[ i ] = toupper( result[ i ] ); - } - - return result; -} - -#endif // INCLUDED_AI_STRINGUTILS_H diff --git a/thirdparty/assimp/include/assimp/Subdivision.h b/thirdparty/assimp/include/assimp/Subdivision.h deleted file mode 100644 index e9450267ec..0000000000 --- a/thirdparty/assimp/include/assimp/Subdivision.h +++ /dev/null @@ -1,134 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Defines a helper class to evaluate subdivision surfaces.*/ -#pragma once -#ifndef AI_SUBDISIVION_H_INC -#define AI_SUBDISIVION_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -struct aiMesh; - -namespace Assimp { - -// ------------------------------------------------------------------------------ -/** Helper class to evaluate subdivision surfaces. Different algorithms - * are provided for choice. 
*/ -// ------------------------------------------------------------------------------ -class ASSIMP_API Subdivider { -public: - - /** Enumerates all supported subvidision algorithms */ - enum Algorithm { - CATMULL_CLARKE = 0x1 - }; - - virtual ~Subdivider(); - - // --------------------------------------------------------------- - /** Create a subdivider of a specific type - * - * @param algo Algorithm to be used for subdivision - * @return Subdivider instance. */ - static Subdivider* Create (Algorithm algo); - - // --------------------------------------------------------------- - /** Subdivide a mesh using the selected algorithm - * - * @param mesh First mesh to be subdivided. Must be in verbose - * format. - * @param out Receives the output mesh, allocated by me. - * @param num Number of subdivisions to perform. - * @param discard_input If true is passed, the input mesh is - * deleted after the subdivision is complete. This can - * improve performance because it allows the optimization - * to reuse the existing mesh for intermediate results. - * @pre out!=mesh*/ - virtual void Subdivide ( aiMesh* mesh, - aiMesh*& out, unsigned int num, - bool discard_input = false) = 0; - - // --------------------------------------------------------------- - /** Subdivide multiple meshes using the selected algorithm. This - * avoids erroneous smoothing on objects consisting of multiple - * per-material meshes. Usually, most 3d modellers smooth on a - * per-object base, regardless the materials assigned to the - * meshes. - * - * @param smesh Array of meshes to be subdivided. Must be in - * verbose format. - * @param nmesh Number of meshes in smesh. - * @param out Receives the output meshes. The array must be - * sufficiently large (at least @c nmesh elements) and may not - * overlap the input array. Output meshes map one-to-one to - * their corresponding input meshes. The meshes are allocated - * by the function. - * @param discard_input If true is passed, input meshes are - * deleted after the subdivision is complete. This can - * improve performance because it allows the optimization - * of reusing existing meshes for intermediate results. - * @param num Number of subdivisions to perform. - * @pre nmesh != 0, smesh and out may not overlap*/ - virtual void Subdivide ( - aiMesh** smesh, - size_t nmesh, - aiMesh** out, - unsigned int num, - bool discard_input = false) = 0; - -}; - -inline -Subdivider::~Subdivider() { - // empty -} - -} // end namespace Assimp - - -#endif // !! AI_SUBDISIVION_H_INC - diff --git a/thirdparty/assimp/include/assimp/TinyFormatter.h b/thirdparty/assimp/include/assimp/TinyFormatter.h deleted file mode 100644 index 6227e42c52..0000000000 --- a/thirdparty/assimp/include/assimp/TinyFormatter.h +++ /dev/null @@ -1,158 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file TinyFormatter.h - * @brief Utility to format log messages more easily. Introduced - * to get rid of the boost::format dependency. Much slinker, - * basically just extends stringstream. - */ -#pragma once -#ifndef INCLUDED_TINY_FORMATTER_H -#define INCLUDED_TINY_FORMATTER_H - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <sstream> - -namespace Assimp { -namespace Formatter { - -// ------------------------------------------------------------------------------------------------ -/** stringstream utility. Usage: - * @code - * void writelog(const std::string&s); - * void writelog(const std::wstring&s); - * ... - * writelog(format()<< "hi! this is a number: " << 4); - * writelog(wformat()<< L"hi! this is a number: " << 4); - * - * @endcode */ -template < typename T, - typename CharTraits = std::char_traits<T>, - typename Allocator = std::allocator<T> > -class basic_formatter { -public: - typedef class std::basic_string<T,CharTraits,Allocator> string; - typedef class std::basic_ostringstream<T,CharTraits,Allocator> stringstream; - - basic_formatter() { - // empty - } - - /* Allow basic_formatter<T>'s to be used almost interchangeably - * with std::(w)string or const (w)char* arguments because the - * conversion c'tor is called implicitly. */ - template <typename TT> - basic_formatter(const TT& sin) { - underlying << sin; - } - - - // The problem described here: - // https://sourceforge.net/tracker/?func=detail&atid=1067632&aid=3358562&group_id=226462 - // can also cause trouble here. Apparently, older gcc versions sometimes copy temporaries - // being bound to const ref& function parameters. Copying streams is not permitted, though. - // This workaround avoids this by manually specifying a copy ctor. 
-#if !defined(__GNUC__) || !defined(__APPLE__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) - explicit basic_formatter(const basic_formatter& other) { - underlying << (string)other; - } -#endif - - operator string () const { - return underlying.str(); - } - - /* note - this is declared const because binding temporaries does only - * work for const references, so many function prototypes will - * include const basic_formatter<T>& s but might still want to - * modify the formatted string without the need for a full copy.*/ - template <typename TToken> - const basic_formatter& operator << (const TToken& s) const { - underlying << s; - return *this; - } - - template <typename TToken> - basic_formatter& operator << (const TToken& s) { - underlying << s; - return *this; - } - - - // comma operator overloaded as well, choose your preferred way. - template <typename TToken> - const basic_formatter& operator, (const TToken& s) const { - underlying << s; - return *this; - } - - template <typename TToken> - basic_formatter& operator, (const TToken& s) { - underlying << s; - return *this; - } - - // Fix for MSVC8 - // See https://sourceforge.net/projects/assimp/forums/forum/817654/topic/4372824 - template <typename TToken> - basic_formatter& operator, (TToken& s) { - underlying << s; - return *this; - } - - -private: - mutable stringstream underlying; -}; - - -typedef basic_formatter< char > format; -typedef basic_formatter< wchar_t > wformat; - -} // ! namespace Formatter - -} // ! namespace Assimp - -#endif diff --git a/thirdparty/assimp/include/assimp/Vertex.h b/thirdparty/assimp/include/assimp/Vertex.h deleted file mode 100644 index 5e63db5fe4..0000000000 --- a/thirdparty/assimp/include/assimp/Vertex.h +++ /dev/null @@ -1,297 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ -/** @file Defines a helper class to represent an interleaved vertex - along with arithmetic operations to support vertex operations - such as subdivision, smoothing etc. - - While the code is kept as general as possible, arithmetic operations - that are not currently well-defined (and would cause compile errors - due to missing operators in the math library), are commented. - */ -#pragma once -#ifndef AI_VERTEX_H_INC -#define AI_VERTEX_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/vector3.h> -#include <assimp/mesh.h> -#include <assimp/ai_assert.h> - -#include <functional> - -namespace Assimp { - - /////////////////////////////////////////////////////////////////////////// - // std::plus-family operates on operands with identical types - we need to - // support all the (vectype op float) combinations in vector maths. - // Providing T(float) would open the way to endless implicit conversions. - /////////////////////////////////////////////////////////////////////////// - namespace Intern { - template <typename T0, typename T1, typename TRES = T0> struct plus { - TRES operator() (const T0& t0, const T1& t1) const { - return t0+t1; - } - }; - template <typename T0, typename T1, typename TRES = T0> struct minus { - TRES operator() (const T0& t0, const T1& t1) const { - return t0-t1; - } - }; - template <typename T0, typename T1, typename TRES = T0> struct multiplies { - TRES operator() (const T0& t0, const T1& t1) const { - return t0*t1; - } - }; - template <typename T0, typename T1, typename TRES = T0> struct divides { - TRES operator() (const T0& t0, const T1& t1) const { - return t0/t1; - } - }; - } - -// ------------------------------------------------------------------------------------------------ -/** Intermediate description a vertex with all possible components. Defines a full set of - * operators, so you may use such a 'Vertex' in basic arithmetics. All operators are applied - * to *all* vertex components equally. This is useful for stuff like interpolation - * or subdivision, but won't work if special handling is required for some vertex components. 
*/ -// ------------------------------------------------------------------------------------------------ -class Vertex { - friend Vertex operator + (const Vertex&,const Vertex&); - friend Vertex operator - (const Vertex&,const Vertex&); - friend Vertex operator * (const Vertex&,ai_real); - friend Vertex operator / (const Vertex&,ai_real); - friend Vertex operator * (ai_real, const Vertex&); - -public: - Vertex() {} - - // ---------------------------------------------------------------------------- - /** Extract a particular vertex from a mesh and interleave all components */ - explicit Vertex(const aiMesh* msh, unsigned int idx) { - ai_assert(idx < msh->mNumVertices); - position = msh->mVertices[idx]; - - if (msh->HasNormals()) { - normal = msh->mNormals[idx]; - } - - if (msh->HasTangentsAndBitangents()) { - tangent = msh->mTangents[idx]; - bitangent = msh->mBitangents[idx]; - } - - for (unsigned int i = 0; msh->HasTextureCoords(i); ++i) { - texcoords[i] = msh->mTextureCoords[i][idx]; - } - - for (unsigned int i = 0; msh->HasVertexColors(i); ++i) { - colors[i] = msh->mColors[i][idx]; - } - } - - // ---------------------------------------------------------------------------- - /** Extract a particular vertex from a anim mesh and interleave all components */ - explicit Vertex(const aiAnimMesh* msh, unsigned int idx) { - ai_assert(idx < msh->mNumVertices); - position = msh->mVertices[idx]; - - if (msh->HasNormals()) { - normal = msh->mNormals[idx]; - } - - if (msh->HasTangentsAndBitangents()) { - tangent = msh->mTangents[idx]; - bitangent = msh->mBitangents[idx]; - } - - for (unsigned int i = 0; msh->HasTextureCoords(i); ++i) { - texcoords[i] = msh->mTextureCoords[i][idx]; - } - - for (unsigned int i = 0; msh->HasVertexColors(i); ++i) { - colors[i] = msh->mColors[i][idx]; - } - } - - Vertex& operator += (const Vertex& v) { - *this = *this+v; - return *this; - } - - Vertex& operator -= (const Vertex& v) { - *this = *this-v; - return *this; - } - - Vertex& operator *= (ai_real v) { - *this = *this*v; - return *this; - } - - Vertex& operator /= (ai_real v) { - *this = *this/v; - return *this; - } - - // ---------------------------------------------------------------------------- - /** Convert back to non-interleaved storage */ - void SortBack(aiMesh* out, unsigned int idx) const { - ai_assert(idx<out->mNumVertices); - out->mVertices[idx] = position; - - if (out->HasNormals()) { - out->mNormals[idx] = normal; - } - - if (out->HasTangentsAndBitangents()) { - out->mTangents[idx] = tangent; - out->mBitangents[idx] = bitangent; - } - - for(unsigned int i = 0; out->HasTextureCoords(i); ++i) { - out->mTextureCoords[i][idx] = texcoords[i]; - } - - for(unsigned int i = 0; out->HasVertexColors(i); ++i) { - out->mColors[i][idx] = colors[i]; - } - } - -private: - - // ---------------------------------------------------------------------------- - /** Construct from two operands and a binary operation to combine them */ - template <template <typename t> class op> static Vertex BinaryOp(const Vertex& v0, const Vertex& v1) { - // this is a heavy task for the compiler to optimize ... 
*pray* - - Vertex res; - res.position = op<aiVector3D>()(v0.position,v1.position); - res.normal = op<aiVector3D>()(v0.normal,v1.normal); - res.tangent = op<aiVector3D>()(v0.tangent,v1.tangent); - res.bitangent = op<aiVector3D>()(v0.bitangent,v1.bitangent); - - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - res.texcoords[i] = op<aiVector3D>()(v0.texcoords[i],v1.texcoords[i]); - } - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i) { - res.colors[i] = op<aiColor4D>()(v0.colors[i],v1.colors[i]); - } - return res; - } - - // ---------------------------------------------------------------------------- - /** This time binary arithmetics of v0 with a floating-point number */ - template <template <typename, typename, typename> class op> static Vertex BinaryOp(const Vertex& v0, ai_real f) { - // this is a heavy task for the compiler to optimize ... *pray* - - Vertex res; - res.position = op<aiVector3D,ai_real,aiVector3D>()(v0.position,f); - res.normal = op<aiVector3D,ai_real,aiVector3D>()(v0.normal,f); - res.tangent = op<aiVector3D,ai_real,aiVector3D>()(v0.tangent,f); - res.bitangent = op<aiVector3D,ai_real,aiVector3D>()(v0.bitangent,f); - - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - res.texcoords[i] = op<aiVector3D,ai_real,aiVector3D>()(v0.texcoords[i],f); - } - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i) { - res.colors[i] = op<aiColor4D,ai_real,aiColor4D>()(v0.colors[i],f); - } - return res; - } - - // ---------------------------------------------------------------------------- - /** This time binary arithmetics of v0 with a floating-point number */ - template <template <typename, typename, typename> class op> static Vertex BinaryOp(ai_real f, const Vertex& v0) { - // this is a heavy task for the compiler to optimize ... 
*pray* - - Vertex res; - res.position = op<ai_real,aiVector3D,aiVector3D>()(f,v0.position); - res.normal = op<ai_real,aiVector3D,aiVector3D>()(f,v0.normal); - res.tangent = op<ai_real,aiVector3D,aiVector3D>()(f,v0.tangent); - res.bitangent = op<ai_real,aiVector3D,aiVector3D>()(f,v0.bitangent); - - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { - res.texcoords[i] = op<ai_real,aiVector3D,aiVector3D>()(f,v0.texcoords[i]); - } - for (unsigned int i = 0; i < AI_MAX_NUMBER_OF_COLOR_SETS; ++i) { - res.colors[i] = op<ai_real,aiColor4D,aiColor4D>()(f,v0.colors[i]); - } - return res; - } - -public: - - aiVector3D position; - aiVector3D normal; - aiVector3D tangent, bitangent; - - aiVector3D texcoords[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - aiColor4D colors[AI_MAX_NUMBER_OF_COLOR_SETS]; -}; - -// ------------------------------------------------------------------------------------------------ -AI_FORCE_INLINE Vertex operator + (const Vertex& v0,const Vertex& v1) { - return Vertex::BinaryOp<std::plus>(v0,v1); -} - -AI_FORCE_INLINE Vertex operator - (const Vertex& v0,const Vertex& v1) { - return Vertex::BinaryOp<std::minus>(v0,v1); -} - -AI_FORCE_INLINE Vertex operator * (const Vertex& v0,ai_real f) { - return Vertex::BinaryOp<Intern::multiplies>(v0,f); -} - -AI_FORCE_INLINE Vertex operator / (const Vertex& v0,ai_real f) { - return Vertex::BinaryOp<Intern::multiplies>(v0,1.f/f); -} - -AI_FORCE_INLINE Vertex operator * (ai_real f,const Vertex& v0) { - return Vertex::BinaryOp<Intern::multiplies>(f,v0); -} - -} - -#endif // AI_VERTEX_H_INC diff --git a/thirdparty/assimp/include/assimp/XMLTools.h b/thirdparty/assimp/include/assimp/XMLTools.h deleted file mode 100644 index 95f12cdebf..0000000000 --- a/thirdparty/assimp/include/assimp/XMLTools.h +++ /dev/null @@ -1,88 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-
-----------------------------------------------------------------------
-*/
-
-#pragma once
-#ifndef INCLUDED_ASSIMP_XML_TOOLS_H
-#define INCLUDED_ASSIMP_XML_TOOLS_H
-
-#ifdef __GNUC__
-#   pragma GCC system_header
-#endif
-
-#include <string>
-
-namespace Assimp {
-    // XML escape the 5 XML special characters (",',<,> and &) in |data|
-    // Based on http://stackoverflow.com/questions/5665231
-    std::string XMLEscape(const std::string& data) {
-        std::string buffer;
-
-        const size_t size = data.size();
-        buffer.reserve(size + size / 8);
-        for(size_t i = 0; i < size; ++i) {
-            const char c = data[i];
-            switch(c) {
-            case '&' :
-                buffer.append("&amp;");
-                break;
-            case '\"':
-                buffer.append("&quot;");
-                break;
-            case '\'':
-                buffer.append("&apos;");
-                break;
-            case '<' :
-                buffer.append("&lt;");
-                break;
-            case '>' :
-                buffer.append("&gt;");
-                break;
-            default:
-                buffer.append(&c, 1);
-                break;
-            }
-        }
-        return buffer;
-    }
-}
-
-#endif // INCLUDED_ASSIMP_XML_TOOLS_H
diff --git a/thirdparty/assimp/include/assimp/aabb.h b/thirdparty/assimp/include/assimp/aabb.h
deleted file mode 100644
index 83bb62256b..0000000000
--- a/thirdparty/assimp/include/assimp/aabb.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
----------------------------------------------------------------------------
-Open Asset Import Library (assimp)
----------------------------------------------------------------------------
-
-Copyright (c) 2006-2019, assimp team
-
-All rights reserved.
-
-Redistribution and use of this software in source and binary forms,
-with or without modification, are permitted provided that the following
-conditions are met:
-
-* Redistributions of source code must retain the above
-copyright notice, this list of conditions and the
-following disclaimer.
-
-* Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the
-following disclaimer in the documentation and/or other
-materials provided with the distribution.
-
-* Neither the name of the assimp team, nor the names of its
-contributors may be used to endorse or promote products
-derived from this software without specific prior
-written permission of the assimp team.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------- -*/ - -#pragma once -#ifndef AI_AABB_H_INC -#define AI_AABB_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/vector3.h> - -struct aiAABB { - C_STRUCT aiVector3D mMin; - C_STRUCT aiVector3D mMax; - -#ifdef __cplusplus - - aiAABB() - : mMin() - , mMax() { - // empty - } - - aiAABB(const aiVector3D &min, const aiVector3D &max ) - : mMin(min) - , mMax(max) { - // empty - } - - ~aiAABB() { - // empty - } - -#endif // __cplusplus - -}; - - -#endif // AI_AABB_H_INC diff --git a/thirdparty/assimp/include/assimp/ai_assert.h b/thirdparty/assimp/include/assimp/ai_assert.h deleted file mode 100644 index 2b32b01d3e..0000000000 --- a/thirdparty/assimp/include/assimp/ai_assert.h +++ /dev/null @@ -1,61 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -#pragma once -#ifndef AI_ASSERT_H_INC -#define AI_ASSERT_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef ASSIMP_BUILD_DEBUG -# include <assert.h> -# define ai_assert(expression) assert( expression ) -# define ai_assert_entry() assert( false ) -#else -# define ai_assert(expression) -# define ai_assert_entry() -#endif // ASSIMP_BUILD_DEBUG - -#endif // AI_ASSERT_H_INC - diff --git a/thirdparty/assimp/include/assimp/anim.h b/thirdparty/assimp/include/assimp/anim.h deleted file mode 100644 index e208b11adb..0000000000 --- a/thirdparty/assimp/include/assimp/anim.h +++ /dev/null @@ -1,581 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** - * @file anim.h - * @brief Defines the data structures in which the imported animations - * are returned. - */ -#pragma once -#ifndef AI_ANIM_H_INC -#define AI_ANIM_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <assimp/quaternion.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -/** A time-value pair specifying a certain 3D vector for the given time. */ -struct aiVectorKey -{ - /** The time of this key */ - double mTime; - - /** The value of this key */ - C_STRUCT aiVector3D mValue; - -#ifdef __cplusplus - - /// @brief The default constructor. - aiVectorKey() AI_NO_EXCEPT - : mTime( 0.0 ) - , mValue() { - // empty - } - - /// @brief Construction from a given time and key value. - - aiVectorKey(double time, const aiVector3D& value) - : mTime( time ) - , mValue( value ) { - // empty - } - - typedef aiVector3D elem_type; - - // Comparison operators. For use with std::find(); - bool operator == (const aiVectorKey& rhs) const { - return rhs.mValue == this->mValue; - } - bool operator != (const aiVectorKey& rhs ) const { - return rhs.mValue != this->mValue; - } - - // Relational operators. For use with std::sort(); - bool operator < (const aiVectorKey& rhs ) const { - return mTime < rhs.mTime; - } - bool operator > (const aiVectorKey& rhs ) const { - return mTime > rhs.mTime; - } -#endif // __cplusplus -}; - -// --------------------------------------------------------------------------- -/** A time-value pair specifying a rotation for the given time. - * Rotations are expressed with quaternions. 
*/ -struct aiQuatKey -{ - /** The time of this key */ - double mTime; - - /** The value of this key */ - C_STRUCT aiQuaternion mValue; - -#ifdef __cplusplus - aiQuatKey() AI_NO_EXCEPT - : mTime( 0.0 ) - , mValue() { - // empty - } - - /** Construction from a given time and key value */ - aiQuatKey(double time, const aiQuaternion& value) - : mTime (time) - , mValue (value) - {} - - typedef aiQuaternion elem_type; - - // Comparison operators. For use with std::find(); - bool operator == (const aiQuatKey& rhs ) const { - return rhs.mValue == this->mValue; - } - bool operator != (const aiQuatKey& rhs ) const { - return rhs.mValue != this->mValue; - } - - // Relational operators. For use with std::sort(); - bool operator < (const aiQuatKey& rhs ) const { - return mTime < rhs.mTime; - } - bool operator > (const aiQuatKey& rhs ) const { - return mTime > rhs.mTime; - } -#endif -}; - -// --------------------------------------------------------------------------- -/** Binds a anim-mesh to a specific point in time. */ -struct aiMeshKey -{ - /** The time of this key */ - double mTime; - - /** Index into the aiMesh::mAnimMeshes array of the - * mesh corresponding to the #aiMeshAnim hosting this - * key frame. The referenced anim mesh is evaluated - * according to the rules defined in the docs for #aiAnimMesh.*/ - unsigned int mValue; - -#ifdef __cplusplus - - aiMeshKey() AI_NO_EXCEPT - : mTime(0.0) - , mValue(0) - { - } - - /** Construction from a given time and key value */ - aiMeshKey(double time, const unsigned int value) - : mTime (time) - , mValue (value) - {} - - typedef unsigned int elem_type; - - // Comparison operators. For use with std::find(); - bool operator == (const aiMeshKey& o) const { - return o.mValue == this->mValue; - } - bool operator != (const aiMeshKey& o) const { - return o.mValue != this->mValue; - } - - // Relational operators. For use with std::sort(); - bool operator < (const aiMeshKey& o) const { - return mTime < o.mTime; - } - bool operator > (const aiMeshKey& o) const { - return mTime > o.mTime; - } - -#endif -}; - -// --------------------------------------------------------------------------- -/** Binds a morph anim mesh to a specific point in time. */ -struct aiMeshMorphKey -{ - /** The time of this key */ - double mTime; - - /** The values and weights at the time of this key */ - unsigned int *mValues; - double *mWeights; - - /** The number of values and weights */ - unsigned int mNumValuesAndWeights; -#ifdef __cplusplus - aiMeshMorphKey() AI_NO_EXCEPT - : mTime(0.0) - , mValues(nullptr) - , mWeights(nullptr) - , mNumValuesAndWeights(0) - { - - } - - ~aiMeshMorphKey() - { - if (mNumValuesAndWeights && mValues && mWeights) { - delete [] mValues; - delete [] mWeights; - } - } -#endif -}; - -// --------------------------------------------------------------------------- -/** Defines how an animation channel behaves outside the defined time - * range. This corresponds to aiNodeAnim::mPreState and - * aiNodeAnim::mPostState.*/ -enum aiAnimBehaviour -{ - /** The value from the default node transformation is taken*/ - aiAnimBehaviour_DEFAULT = 0x0, - - /** The nearest key value is used without interpolation */ - aiAnimBehaviour_CONSTANT = 0x1, - - /** The value of the nearest two keys is linearly - * extrapolated for the current time value.*/ - aiAnimBehaviour_LINEAR = 0x2, - - /** The animation is repeated. 
- * - * If the animation key go from n to m and the current - * time is t, use the value at (t-n) % (|m-n|).*/ - aiAnimBehaviour_REPEAT = 0x3, - - /** This value is not used, it is just here to force the - * the compiler to map this enum to a 32 Bit integer */ -#ifndef SWIG - _aiAnimBehaviour_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** Describes the animation of a single node. The name specifies the - * bone/node which is affected by this animation channel. The keyframes - * are given in three separate series of values, one each for position, - * rotation and scaling. The transformation matrix computed from these - * values replaces the node's original transformation matrix at a - * specific time. - * This means all keys are absolute and not relative to the bone default pose. - * The order in which the transformations are applied is - * - as usual - scaling, rotation, translation. - * - * @note All keys are returned in their correct, chronological order. - * Duplicate keys don't pass the validation step. Most likely there - * will be no negative time values, but they are not forbidden also ( so - * implementations need to cope with them! ) */ -struct aiNodeAnim { - /** The name of the node affected by this animation. The node - * must exist and it must be unique.*/ - C_STRUCT aiString mNodeName; - - /** The number of position keys */ - unsigned int mNumPositionKeys; - - /** The position keys of this animation channel. Positions are - * specified as 3D vector. The array is mNumPositionKeys in size. - * - * If there are position keys, there will also be at least one - * scaling and one rotation key.*/ - C_STRUCT aiVectorKey* mPositionKeys; - - /** The number of rotation keys */ - unsigned int mNumRotationKeys; - - /** The rotation keys of this animation channel. Rotations are - * given as quaternions, which are 4D vectors. The array is - * mNumRotationKeys in size. - * - * If there are rotation keys, there will also be at least one - * scaling and one position key. */ - C_STRUCT aiQuatKey* mRotationKeys; - - /** The number of scaling keys */ - unsigned int mNumScalingKeys; - - /** The scaling keys of this animation channel. Scalings are - * specified as 3D vector. The array is mNumScalingKeys in size. - * - * If there are scaling keys, there will also be at least one - * position and one rotation key.*/ - C_STRUCT aiVectorKey* mScalingKeys; - - /** Defines how the animation behaves before the first - * key is encountered. - * - * The default value is aiAnimBehaviour_DEFAULT (the original - * transformation matrix of the affected node is used).*/ - C_ENUM aiAnimBehaviour mPreState; - - /** Defines how the animation behaves after the last - * key was processed. - * - * The default value is aiAnimBehaviour_DEFAULT (the original - * transformation matrix of the affected node is taken).*/ - C_ENUM aiAnimBehaviour mPostState; - -#ifdef __cplusplus - aiNodeAnim() AI_NO_EXCEPT - : mNumPositionKeys( 0 ) - , mPositionKeys( nullptr ) - , mNumRotationKeys( 0 ) - , mRotationKeys( nullptr ) - , mNumScalingKeys( 0 ) - , mScalingKeys( nullptr ) - , mPreState( aiAnimBehaviour_DEFAULT ) - , mPostState( aiAnimBehaviour_DEFAULT ) { - // empty - } - - ~aiNodeAnim() { - delete [] mPositionKeys; - delete [] mRotationKeys; - delete [] mScalingKeys; - } -#endif // __cplusplus -}; - -// --------------------------------------------------------------------------- -/** Describes vertex-based animations for a single mesh or a group of - * meshes. 
Meshes carry the animation data for each frame in their - * aiMesh::mAnimMeshes array. The purpose of aiMeshAnim is to - * define keyframes linking each mesh attachment to a particular - * point in time. */ -struct aiMeshAnim -{ - /** Name of the mesh to be animated. An empty string is not allowed, - * animated meshes need to be named (not necessarily uniquely, - * the name can basically serve as wild-card to select a group - * of meshes with similar animation setup)*/ - C_STRUCT aiString mName; - - /** Size of the #mKeys array. Must be 1, at least. */ - unsigned int mNumKeys; - - /** Key frames of the animation. May not be NULL. */ - C_STRUCT aiMeshKey* mKeys; - -#ifdef __cplusplus - - aiMeshAnim() AI_NO_EXCEPT - : mNumKeys() - , mKeys() - {} - - ~aiMeshAnim() - { - delete[] mKeys; - } - -#endif -}; - -// --------------------------------------------------------------------------- -/** Describes a morphing animation of a given mesh. */ -struct aiMeshMorphAnim -{ - /** Name of the mesh to be animated. An empty string is not allowed, - * animated meshes need to be named (not necessarily uniquely, - * the name can basically serve as wildcard to select a group - * of meshes with similar animation setup)*/ - C_STRUCT aiString mName; - - /** Size of the #mKeys array. Must be 1, at least. */ - unsigned int mNumKeys; - - /** Key frames of the animation. May not be NULL. */ - C_STRUCT aiMeshMorphKey* mKeys; - -#ifdef __cplusplus - - aiMeshMorphAnim() AI_NO_EXCEPT - : mNumKeys() - , mKeys() - {} - - ~aiMeshMorphAnim() - { - delete[] mKeys; - } - -#endif -}; - -// --------------------------------------------------------------------------- -/** An animation consists of key-frame data for a number of nodes. For - * each node affected by the animation a separate series of data is given.*/ -struct aiAnimation { - /** The name of the animation. If the modeling package this data was - * exported from does support only a single animation channel, this - * name is usually empty (length is zero). */ - C_STRUCT aiString mName; - - /** Duration of the animation in ticks. */ - double mDuration; - - /** Ticks per second. 0 if not specified in the imported file */ - double mTicksPerSecond; - - /** The number of bone animation channels. Each channel affects - * a single node. */ - unsigned int mNumChannels; - - /** The node animation channels. Each channel affects a single node. - * The array is mNumChannels in size. */ - C_STRUCT aiNodeAnim** mChannels; - - - /** The number of mesh animation channels. Each channel affects - * a single mesh and defines vertex-based animation. */ - unsigned int mNumMeshChannels; - - /** The mesh animation channels. Each channel affects a single mesh. - * The array is mNumMeshChannels in size. */ - C_STRUCT aiMeshAnim** mMeshChannels; - - /** The number of mesh animation channels. Each channel affects - * a single mesh and defines morphing animation. */ - unsigned int mNumMorphMeshChannels; - - /** The morph mesh animation channels. Each channel affects a single mesh. - * The array is mNumMorphMeshChannels in size. */ - C_STRUCT aiMeshMorphAnim **mMorphMeshChannels; - -#ifdef __cplusplus - aiAnimation() AI_NO_EXCEPT - : mDuration(-1.) - , mTicksPerSecond(0.) 
- , mNumChannels(0) - , mChannels(nullptr) - , mNumMeshChannels(0) - , mMeshChannels(nullptr) - , mNumMorphMeshChannels(0) - , mMorphMeshChannels(nullptr) { - // empty - } - - ~aiAnimation() { - // DO NOT REMOVE THIS ADDITIONAL CHECK - if ( mNumChannels && mChannels ) { - for( unsigned int a = 0; a < mNumChannels; a++) { - delete mChannels[ a ]; - } - - delete [] mChannels; - } - if (mNumMeshChannels && mMeshChannels) { - for( unsigned int a = 0; a < mNumMeshChannels; a++) { - delete mMeshChannels[a]; - } - - delete [] mMeshChannels; - } - if (mNumMorphMeshChannels && mMorphMeshChannels) { - for( unsigned int a = 0; a < mNumMorphMeshChannels; a++) { - delete mMorphMeshChannels[a]; - } - - delete [] mMorphMeshChannels; - } - } -#endif // __cplusplus -}; - -#ifdef __cplusplus - -} - -/// @brief Some C++ utilities for inter- and extrapolation -namespace Assimp { - -// --------------------------------------------------------------------------- -/** - * @brief CPP-API: Utility class to simplify interpolations of various data types. - * - * The type of interpolation is chosen automatically depending on the - * types of the arguments. - */ -template <typename T> -struct Interpolator -{ - // ------------------------------------------------------------------ - /** @brief Get the result of the interpolation between a,b. - * - * The interpolation algorithm depends on the type of the operands. - * aiQuaternion's and aiQuatKey's SLERP, the rest does a simple - * linear interpolation. */ - void operator () (T& out,const T& a, const T& b, ai_real d) const { - out = a + (b-a)*d; - } -}; // ! Interpolator <T> - -//! @cond Never - -template <> -struct Interpolator <aiQuaternion> { - void operator () (aiQuaternion& out,const aiQuaternion& a, - const aiQuaternion& b, ai_real d) const - { - aiQuaternion::Interpolate(out,a,b,d); - } -}; // ! Interpolator <aiQuaternion> - -template <> -struct Interpolator <unsigned int> { - void operator () (unsigned int& out,unsigned int a, - unsigned int b, ai_real d) const - { - out = d>0.5f ? b : a; - } -}; // ! Interpolator <aiQuaternion> - -template <> -struct Interpolator<aiVectorKey> { - void operator () (aiVector3D& out,const aiVectorKey& a, - const aiVectorKey& b, ai_real d) const - { - Interpolator<aiVector3D> ipl; - ipl(out,a.mValue,b.mValue,d); - } -}; // ! Interpolator <aiVectorKey> - -template <> -struct Interpolator<aiQuatKey> { - void operator () (aiQuaternion& out, const aiQuatKey& a, - const aiQuatKey& b, ai_real d) const - { - Interpolator<aiQuaternion> ipl; - ipl(out,a.mValue,b.mValue,d); - } -}; // ! Interpolator <aiQuatKey> - -template <> -struct Interpolator<aiMeshKey> { - void operator () (unsigned int& out, const aiMeshKey& a, - const aiMeshKey& b, ai_real d) const - { - Interpolator<unsigned int> ipl; - ipl(out,a.mValue,b.mValue,d); - } -}; // ! Interpolator <aiQuatKey> - -//! @endcond - -} // ! end namespace Assimp - -#endif // __cplusplus - -#endif // AI_ANIM_H_INC diff --git a/thirdparty/assimp/include/assimp/camera.h b/thirdparty/assimp/include/assimp/camera.h deleted file mode 100644 index adb749ff59..0000000000 --- a/thirdparty/assimp/include/assimp/camera.h +++ /dev/null @@ -1,225 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file camera.h - * @brief Defines the aiCamera data structure - */ - -#pragma once -#ifndef AI_CAMERA_H_INC -#define AI_CAMERA_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include "types.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -/** Helper structure to describe a virtual camera. - * - * Cameras have a representation in the node graph and can be animated. - * An important aspect is that the camera itself is also part of the - * scene-graph. This means, any values such as the look-at vector are not - * *absolute*, they're <b>relative</b> to the coordinate system defined - * by the node which corresponds to the camera. This allows for camera - * animations. For static cameras parameters like the 'look-at' or 'up' vectors - * are usually specified directly in aiCamera, but beware, they could also - * be encoded in the node transformation. The following (pseudo)code sample - * shows how to do it: <br><br> - * @code - * // Get the camera matrix for a camera at a specific time - * // if the node hierarchy for the camera does not contain - * // at least one animated node this is a static computation - * get-camera-matrix (node sceneRoot, camera cam) : matrix - * { - * node cnd = find-node-for-camera(cam) - * matrix cmt = identity() - * - * // as usual - get the absolute camera transformation for this frame - * for each node nd in hierarchy from sceneRoot to cnd - * matrix cur - * if (is-animated(nd)) - * cur = eval-animation(nd) - * else cur = nd->mTransformation; - * cmt = mult-matrices( cmt, cur ) - * end for - * - * // now multiply with the camera's own local transform - * cam = mult-matrices (cam, get-camera-matrix(cmt) ) - * } - * @endcode - * - * @note some file formats (such as 3DS, ASE) export a "target point" - - * the point the camera is looking at (it can even be animated). 
Assimp - * writes the target point as a subnode of the camera's main node, - * called "<camName>.Target". However this is just additional information - * then the transformation tracks of the camera main node make the - * camera already look in the right direction. - * -*/ -struct aiCamera -{ - /** The name of the camera. - * - * There must be a node in the scenegraph with the same name. - * This node specifies the position of the camera in the scene - * hierarchy and can be animated. - */ - C_STRUCT aiString mName; - - /** Position of the camera relative to the coordinate space - * defined by the corresponding node. - * - * The default value is 0|0|0. - */ - C_STRUCT aiVector3D mPosition; - - /** 'Up' - vector of the camera coordinate system relative to - * the coordinate space defined by the corresponding node. - * - * The 'right' vector of the camera coordinate system is - * the cross product of the up and lookAt vectors. - * The default value is 0|1|0. The vector - * may be normalized, but it needn't. - */ - C_STRUCT aiVector3D mUp; - - - /** 'LookAt' - vector of the camera coordinate system relative to - * the coordinate space defined by the corresponding node. - * - * This is the viewing direction of the user. - * The default value is 0|0|1. The vector - * may be normalized, but it needn't. - */ - C_STRUCT aiVector3D mLookAt; - - /** Half horizontal field of view angle, in radians. - * - * The field of view angle is the angle between the center - * line of the screen and the left or right border. - * The default value is 1/4PI. - */ - float mHorizontalFOV; - - /** Distance of the near clipping plane from the camera. - * - * The value may not be 0.f (for arithmetic reasons to prevent - * a division through zero). The default value is 0.1f. - */ - float mClipPlaneNear; - - /** Distance of the far clipping plane from the camera. - * - * The far clipping plane must, of course, be further away than the - * near clipping plane. The default value is 1000.f. The ratio - * between the near and the far plane should not be too - * large (between 1000-10000 should be ok) to avoid floating-point - * inaccuracies which could lead to z-fighting. - */ - float mClipPlaneFar; - - /** Screen aspect ratio. - * - * This is the ration between the width and the height of the - * screen. Typical values are 4/3, 1/2 or 1/1. This value is - * 0 if the aspect ratio is not defined in the source file. - * 0 is also the default value. - */ - float mAspect; - -#ifdef __cplusplus - - aiCamera() AI_NO_EXCEPT - : mUp (0.f,1.f,0.f) - , mLookAt (0.f,0.f,1.f) - , mHorizontalFOV (0.25f * (float)AI_MATH_PI) - , mClipPlaneNear (0.1f) - , mClipPlaneFar (1000.f) - , mAspect (0.f) - {} - - /** @brief Get a *right-handed* camera matrix from me - * @param out Camera matrix to be filled - */ - void GetCameraMatrix (aiMatrix4x4& out) const - { - /** todo: test ... 
should work, but i'm not absolutely sure */ - - /** We don't know whether these vectors are already normalized ...*/ - aiVector3D zaxis = mLookAt; zaxis.Normalize(); - aiVector3D yaxis = mUp; yaxis.Normalize(); - aiVector3D xaxis = mUp^mLookAt; xaxis.Normalize(); - - out.a4 = -(xaxis * mPosition); - out.b4 = -(yaxis * mPosition); - out.c4 = -(zaxis * mPosition); - - out.a1 = xaxis.x; - out.a2 = xaxis.y; - out.a3 = xaxis.z; - - out.b1 = yaxis.x; - out.b2 = yaxis.y; - out.b3 = yaxis.z; - - out.c1 = zaxis.x; - out.c2 = zaxis.y; - out.c3 = zaxis.z; - - out.d1 = out.d2 = out.d3 = 0.f; - out.d4 = 1.f; - } - -#endif -}; - - -#ifdef __cplusplus -} -#endif - -#endif // AI_CAMERA_H_INC diff --git a/thirdparty/assimp/include/assimp/cexport.h b/thirdparty/assimp/include/assimp/cexport.h deleted file mode 100644 index cbc0253d50..0000000000 --- a/thirdparty/assimp/include/assimp/cexport.h +++ /dev/null @@ -1,265 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above -copyright notice, this list of conditions and the -following disclaimer. - -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the -following disclaimer in the documentation and/or other -materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its -contributors may be used to endorse or promote products -derived from this software without specific prior -written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file cexport.h -* @brief Defines the C-API for the Assimp export interface -*/ -#pragma once -#ifndef AI_EXPORT_H_INC -#define AI_EXPORT_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifndef ASSIMP_BUILD_NO_EXPORT - -// Public ASSIMP data structures -#include <assimp/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -struct aiScene; // aiScene.h -struct aiFileIO; // aiFileIO.h - -// -------------------------------------------------------------------------------- -/** Describes an file format which Assimp can export to. Use #aiGetExportFormatCount() to -* learn how many export formats the current Assimp build supports and #aiGetExportFormatDescription() -* to retrieve a description of an export format option. 
-*/ -struct aiExportFormatDesc -{ - /// a short string ID to uniquely identify the export format. Use this ID string to - /// specify which file format you want to export to when calling #aiExportScene(). - /// Example: "dae" or "obj" - const char* id; - - /// A short description of the file format to present to users. Useful if you want - /// to allow the user to select an export format. - const char* description; - - /// Recommended file extension for the exported file in lower case. - const char* fileExtension; -}; - - -// -------------------------------------------------------------------------------- -/** Returns the number of export file formats available in the current Assimp build. - * Use aiGetExportFormatDescription() to retrieve infos of a specific export format. - */ -ASSIMP_API size_t aiGetExportFormatCount(void); - -// -------------------------------------------------------------------------------- -/** Returns a description of the nth export file format. Use #aiGetExportFormatCount() - * to learn how many export formats are supported. The description must be released by - * calling aiReleaseExportFormatDescription afterwards. - * @param pIndex Index of the export format to retrieve information for. Valid range is - * 0 to #aiGetExportFormatCount() - * @return A description of that specific export format. NULL if pIndex is out of range. - */ -ASSIMP_API const C_STRUCT aiExportFormatDesc* aiGetExportFormatDescription( size_t pIndex); - -// -------------------------------------------------------------------------------- -/** Release a description of the nth export file format. Must be returned by -* aiGetExportFormatDescription -* @param desc Pointer to the description -*/ -ASSIMP_API void aiReleaseExportFormatDescription( const C_STRUCT aiExportFormatDesc *desc ); - -// -------------------------------------------------------------------------------- -/** Create a modifiable copy of a scene. - * This is useful to import files via Assimp, change their topology and - * export them again. Since the scene returned by the various importer functions - * is const, a modifiable copy is needed. - * @param pIn Valid scene to be copied - * @param pOut Receives a modifyable copy of the scene. Use aiFreeScene() to - * delete it again. - */ -ASSIMP_API void aiCopyScene(const C_STRUCT aiScene* pIn, - C_STRUCT aiScene** pOut); - - -// -------------------------------------------------------------------------------- -/** Frees a scene copy created using aiCopyScene() */ -ASSIMP_API void aiFreeScene(const C_STRUCT aiScene* pIn); - -// -------------------------------------------------------------------------------- -/** Exports the given scene to a chosen file format and writes the result file(s) to disk. -* @param pScene The scene to export. Stays in possession of the caller, is not changed by the function. -* The scene is expected to conform to Assimp's Importer output format as specified -* in the @link data Data Structures Page @endlink. In short, this means the model data -* should use a right-handed coordinate systems, face winding should be counter-clockwise -* and the UV coordinate origin is assumed to be in the upper left. If your input data -* uses different conventions, have a look at the last parameter. -* @param pFormatId ID string to specify to which format you want to export to. Use -* aiGetExportFormatCount() / aiGetExportFormatDescription() to learn which export formats are available. 
-* @param pFileName Output file to write -* @param pPreprocessing Accepts any choice of the #aiPostProcessSteps enumerated -* flags, but in reality only a subset of them makes sense here. Specifying -* 'preprocessing' flags is useful if the input scene does not conform to -* Assimp's default conventions as specified in the @link data Data Structures Page @endlink. -* In short, this means the geometry data should use a right-handed coordinate systems, face -* winding should be counter-clockwise and the UV coordinate origin is assumed to be in -* the upper left. The #aiProcess_MakeLeftHanded, #aiProcess_FlipUVs and -* #aiProcess_FlipWindingOrder flags are used in the import side to allow users -* to have those defaults automatically adapted to their conventions. Specifying those flags -* for exporting has the opposite effect, respectively. Some other of the -* #aiPostProcessSteps enumerated values may be useful as well, but you'll need -* to try out what their effect on the exported file is. Many formats impose -* their own restrictions on the structure of the geometry stored therein, -* so some preprocessing may have little or no effect at all, or may be -* redundant as exporters would apply them anyhow. A good example -* is triangulation - whilst you can enforce it by specifying -* the #aiProcess_Triangulate flag, most export formats support only -* triangulate data so they would run the step anyway. -* -* If assimp detects that the input scene was directly taken from the importer side of -* the library (i.e. not copied using aiCopyScene and potetially modified afterwards), -* any postprocessing steps already applied to the scene will not be applied again, unless -* they show non-idempotent behaviour (#aiProcess_MakeLeftHanded, #aiProcess_FlipUVs and -* #aiProcess_FlipWindingOrder). -* @return a status code indicating the result of the export -* @note Use aiCopyScene() to get a modifiable copy of a previously -* imported scene. -*/ -ASSIMP_API aiReturn aiExportScene( const C_STRUCT aiScene* pScene, - const char* pFormatId, - const char* pFileName, - unsigned int pPreprocessing); - - -// -------------------------------------------------------------------------------- -/** Exports the given scene to a chosen file format using custom IO logic supplied by you. -* @param pScene The scene to export. Stays in possession of the caller, is not changed by the function. -* @param pFormatId ID string to specify to which format you want to export to. Use -* aiGetExportFormatCount() / aiGetExportFormatDescription() to learn which export formats are available. -* @param pFileName Output file to write -* @param pIO custom IO implementation to be used. Use this if you use your own storage methods. -* If none is supplied, a default implementation using standard file IO is used. Note that -* #aiExportSceneToBlob is provided as convenience function to export to memory buffers. -* @param pPreprocessing Please see the documentation for #aiExportScene -* @return a status code indicating the result of the export -* @note Include <aiFileIO.h> for the definition of #aiFileIO. -* @note Use aiCopyScene() to get a modifiable copy of a previously -* imported scene. -*/ -ASSIMP_API aiReturn aiExportSceneEx( const C_STRUCT aiScene* pScene, - const char* pFormatId, - const char* pFileName, - C_STRUCT aiFileIO* pIO, - unsigned int pPreprocessing ); - -// -------------------------------------------------------------------------------- -/** Describes a blob of exported scene data. 
Use #aiExportSceneToBlob() to create a blob containing an -* exported scene. The memory referred by this structure is owned by Assimp. -* to free its resources. Don't try to free the memory on your side - it will crash for most build configurations -* due to conflicting heaps. -* -* Blobs can be nested - each blob may reference another blob, which may in turn reference another blob and so on. -* This is used when exporters write more than one output file for a given #aiScene. See the remarks for -* #aiExportDataBlob::name for more information. -*/ -struct aiExportDataBlob -{ - /// Size of the data in bytes - size_t size; - - /// The data. - void* data; - - /** Name of the blob. An empty string always - indicates the first (and primary) blob, - which contains the actual file data. - Any other blobs are auxiliary files produced - by exporters (i.e. material files). Existence - of such files depends on the file format. Most - formats don't split assets across multiple files. - - If used, blob names usually contain the file - extension that should be used when writing - the data to disc. - */ - C_STRUCT aiString name; - - /** Pointer to the next blob in the chain or NULL if there is none. */ - C_STRUCT aiExportDataBlob * next; - -#ifdef __cplusplus - /// Default constructor - aiExportDataBlob() { size = 0; data = next = NULL; } - /// Releases the data - ~aiExportDataBlob() { delete [] static_cast<unsigned char*>( data ); delete next; } - -private: - // no copying - aiExportDataBlob(const aiExportDataBlob& ); - aiExportDataBlob& operator= (const aiExportDataBlob& ); -#endif // __cplusplus -}; - -// -------------------------------------------------------------------------------- -/** Exports the given scene to a chosen file format. Returns the exported data as a binary blob which -* you can write into a file or something. When you're done with the data, use #aiReleaseExportBlob() -* to free the resources associated with the export. -* @param pScene The scene to export. Stays in possession of the caller, is not changed by the function. -* @param pFormatId ID string to specify to which format you want to export to. Use -* #aiGetExportFormatCount() / #aiGetExportFormatDescription() to learn which export formats are available. -* @param pPreprocessing Please see the documentation for #aiExportScene -* @return the exported data or NULL in case of error -*/ -ASSIMP_API const C_STRUCT aiExportDataBlob* aiExportSceneToBlob( const C_STRUCT aiScene* pScene, const char* pFormatId, - unsigned int pPreprocessing ); - -// -------------------------------------------------------------------------------- -/** Releases the memory associated with the given exported data. Use this function to free a data blob -* returned by aiExportScene(). -* @param pData the data blob returned by #aiExportSceneToBlob -*/ -ASSIMP_API void aiReleaseExportBlob( const C_STRUCT aiExportDataBlob* pData ); - -#ifdef __cplusplus -} -#endif - -#endif // ASSIMP_BUILD_NO_EXPORT -#endif // AI_EXPORT_H_INC diff --git a/thirdparty/assimp/include/assimp/cfileio.h b/thirdparty/assimp/include/assimp/cfileio.h deleted file mode 100644 index be90999d87..0000000000 --- a/thirdparty/assimp/include/assimp/cfileio.h +++ /dev/null @@ -1,144 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file cfileio.h - * @brief Defines generic C routines to access memory-mapped files - */ -#pragma once -#ifndef AI_FILEIO_H_INC -#define AI_FILEIO_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -struct aiFileIO; -struct aiFile; - -// aiFile callbacks -typedef size_t (*aiFileWriteProc) (C_STRUCT aiFile*, const char*, size_t, size_t); -typedef size_t (*aiFileReadProc) (C_STRUCT aiFile*, char*, size_t,size_t); -typedef size_t (*aiFileTellProc) (C_STRUCT aiFile*); -typedef void (*aiFileFlushProc) (C_STRUCT aiFile*); -typedef C_ENUM aiReturn (*aiFileSeek) (C_STRUCT aiFile*, size_t, C_ENUM aiOrigin); - -// aiFileIO callbacks -typedef C_STRUCT aiFile* (*aiFileOpenProc) (C_STRUCT aiFileIO*, const char*, const char*); -typedef void (*aiFileCloseProc) (C_STRUCT aiFileIO*, C_STRUCT aiFile*); - -// Represents user-defined data -typedef char* aiUserData; - -// ---------------------------------------------------------------------------------- -/** @brief C-API: File system callbacks - * - * Provided are functions to open and close files. Supply a custom structure to - * the import function. If you don't, a default implementation is used. Use custom - * file systems to enable reading from other sources, such as ZIPs - * or memory locations. */ -struct aiFileIO -{ - /** Function used to open a new file - */ - aiFileOpenProc OpenProc; - - /** Function used to close an existing file - */ - aiFileCloseProc CloseProc; - - /** User-defined, opaque data */ - aiUserData UserData; -}; - -// ---------------------------------------------------------------------------------- -/** @brief C-API: File callbacks - * - * Actually, it's a data structure to wrap a set of fXXXX (e.g fopen) - * replacement functions. - * - * The default implementation of the functions utilizes the fXXX functions from - * the CRT. 
However, you can supply a custom implementation to Assimp by - * delivering a custom aiFileIO. Use this to enable reading from other sources, - * such as ZIP archives or memory locations. */ -struct aiFile -{ - /** Callback to read from a file */ - aiFileReadProc ReadProc; - - /** Callback to write to a file */ - aiFileWriteProc WriteProc; - - /** Callback to retrieve the current position of - * the file cursor (ftell()) - */ - aiFileTellProc TellProc; - - /** Callback to retrieve the size of the file, - * in bytes - */ - aiFileTellProc FileSizeProc; - - /** Callback to set the current position - * of the file cursor (fseek()) - */ - aiFileSeek SeekProc; - - /** Callback to flush the file contents - */ - aiFileFlushProc FlushProc; - - /** User-defined, opaque data - */ - aiUserData UserData; -}; - -#ifdef __cplusplus -} -#endif -#endif // AI_FILEIO_H_INC diff --git a/thirdparty/assimp/include/assimp/cimport.h b/thirdparty/assimp/include/assimp/cimport.h deleted file mode 100644 index 66b1c9a174..0000000000 --- a/thirdparty/assimp/include/assimp/cimport.h +++ /dev/null @@ -1,569 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file cimport.h - * @brief Defines the C-API to the Open Asset Import Library. - */ -#pragma once -#ifndef AI_ASSIMP_H_INC -#define AI_ASSIMP_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <assimp/importerdesc.h> - -#ifdef __cplusplus -extern "C" { -#endif - -struct aiScene; // aiScene.h -struct aiFileIO; // aiFileIO.h -typedef void (*aiLogStreamCallback)(const char* /* message */, char* /* user */); - -// -------------------------------------------------------------------------------- -/** C-API: Represents a log stream. 
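Because cfileio.h (above) only declares callback typedefs, a concrete implementation has to come from the application. The following is a minimal, illustrative stdio-backed sketch of the aiFileIO / aiFile structures; it is not taken from the Assimp sources, error handling is intentionally thin, and the my_* helper names are made up for the example.

/* Illustrative sketch of a custom aiFileIO backed by the C standard library. */
#include <assimp/cfileio.h>
#include <assimp/cimport.h>
#include <stdio.h>
#include <stdlib.h>

static size_t my_read(struct aiFile *f, char *buf, size_t size, size_t count) {
    return fread(buf, size, count, (FILE *)f->UserData);
}
static size_t my_write(struct aiFile *f, const char *buf, size_t size, size_t count) {
    return fwrite(buf, size, count, (FILE *)f->UserData);
}
static size_t my_tell(struct aiFile *f) {
    return (size_t)ftell((FILE *)f->UserData);
}
static size_t my_size(struct aiFile *f) {
    FILE *fp = (FILE *)f->UserData;
    long pos = ftell(fp);          /* remember the cursor, */
    fseek(fp, 0, SEEK_END);        /* measure the file,    */
    long end = ftell(fp);
    fseek(fp, pos, SEEK_SET);      /* and restore the cursor */
    return (size_t)end;
}
static void my_flush(struct aiFile *f) {
    fflush((FILE *)f->UserData);
}
static enum aiReturn my_seek(struct aiFile *f, size_t offset, enum aiOrigin origin) {
    int whence = (origin == aiOrigin_SET) ? SEEK_SET
               : (origin == aiOrigin_CUR) ? SEEK_CUR : SEEK_END;
    return fseek((FILE *)f->UserData, (long)offset, whence) == 0
            ? aiReturn_SUCCESS : aiReturn_FAILURE;
}
static struct aiFile *my_open(struct aiFileIO *io, const char *path, const char *mode) {
    (void)io;
    FILE *fp = fopen(path, mode);
    if (!fp) {
        return NULL;
    }
    struct aiFile *f = (struct aiFile *)calloc(1, sizeof(*f));
    if (!f) {
        fclose(fp);
        return NULL;
    }
    f->ReadProc = my_read;  f->WriteProc = my_write;
    f->TellProc = my_tell;  f->FileSizeProc = my_size;
    f->SeekProc = my_seek;  f->FlushProc = my_flush;
    f->UserData = (aiUserData)fp;
    return f;
}
static void my_close(struct aiFileIO *io, struct aiFile *f) {
    (void)io;
    fclose((FILE *)f->UserData);
    free(f);
}
/* Usage sketch: struct aiFileIO io = { my_open, my_close, NULL };
 *               const struct aiScene *scene = aiImportFileEx("model.dae", 0, &io); */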
A log stream receives all log messages and - * streams them _somewhere_. - * @see aiGetPredefinedLogStream - * @see aiAttachLogStream - * @see aiDetachLogStream */ -// -------------------------------------------------------------------------------- -struct aiLogStream -{ - /** callback to be called */ - aiLogStreamCallback callback; - - /** user data to be passed to the callback */ - char* user; -}; - - -// -------------------------------------------------------------------------------- -/** C-API: Represents an opaque set of settings to be used during importing. - * @see aiCreatePropertyStore - * @see aiReleasePropertyStore - * @see aiImportFileExWithProperties - * @see aiSetPropertyInteger - * @see aiSetPropertyFloat - * @see aiSetPropertyString - * @see aiSetPropertyMatrix - */ -// -------------------------------------------------------------------------------- -struct aiPropertyStore { char sentinel; }; - -/** Our own C boolean type */ -typedef int aiBool; - -#define AI_FALSE 0 -#define AI_TRUE 1 - -// -------------------------------------------------------------------------------- -/** Reads the given file and returns its content. - * - * If the call succeeds, the imported data is returned in an aiScene structure. - * The data is intended to be read-only, it stays property of the ASSIMP - * library and will be stable until aiReleaseImport() is called. After you're - * done with it, call aiReleaseImport() to free the resources associated with - * this file. If the import fails, NULL is returned instead. Call - * aiGetErrorString() to retrieve a human-readable error text. - * @param pFile Path and filename of the file to be imported, - * expected to be a null-terminated c-string. NULL is not a valid value. - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. - * @return Pointer to the imported data or NULL if the import failed. - */ -ASSIMP_API const C_STRUCT aiScene* aiImportFile( - const char* pFile, - unsigned int pFlags); - -// -------------------------------------------------------------------------------- -/** Reads the given file using user-defined I/O functions and returns - * its content. - * - * If the call succeeds, the imported data is returned in an aiScene structure. - * The data is intended to be read-only, it stays property of the ASSIMP - * library and will be stable until aiReleaseImport() is called. After you're - * done with it, call aiReleaseImport() to free the resources associated with - * this file. If the import fails, NULL is returned instead. Call - * aiGetErrorString() to retrieve a human-readable error text. - * @param pFile Path and filename of the file to be imported, - * expected to be a null-terminated c-string. NULL is not a valid value. - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. - * @param pFS aiFileIO structure. Will be used to open the model file itself - * and any other files the loader needs to open. Pass NULL to use the default - * implementation. - * @return Pointer to the imported data or NULL if the import failed. - * @note Include <aiFileIO.h> for the definition of #aiFileIO. 
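The typical round trip through aiImportFile, aiGetErrorString and aiReleaseImport described above looks roughly like the sketch below. It is illustrative only, not taken from the Assimp sources; the post-processing flags come from assimp/postprocess.h (part of the same library), and load_model is a made-up helper name.

/* Illustrative sketch of a basic import with post-processing and cleanup. */
#include <assimp/cimport.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>
#include <stdio.h>

int load_model(const char *path) /* hypothetical helper */
{
    const struct aiScene *scene = aiImportFile(path,
            aiProcess_Triangulate | aiProcess_JoinIdenticalVertices);
    if (!scene) {
        fprintf(stderr, "Assimp: %s\n", aiGetErrorString());
        return -1;
    }
    printf("%u meshes imported\n", scene->mNumMeshes);
    aiReleaseImport(scene); /* frees everything owned by the imported scene */
    return 0;
}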
- */ -ASSIMP_API const C_STRUCT aiScene* aiImportFileEx( - const char* pFile, - unsigned int pFlags, - C_STRUCT aiFileIO* pFS); - -// -------------------------------------------------------------------------------- -/** Same as #aiImportFileEx, but adds an extra parameter containing importer settings. - * - * @param pFile Path and filename of the file to be imported, - * expected to be a null-terminated c-string. NULL is not a valid value. - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. - * @param pFS aiFileIO structure. Will be used to open the model file itself - * and any other files the loader needs to open. Pass NULL to use the default - * implementation. - * @param pProps #aiPropertyStore instance containing import settings. - * @return Pointer to the imported data or NULL if the import failed. - * @note Include <aiFileIO.h> for the definition of #aiFileIO. - * @see aiImportFileEx - */ -ASSIMP_API const C_STRUCT aiScene* aiImportFileExWithProperties( - const char* pFile, - unsigned int pFlags, - C_STRUCT aiFileIO* pFS, - const C_STRUCT aiPropertyStore* pProps); - -// -------------------------------------------------------------------------------- -/** Reads the given file from a given memory buffer, - * - * If the call succeeds, the contents of the file are returned as a pointer to an - * aiScene object. The returned data is intended to be read-only, the importer keeps - * ownership of the data and will destroy it upon destruction. If the import fails, - * NULL is returned. - * A human-readable error description can be retrieved by calling aiGetErrorString(). - * @param pBuffer Pointer to the file data - * @param pLength Length of pBuffer, in bytes - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. If you wish to inspect the imported - * scene first in order to fine-tune your post-processing setup, - * consider to use #aiApplyPostProcessing(). - * @param pHint An additional hint to the library. If this is a non empty string, - * the library looks for a loader to support the file extension specified by pHint - * and passes the file to the first matching loader. If this loader is unable to - * completely the request, the library continues and tries to determine the file - * format on its own, a task that may or may not be successful. - * Check the return value, and you'll know ... - * @return A pointer to the imported data, NULL if the import failed. - * - * @note This is a straightforward way to decode models from memory - * buffers, but it doesn't handle model formats that spread their - * data across multiple files or even directories. Examples include - * OBJ or MD3, which outsource parts of their material info into - * external scripts. If you need full functionality, provide - * a custom IOSystem to make Assimp find these files and use - * the regular aiImportFileEx()/aiImportFileExWithProperties() API. - */ -ASSIMP_API const C_STRUCT aiScene* aiImportFileFromMemory( - const char* pBuffer, - unsigned int pLength, - unsigned int pFlags, - const char* pHint); - -// -------------------------------------------------------------------------------- -/** Same as #aiImportFileFromMemory, but adds an extra parameter containing importer settings. 
- * - * @param pBuffer Pointer to the file data - * @param pLength Length of pBuffer, in bytes - * @param pFlags Optional post processing steps to be executed after - * a successful import. Provide a bitwise combination of the - * #aiPostProcessSteps flags. If you wish to inspect the imported - * scene first in order to fine-tune your post-processing setup, - * consider to use #aiApplyPostProcessing(). - * @param pHint An additional hint to the library. If this is a non empty string, - * the library looks for a loader to support the file extension specified by pHint - * and passes the file to the first matching loader. If this loader is unable to - * completely the request, the library continues and tries to determine the file - * format on its own, a task that may or may not be successful. - * Check the return value, and you'll know ... - * @param pProps #aiPropertyStore instance containing import settings. - * @return A pointer to the imported data, NULL if the import failed. - * - * @note This is a straightforward way to decode models from memory - * buffers, but it doesn't handle model formats that spread their - * data across multiple files or even directories. Examples include - * OBJ or MD3, which outsource parts of their material info into - * external scripts. If you need full functionality, provide - * a custom IOSystem to make Assimp find these files and use - * the regular aiImportFileEx()/aiImportFileExWithProperties() API. - * @see aiImportFileFromMemory - */ -ASSIMP_API const C_STRUCT aiScene* aiImportFileFromMemoryWithProperties( - const char* pBuffer, - unsigned int pLength, - unsigned int pFlags, - const char* pHint, - const C_STRUCT aiPropertyStore* pProps); - -// -------------------------------------------------------------------------------- -/** Apply post-processing to an already-imported scene. - * - * This is strictly equivalent to calling #aiImportFile()/#aiImportFileEx with the - * same flags. However, you can use this separate function to inspect the imported - * scene first to fine-tune your post-processing setup. - * @param pScene Scene to work on. - * @param pFlags Provide a bitwise combination of the #aiPostProcessSteps flags. - * @return A pointer to the post-processed data. Post processing is done in-place, - * meaning this is still the same #aiScene which you passed for pScene. However, - * _if_ post-processing failed, the scene could now be NULL. That's quite a rare - * case, post processing steps are not really designed to 'fail'. To be exact, - * the #aiProcess_ValidateDataStructure flag is currently the only post processing step - * which can actually cause the scene to be reset to NULL. - */ -ASSIMP_API const C_STRUCT aiScene* aiApplyPostProcessing( - const C_STRUCT aiScene* pScene, - unsigned int pFlags); - -// -------------------------------------------------------------------------------- -/** Get one of the predefine log streams. This is the quick'n'easy solution to - * access Assimp's log system. Attaching a log stream can slightly reduce Assimp's - * overall import performance. - * - * Usage is rather simple (this will stream the log to a file, named log.txt, and - * the stdout stream of the process: - * @code - * struct aiLogStream c; - * c = aiGetPredefinedLogStream(aiDefaultLogStream_FILE,"log.txt"); - * aiAttachLogStream(&c); - * c = aiGetPredefinedLogStream(aiDefaultLogStream_STDOUT,NULL); - * aiAttachLogStream(&c); - * @endcode - * - * @param pStreams One of the #aiDefaultLogStream enumerated values. 
- * @param file Solely for the #aiDefaultLogStream_FILE flag: specifies the file to write to. - * Pass NULL for all other flags. - * @return The log stream. callback is set to NULL if something went wrong. - */ -ASSIMP_API C_STRUCT aiLogStream aiGetPredefinedLogStream( - C_ENUM aiDefaultLogStream pStreams, - const char* file); - -// -------------------------------------------------------------------------------- -/** Attach a custom log stream to the libraries' logging system. - * - * Attaching a log stream can slightly reduce Assimp's overall import - * performance. Multiple log-streams can be attached. - * @param stream Describes the new log stream. - * @note To ensure proper destruction of the logging system, you need to manually - * call aiDetachLogStream() on every single log stream you attach. - * Alternatively (for the lazy folks) #aiDetachAllLogStreams is provided. - */ -ASSIMP_API void aiAttachLogStream( - const C_STRUCT aiLogStream* stream); - -// -------------------------------------------------------------------------------- -/** Enable verbose logging. Verbose logging includes debug-related stuff and - * detailed import statistics. This can have severe impact on import performance - * and memory consumption. However, it might be useful to find out why a file - * didn't read correctly. - * @param d AI_TRUE or AI_FALSE, your decision. - */ -ASSIMP_API void aiEnableVerboseLogging(aiBool d); - -// -------------------------------------------------------------------------------- -/** Detach a custom log stream from the libraries' logging system. - * - * This is the counterpart of #aiAttachLogStream. If you attached a stream, - * don't forget to detach it again. - * @param stream The log stream to be detached. - * @return AI_SUCCESS if the log stream has been detached successfully. - * @see aiDetachAllLogStreams - */ -ASSIMP_API C_ENUM aiReturn aiDetachLogStream( - const C_STRUCT aiLogStream* stream); - -// -------------------------------------------------------------------------------- -/** Detach all active log streams from the libraries' logging system. - * This ensures that the logging system is terminated properly and all - * resources allocated by it are actually freed. If you attached a stream, - * don't forget to detach it again. - * @see aiAttachLogStream - * @see aiDetachLogStream - */ -ASSIMP_API void aiDetachAllLogStreams(void); - -// -------------------------------------------------------------------------------- -/** Releases all resources associated with the given import process. - * - * Call this function after you're done with the imported data. - * @param pScene The imported data to release. NULL is a valid value. - */ -ASSIMP_API void aiReleaseImport( - const C_STRUCT aiScene* pScene); - -// -------------------------------------------------------------------------------- -/** Returns the error text of the last failed import process. - * - * @return A textual description of the error that occurred at the last - * import process. NULL if there was no error. There can't be an error if you - * got a non-NULL #aiScene from #aiImportFile/#aiImportFileEx/#aiApplyPostProcessing. - */ -ASSIMP_API const char* aiGetErrorString(void); - -// -------------------------------------------------------------------------------- -/** Returns whether a given file extension is supported by ASSIMP - * - * @param szExtension Extension for which the function queries support for. - * Must include a leading dot '.'. 
Example: ".3ds", ".md3" - * @return AI_TRUE if the file extension is supported. - */ -ASSIMP_API aiBool aiIsExtensionSupported( - const char* szExtension); - -// -------------------------------------------------------------------------------- -/** Get a list of all file extensions supported by ASSIMP. - * - * If a file extension is contained in the list this does, of course, not - * mean that ASSIMP is able to load all files with this extension. - * @param szOut String to receive the extension list. - * Format of the list: "*.3ds;*.obj;*.dae". NULL is not a valid parameter. - */ -ASSIMP_API void aiGetExtensionList( - C_STRUCT aiString* szOut); - -// -------------------------------------------------------------------------------- -/** Get the approximated storage required by an imported asset - * @param pIn Input asset. - * @param in Data structure to be filled. - */ -ASSIMP_API void aiGetMemoryRequirements( - const C_STRUCT aiScene* pIn, - C_STRUCT aiMemoryInfo* in); - - - -// -------------------------------------------------------------------------------- -/** Create an empty property store. Property stores are used to collect import - * settings. - * @return New property store. Property stores need to be manually destroyed using - * the #aiReleasePropertyStore API function. - */ -ASSIMP_API C_STRUCT aiPropertyStore* aiCreatePropertyStore(void); - -// -------------------------------------------------------------------------------- -/** Delete a property store. - * @param p Property store to be deleted. - */ -ASSIMP_API void aiReleasePropertyStore(C_STRUCT aiPropertyStore* p); - -// -------------------------------------------------------------------------------- -/** Set an integer property. - * - * This is the C-version of #Assimp::Importer::SetPropertyInteger(). In the C - * interface, properties are always shared by all imports. It is not possible to - * specify them per import. - * - * @param store Store to modify. Use #aiCreatePropertyStore to obtain a store. - * @param szName Name of the configuration property to be set. All supported - * public properties are defined in the config.h header file (AI_CONFIG_XXX). - * @param value New value for the property - */ -ASSIMP_API void aiSetImportPropertyInteger( - C_STRUCT aiPropertyStore* store, - const char* szName, - int value); - -// -------------------------------------------------------------------------------- -/** Set a floating-point property. - * - * This is the C-version of #Assimp::Importer::SetPropertyFloat(). In the C - * interface, properties are always shared by all imports. It is not possible to - * specify them per import. - * - * @param store Store to modify. Use #aiCreatePropertyStore to obtain a store. - * @param szName Name of the configuration property to be set. All supported - * public properties are defined in the config.h header file (AI_CONFIG_XXX). - * @param value New value for the property - */ -ASSIMP_API void aiSetImportPropertyFloat( - C_STRUCT aiPropertyStore* store, - const char* szName, - ai_real value); - -// -------------------------------------------------------------------------------- -/** Set a string property. - * - * This is the C-version of #Assimp::Importer::SetPropertyString(). In the C - * interface, properties are always shared by all imports. It is not possible to - * specify them per import. - * - * @param store Store to modify. Use #aiCreatePropertyStore to obtain a store. - * @param szName Name of the configuration property to be set. 
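The property-store functions above pair with aiImportFileExWithProperties. A minimal illustrative sketch follows; it is not taken from the Assimp sources, the property name AI_CONFIG_PP_SLM_VERTEX_LIMIT comes from assimp/config.h (the header that appears further down in this diff), the vertex limit chosen is arbitrary, and import_with_limits is a made-up helper name.

/* Illustrative sketch of importing with per-call import settings. */
#include <assimp/cimport.h>
#include <assimp/config.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>
#include <stddef.h>

const struct aiScene *import_with_limits(const char *path) /* hypothetical helper */
{
    struct aiPropertyStore *props = aiCreatePropertyStore();
    /* Cap vertices per mesh; used by the SplitLargeMeshes post-processing step. */
    aiSetImportPropertyInteger(props, AI_CONFIG_PP_SLM_VERTEX_LIMIT, 65000);
    const struct aiScene *scene = aiImportFileExWithProperties(
            path, aiProcess_SplitLargeMeshes, NULL /* default file IO */, props);
    aiReleasePropertyStore(props);
    return scene; /* caller releases the scene with aiReleaseImport() */
}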
All supported - * public properties are defined in the config.h header file (AI_CONFIG_XXX). - * @param st New value for the property - */ -ASSIMP_API void aiSetImportPropertyString( - C_STRUCT aiPropertyStore* store, - const char* szName, - const C_STRUCT aiString* st); - -// -------------------------------------------------------------------------------- -/** Set a matrix property. - * - * This is the C-version of #Assimp::Importer::SetPropertyMatrix(). In the C - * interface, properties are always shared by all imports. It is not possible to - * specify them per import. - * - * @param store Store to modify. Use #aiCreatePropertyStore to obtain a store. - * @param szName Name of the configuration property to be set. All supported - * public properties are defined in the config.h header file (AI_CONFIG_XXX). - * @param mat New value for the property - */ -ASSIMP_API void aiSetImportPropertyMatrix( - C_STRUCT aiPropertyStore* store, - const char* szName, - const C_STRUCT aiMatrix4x4* mat); - -// -------------------------------------------------------------------------------- -/** Construct a quaternion from a 3x3 rotation matrix. - * @param quat Receives the output quaternion. - * @param mat Matrix to 'quaternionize'. - * @see aiQuaternion(const aiMatrix3x3& pRotMatrix) - */ -ASSIMP_API void aiCreateQuaternionFromMatrix( - C_STRUCT aiQuaternion* quat, - const C_STRUCT aiMatrix3x3* mat); - -// -------------------------------------------------------------------------------- -/** Decompose a transformation matrix into its rotational, translational and - * scaling components. - * - * @param mat Matrix to decompose - * @param scaling Receives the scaling component - * @param rotation Receives the rotational component - * @param position Receives the translational component. - * @see aiMatrix4x4::Decompose (aiVector3D&, aiQuaternion&, aiVector3D&) const; - */ -ASSIMP_API void aiDecomposeMatrix( - const C_STRUCT aiMatrix4x4* mat, - C_STRUCT aiVector3D* scaling, - C_STRUCT aiQuaternion* rotation, - C_STRUCT aiVector3D* position); - -// -------------------------------------------------------------------------------- -/** Transpose a 4x4 matrix. - * @param mat Pointer to the matrix to be transposed - */ -ASSIMP_API void aiTransposeMatrix4( - C_STRUCT aiMatrix4x4* mat); - -// -------------------------------------------------------------------------------- -/** Transpose a 3x3 matrix. - * @param mat Pointer to the matrix to be transposed - */ -ASSIMP_API void aiTransposeMatrix3( - C_STRUCT aiMatrix3x3* mat); - -// -------------------------------------------------------------------------------- -/** Transform a vector by a 3x3 matrix - * @param vec Vector to be transformed. - * @param mat Matrix to transform the vector with. - */ -ASSIMP_API void aiTransformVecByMatrix3( - C_STRUCT aiVector3D* vec, - const C_STRUCT aiMatrix3x3* mat); - -// -------------------------------------------------------------------------------- -/** Transform a vector by a 4x4 matrix - * @param vec Vector to be transformed. - * @param mat Matrix to transform the vector with. - */ -ASSIMP_API void aiTransformVecByMatrix4( - C_STRUCT aiVector3D* vec, - const C_STRUCT aiMatrix4x4* mat); - -// -------------------------------------------------------------------------------- -/** Multiply two 4x4 matrices. - * @param dst First factor, receives result. - * @param src Matrix to be multiplied with 'dst'. 
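The small matrix and vector helpers documented above can be chained; a minimal illustrative sketch follows (not taken from the Assimp sources; transform_point is a made-up name). Note that aiMultiplyMatrix4() writes the product back into its first argument.

/* Illustrative sketch of composing two transforms and applying them to a point. */
#include <assimp/cimport.h>

void transform_point(struct aiVector3D *point,
                     const struct aiMatrix4x4 *parent,
                     const struct aiMatrix4x4 *local) /* hypothetical helper */
{
    struct aiMatrix4x4 world;
    aiIdentityMatrix4(&world);           /* start from the identity matrix */
    aiMultiplyMatrix4(&world, parent);   /* world = world * parent */
    aiMultiplyMatrix4(&world, local);    /* world = world * local  */
    aiTransformVecByMatrix4(point, &world);
}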
- */ -ASSIMP_API void aiMultiplyMatrix4( - C_STRUCT aiMatrix4x4* dst, - const C_STRUCT aiMatrix4x4* src); - -// -------------------------------------------------------------------------------- -/** Multiply two 3x3 matrices. - * @param dst First factor, receives result. - * @param src Matrix to be multiplied with 'dst'. - */ -ASSIMP_API void aiMultiplyMatrix3( - C_STRUCT aiMatrix3x3* dst, - const C_STRUCT aiMatrix3x3* src); - -// -------------------------------------------------------------------------------- -/** Get a 3x3 identity matrix. - * @param mat Matrix to receive its personal identity - */ -ASSIMP_API void aiIdentityMatrix3( - C_STRUCT aiMatrix3x3* mat); - -// -------------------------------------------------------------------------------- -/** Get a 4x4 identity matrix. - * @param mat Matrix to receive its personal identity - */ -ASSIMP_API void aiIdentityMatrix4( - C_STRUCT aiMatrix4x4* mat); - -// -------------------------------------------------------------------------------- -/** Returns the number of import file formats available in the current Assimp build. - * Use aiGetImportFormatDescription() to retrieve infos of a specific import format. - */ -ASSIMP_API size_t aiGetImportFormatCount(void); - -// -------------------------------------------------------------------------------- -/** Returns a description of the nth import file format. Use #aiGetImportFormatCount() - * to learn how many import formats are supported. - * @param pIndex Index of the import format to retrieve information for. Valid range is - * 0 to #aiGetImportFormatCount() - * @return A description of that specific import format. NULL if pIndex is out of range. - */ -ASSIMP_API const C_STRUCT aiImporterDesc* aiGetImportFormatDescription( size_t pIndex); -#ifdef __cplusplus -} -#endif - -#endif // AI_ASSIMP_H_INC diff --git a/thirdparty/assimp/include/assimp/color4.h b/thirdparty/assimp/include/assimp/color4.h deleted file mode 100644 index fa86128f4f..0000000000 --- a/thirdparty/assimp/include/assimp/color4.h +++ /dev/null @@ -1,103 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file color4.h - * @brief RGBA color structure, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_COLOR4D_H_INC -#define AI_COLOR4D_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -#ifdef __cplusplus - -// ---------------------------------------------------------------------------------- -/** Represents a color in Red-Green-Blue space including an -* alpha component. Color values range from 0 to 1. */ -// ---------------------------------------------------------------------------------- -template <typename TReal> -class aiColor4t { -public: - aiColor4t() AI_NO_EXCEPT : r(), g(), b(), a() {} - aiColor4t (TReal _r, TReal _g, TReal _b, TReal _a) - : r(_r), g(_g), b(_b), a(_a) {} - explicit aiColor4t (TReal _r) : r(_r), g(_r), b(_r), a(_r) {} - aiColor4t (const aiColor4t& o) = default; - - // combined operators - const aiColor4t& operator += (const aiColor4t& o); - const aiColor4t& operator -= (const aiColor4t& o); - const aiColor4t& operator *= (TReal f); - const aiColor4t& operator /= (TReal f); - - // comparison - bool operator == (const aiColor4t& other) const; - bool operator != (const aiColor4t& other) const; - bool operator < (const aiColor4t& other) const; - - // color tuple access, rgba order - inline TReal operator[](unsigned int i) const; - inline TReal& operator[](unsigned int i); - - /** check whether a color is (close to) black */ - inline bool IsBlack() const; - - // Red, green, blue and alpha color values - TReal r, g, b, a; -}; // !struct aiColor4D - -typedef aiColor4t<ai_real> aiColor4D; - -#else - -struct aiColor4D { - ai_real r, g, b, a; -}; - -#endif // __cplusplus - -#endif // AI_COLOR4D_H_INC diff --git a/thirdparty/assimp/include/assimp/color4.inl b/thirdparty/assimp/include/assimp/color4.inl deleted file mode 100644 index d4a2a98109..0000000000 --- a/thirdparty/assimp/include/assimp/color4.inl +++ /dev/null @@ -1,251 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file color4.inl - * @brief Inline implementation of aiColor4t<TReal> operators - */ -#pragma once -#ifndef AI_COLOR4D_INL_INC -#define AI_COLOR4D_INL_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -#include <assimp/color4.h> - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiColor4t<TReal>& aiColor4t<TReal>::operator += (const aiColor4t<TReal>& o) { - r += o.r; - g += o.g; - b += o.b; - a += o.a; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiColor4t<TReal>& aiColor4t<TReal>::operator -= (const aiColor4t<TReal>& o) { - r -= o.r; - g -= o.g; - b -= o.b; - a -= o.a; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiColor4t<TReal>& aiColor4t<TReal>::operator *= (TReal f) { - r *= f; - g *= f; - b *= f; - a *= f; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiColor4t<TReal>& aiColor4t<TReal>::operator /= (TReal f) { - r /= f; - g /= f; - b /= f; - a /= f; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal aiColor4t<TReal>::operator[](unsigned int i) const { - switch ( i ) { - case 0: - return r; - case 1: - return g; - case 2: - return b; - case 3: - return a; - default: - break; - } - return r; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal& aiColor4t<TReal>::operator[](unsigned int i) { - switch ( i ) { - case 0: - return r; - case 1: - return g; - case 2: - return b; - case 3: - return a; - default: - break; - } - return r; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiColor4t<TReal>::operator== (const aiColor4t<TReal>& other) const { - return r == other.r && g == other.g && b == other.b && a == other.a; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiColor4t<TReal>::operator!= (const aiColor4t<TReal>& other) const { - return r != other.r || g != other.g || b != other.b || a != other.a; -} -// 
------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiColor4t<TReal>::operator< (const aiColor4t<TReal>& other) const { - return r < other.r || ( - r == other.r && ( - g < other.g || ( - g == other.g && ( - b < other.b || ( - b == other.b && ( - a < other.a - ) - ) - ) - ) - ) - ); -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator + (const aiColor4t<TReal>& v1, const aiColor4t<TReal>& v2) { - return aiColor4t<TReal>( v1.r + v2.r, v1.g + v2.g, v1.b + v2.b, v1.a + v2.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator - (const aiColor4t<TReal>& v1, const aiColor4t<TReal>& v2) { - return aiColor4t<TReal>( v1.r - v2.r, v1.g - v2.g, v1.b - v2.b, v1.a - v2.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE aiColor4t<TReal> operator * (const aiColor4t<TReal>& v1, const aiColor4t<TReal>& v2) { - return aiColor4t<TReal>( v1.r * v2.r, v1.g * v2.g, v1.b * v2.b, v1.a * v2.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator / (const aiColor4t<TReal>& v1, const aiColor4t<TReal>& v2) { - return aiColor4t<TReal>( v1.r / v2.r, v1.g / v2.g, v1.b / v2.b, v1.a / v2.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator * ( TReal f, const aiColor4t<TReal>& v) { - return aiColor4t<TReal>( f*v.r, f*v.g, f*v.b, f*v.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator * ( const aiColor4t<TReal>& v, TReal f) { - return aiColor4t<TReal>( f*v.r, f*v.g, f*v.b, f*v.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator / ( const aiColor4t<TReal>& v, TReal f) { - return v * (1/f); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator / ( TReal f,const aiColor4t<TReal>& v) { - return aiColor4t<TReal>(f,f,f,f)/v; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator + ( const aiColor4t<TReal>& v, TReal f) { - return aiColor4t<TReal>( f+v.r, f+v.g, f+v.b, f+v.a); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator - ( const aiColor4t<TReal>& v, TReal f) { - return aiColor4t<TReal>( v.r-f, v.g-f, v.b-f, v.a-f); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator + ( TReal f, const aiColor4t<TReal>& v) { - return aiColor4t<TReal>( f+v.r, f+v.g, f+v.b, f+v.a); -} -// 
------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiColor4t<TReal> operator - ( TReal f, const aiColor4t<TReal>& v) { - return aiColor4t<TReal>( f-v.r, f-v.g, f-v.b, f-v.a); -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiColor4t<TReal>::IsBlack() const { - // The alpha component doesn't care here. black is black. - static const TReal epsilon = 10e-3f; - return std::fabs( r ) < epsilon && std::fabs( g ) < epsilon && std::fabs( b ) < epsilon; -} - -#endif // __cplusplus -#endif // AI_VECTOR3D_INL_INC diff --git a/thirdparty/assimp/include/assimp/config.h b/thirdparty/assimp/include/assimp/config.h deleted file mode 100644 index 48d61941ad..0000000000 --- a/thirdparty/assimp/include/assimp/config.h +++ /dev/null @@ -1,1018 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2018, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file config.h - * @brief Defines constants for configurable properties for the library - * - * Typically these properties are set via - * #Assimp::Importer::SetPropertyFloat, - * #Assimp::Importer::SetPropertyInteger or - * #Assimp::Importer::SetPropertyString, - * depending on the data type of a property. All properties have a - * default value. See the doc for the mentioned methods for more details. 
- * - * <br><br> - * The corresponding functions for use with the plain-c API are: - * #aiSetImportPropertyInteger, - * #aiSetImportPropertyFloat, - * #aiSetImportPropertyString - */ -#pragma once -#ifndef AI_CONFIG_H_INC -#define AI_CONFIG_H_INC - - -// ########################################################################### -// LIBRARY SETTINGS -// General, global settings -// ########################################################################### - -// --------------------------------------------------------------------------- -/** @brief Enables time measurements. - * - * If enabled, measures the time needed for each part of the loading - * process (i.e. IO time, importing, postprocessing, ..) and dumps - * these timings to the DefaultLogger. See the @link perf Performance - * Page@endlink for more information on this topic. - * - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_GLOB_MEASURE_TIME \ - "GLOB_MEASURE_TIME" - - -// --------------------------------------------------------------------------- -/** @brief Global setting to disable generation of skeleton dummy meshes - * - * Skeleton dummy meshes are generated as a visualization aid in cases which - * the input data contains no geometry, but only animation data. - * Property data type: bool. Default value: false - */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_IMPORT_NO_SKELETON_MESHES \ - "IMPORT_NO_SKELETON_MESHES" - - - -# if 0 // not implemented yet -// --------------------------------------------------------------------------- -/** @brief Set Assimp's multithreading policy. - * - * This setting is ignored if Assimp was built without boost.thread - * support (ASSIMP_BUILD_NO_THREADING, which is implied by ASSIMP_BUILD_BOOST_WORKAROUND). - * Possible values are: -1 to let Assimp decide what to do, 0 to disable - * multithreading entirely and any number larger than 0 to force a specific - * number of threads. Assimp is always free to ignore this settings, which is - * merely a hint. Usually, the default value (-1) will be fine. However, if - * Assimp is used concurrently from multiple user threads, it might be useful - * to limit each Importer instance to a specific number of cores. - * - * For more information, see the @link threading Threading page@endlink. - * Property type: int, default value: -1. - */ -#define AI_CONFIG_GLOB_MULTITHREADING \ - "GLOB_MULTITHREADING" -#endif - -// ########################################################################### -// POST PROCESSING SETTINGS -// Various stuff to fine-tune the behavior of a specific post processing step. -// ########################################################################### - - -// --------------------------------------------------------------------------- -/** @brief Maximum bone count per mesh for the SplitbyBoneCount step. - * - * Meshes are split until the maximum number of bones is reached. The default - * value is AI_SBBC_DEFAULT_MAX_BONES, which may be altered at - * compile-time. - * Property data type: integer. 
- */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_PP_SBBC_MAX_BONES \ - "PP_SBBC_MAX_BONES" - - -// default limit for bone count -#if (!defined AI_SBBC_DEFAULT_MAX_BONES) -# define AI_SBBC_DEFAULT_MAX_BONES 60 -#endif - - -// --------------------------------------------------------------------------- -/** @brief Specifies the maximum angle that may be between two vertex tangents - * that their tangents and bi-tangents are smoothed. - * - * This applies to the CalcTangentSpace-Step. The angle is specified - * in degrees. The maximum value is 175. - * Property type: float. Default value: 45 degrees - */ -#define AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE \ - "PP_CT_MAX_SMOOTHING_ANGLE" - -// --------------------------------------------------------------------------- -/** @brief Source UV channel for tangent space computation. - * - * The specified channel must exist or an error will be raised. - * Property type: integer. Default value: 0 - */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_PP_CT_TEXTURE_CHANNEL_INDEX \ - "PP_CT_TEXTURE_CHANNEL_INDEX" - -// --------------------------------------------------------------------------- -/** @brief Specifies the maximum angle that may be between two face normals - * at the same vertex position that their are smoothed together. - * - * Sometimes referred to as 'crease angle'. - * This applies to the GenSmoothNormals-Step. The angle is specified - * in degrees, so 180 is PI. The default value is 175 degrees (all vertex - * normals are smoothed). The maximum value is 175, too. Property type: float. - * Warning: setting this option may cause a severe loss of performance. The - * performance is unaffected if the #AI_CONFIG_FAVOUR_SPEED flag is set but - * the output quality may be reduced. - */ -#define AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE \ - "PP_GSN_MAX_SMOOTHING_ANGLE" - - -// --------------------------------------------------------------------------- -/** @brief Sets the colormap (= palette) to be used to decode embedded - * textures in MDL (Quake or 3DGS) files. - * - * This must be a valid path to a file. The file is 768 (256*3) bytes - * large and contains RGB triplets for each of the 256 palette entries. - * The default value is colormap.lmp. If the file is not found, - * a default palette (from Quake 1) is used. - * Property type: string. - */ -#define AI_CONFIG_IMPORT_MDL_COLORMAP \ - "IMPORT_MDL_COLORMAP" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_RemoveRedundantMaterials step to - * keep materials matching a name in a given list. - * - * This is a list of 1 to n strings, ' ' serves as delimiter character. - * Identifiers containing whitespaces must be enclosed in *single* - * quotation marks. For example:<tt> - * "keep-me and_me_to anotherMaterialToBeKept \'name with whitespace\'"</tt>. - * If a material matches on of these names, it will not be modified or - * removed by the postprocessing step nor will other materials be replaced - * by a reference to it. <br> - * This option might be useful if you are using some magic material names - * to pass additional semantics through the content pipeline. This ensures - * they won't be optimized away, but a general optimization is still - * performed for materials not contained in the list. - * Property type: String. Default value: n/a - * @note Linefeeds, tabs or carriage returns are treated as whitespace. 
- * Material names are case sensitive. - */ -#define AI_CONFIG_PP_RRM_EXCLUDE_LIST \ - "PP_RRM_EXCLUDE_LIST" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to - * keep the scene hierarchy. Meshes are moved to worldspace, but - * no optimization is performed (read: meshes with equal materials are not - * joined. The total number of meshes won't change). - * - * This option could be of use for you if the scene hierarchy contains - * important additional information which you intend to parse. - * For rendering, you can still render all meshes in the scene without - * any transformations. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_PTV_KEEP_HIERARCHY \ - "PP_PTV_KEEP_HIERARCHY" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to normalize - * all vertex components into the [-1,1] range. That is, a bounding box - * for the whole scene is computed, the maximum component is taken and all - * meshes are scaled appropriately (uniformly of course!). - * This might be useful if you don't know the spatial dimension of the input - * data*/ -#define AI_CONFIG_PP_PTV_NORMALIZE \ - "PP_PTV_NORMALIZE" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to use - * a user-defined matrix as the scene root node transformation before - * transforming vertices. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_PTV_ADD_ROOT_TRANSFORMATION \ - "PP_PTV_ADD_ROOT_TRANSFORMATION" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to use - * a user-defined matrix as the scene root node transformation before - * transforming vertices. This property corresponds to the 'a1' component - * of the transformation matrix. - * Property type: aiMatrix4x4. - */ -#define AI_CONFIG_PP_PTV_ROOT_TRANSFORMATION \ - "PP_PTV_ROOT_TRANSFORMATION" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_FindDegenerates step to - * remove degenerated primitives from the import - immediately. - * - * The default behaviour converts degenerated triangles to lines and - * degenerated lines to points. See the documentation to the - * #aiProcess_FindDegenerates step for a detailed example of the various ways - * to get rid of these lines and points if you don't want them. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_FD_REMOVE \ - "PP_FD_REMOVE" - -// --------------------------------------------------------------------------- -/** - * @brief Configures the #aiProcess_FindDegenerates step to check the area of a - * triangle to be greater than 1e-6. If this is not the case the triangle will - * be removed if #AI_CONFIG_PP_FD_REMOVE is set to true. - */ -#define AI_CONFIG_PP_FD_CHECKAREA \ - "PP_FD_CHECKAREA" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_OptimizeGraph step to preserve nodes - * matching a name in a given list. - * - * This is a list of 1 to n strings, ' ' serves as delimiter character. - * Identifiers containing whitespaces must be enclosed in *single* - * quotation marks. 
For example:<tt> - * "keep-me and_me_to anotherNodeToBeKept \'name with whitespace\'"</tt>. - * If a node matches on of these names, it will not be modified or - * removed by the postprocessing step.<br> - * This option might be useful if you are using some magic node names - * to pass additional semantics through the content pipeline. This ensures - * they won't be optimized away, but a general optimization is still - * performed for nodes not contained in the list. - * Property type: String. Default value: n/a - * @note Linefeeds, tabs or carriage returns are treated as whitespace. - * Node names are case sensitive. - */ -#define AI_CONFIG_PP_OG_EXCLUDE_LIST \ - "PP_OG_EXCLUDE_LIST" - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of triangles in a mesh. - * - * This is used by the "SplitLargeMeshes" PostProcess-Step to determine - * whether a mesh must be split or not. - * @note The default value is AI_SLM_DEFAULT_MAX_TRIANGLES - * Property type: integer. - */ -#define AI_CONFIG_PP_SLM_TRIANGLE_LIMIT \ - "PP_SLM_TRIANGLE_LIMIT" - -// default value for AI_CONFIG_PP_SLM_TRIANGLE_LIMIT -#if (!defined AI_SLM_DEFAULT_MAX_TRIANGLES) -# define AI_SLM_DEFAULT_MAX_TRIANGLES 1000000 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of vertices in a mesh. - * - * This is used by the "SplitLargeMeshes" PostProcess-Step to determine - * whether a mesh must be split or not. - * @note The default value is AI_SLM_DEFAULT_MAX_VERTICES - * Property type: integer. - */ -#define AI_CONFIG_PP_SLM_VERTEX_LIMIT \ - "PP_SLM_VERTEX_LIMIT" - -// default value for AI_CONFIG_PP_SLM_VERTEX_LIMIT -#if (!defined AI_SLM_DEFAULT_MAX_VERTICES) -# define AI_SLM_DEFAULT_MAX_VERTICES 1000000 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of bones affecting a single vertex - * - * This is used by the #aiProcess_LimitBoneWeights PostProcess-Step. - * @note The default value is AI_LMW_MAX_WEIGHTS - * Property type: integer.*/ -#define AI_CONFIG_PP_LBW_MAX_WEIGHTS \ - "PP_LBW_MAX_WEIGHTS" - -// default value for AI_CONFIG_PP_LBW_MAX_WEIGHTS -#if (!defined AI_LMW_MAX_WEIGHTS) -# define AI_LMW_MAX_WEIGHTS 0x4 -#endif // !! AI_LMW_MAX_WEIGHTS - -// --------------------------------------------------------------------------- -/** @brief Lower the deboning threshold in order to remove more bones. - * - * This is used by the #aiProcess_Debone PostProcess-Step. - * @note The default value is AI_DEBONE_THRESHOLD - * Property type: float.*/ -#define AI_CONFIG_PP_DB_THRESHOLD \ - "PP_DB_THRESHOLD" - -// default value for AI_CONFIG_PP_LBW_MAX_WEIGHTS -#if (!defined AI_DEBONE_THRESHOLD) -# define AI_DEBONE_THRESHOLD 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -// --------------------------------------------------------------------------- -/** @brief Require all bones qualify for deboning before removing any - * - * This is used by the #aiProcess_Debone PostProcess-Step. - * @note The default value is 0 - * Property type: bool.*/ -#define AI_CONFIG_PP_DB_ALL_OR_NONE \ - "PP_DB_ALL_OR_NONE" - -/** @brief Default value for the #AI_CONFIG_PP_ICL_PTCACHE_SIZE property - */ -#ifndef PP_ICL_PTCACHE_SIZE -# define PP_ICL_PTCACHE_SIZE 12 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the size of the post-transform vertex cache to optimize the - * vertices for. 
This configures the #aiProcess_ImproveCacheLocality step. - * - * The size is given in vertices. Of course you can't know how the vertex - * format will exactly look like after the import returns, but you can still - * guess what your meshes will probably have. - * @note The default value is #PP_ICL_PTCACHE_SIZE. That results in slight - * performance improvements for most nVidia/AMD cards since 2002. - * Property type: integer. - */ -#define AI_CONFIG_PP_ICL_PTCACHE_SIZE "PP_ICL_PTCACHE_SIZE" - -// --------------------------------------------------------------------------- -/** @brief Enumerates components of the aiScene and aiMesh data structures - * that can be excluded from the import using the #aiProcess_RemoveComponent step. - * - * See the documentation to #aiProcess_RemoveComponent for more details. - */ -enum aiComponent -{ - /** Normal vectors */ -#ifdef SWIG - aiComponent_NORMALS = 0x2, -#else - aiComponent_NORMALS = 0x2u, -#endif - - /** Tangents and bitangents go always together ... */ -#ifdef SWIG - aiComponent_TANGENTS_AND_BITANGENTS = 0x4, -#else - aiComponent_TANGENTS_AND_BITANGENTS = 0x4u, -#endif - - /** ALL color sets - * Use aiComponent_COLORn(N) to specify the N'th set */ - aiComponent_COLORS = 0x8, - - /** ALL texture UV sets - * aiComponent_TEXCOORDn(N) to specify the N'th set */ - aiComponent_TEXCOORDS = 0x10, - - /** Removes all bone weights from all meshes. - * The scenegraph nodes corresponding to the bones are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_BONEWEIGHTS = 0x20, - - /** Removes all node animations (aiScene::mAnimations). - * The corresponding scenegraph nodes are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_ANIMATIONS = 0x40, - - /** Removes all embedded textures (aiScene::mTextures) */ - aiComponent_TEXTURES = 0x80, - - /** Removes all light sources (aiScene::mLights). - * The corresponding scenegraph nodes are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_LIGHTS = 0x100, - - /** Removes all cameras (aiScene::mCameras). - * The corresponding scenegraph nodes are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_CAMERAS = 0x200, - - /** Removes all meshes (aiScene::mMeshes). */ - aiComponent_MESHES = 0x400, - - /** Removes all materials. One default material will - * be generated, so aiScene::mNumMaterials will be 1. */ - aiComponent_MATERIALS = 0x800, - - - /** This value is not used. It is just there to force the - * compiler to map this enum to a 32 Bit integer. */ -#ifndef SWIG - _aiComponent_Force32Bit = 0x9fffffff -#endif -}; - -// Remove a specific color channel 'n' -#define aiComponent_COLORSn(n) (1u << (n+20u)) - -// Remove a specific UV channel 'n' -#define aiComponent_TEXCOORDSn(n) (1u << (n+25u)) - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_RemoveComponent step: - * Specifies the parts of the data structure to be removed. - * - * See the documentation to this step for further details. The property - * is expected to be an integer, a bitwise combination of the - * #aiComponent flags defined above in this header. The default - * value is 0. Important: if no valid mesh is remaining after the - * step has been executed (e.g you thought it was funny to specify ALL - * of the flags defined above) the import FAILS. Mainly because there is - * no data to work on anymore ... 
- */ -#define AI_CONFIG_PP_RVC_FLAGS \ - "PP_RVC_FLAGS" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_SortByPType step: - * Specifies which primitive types are removed by the step. - * - * This is a bitwise combination of the aiPrimitiveType flags. - * Specifying all of them is illegal, of course. A typical use would - * be to exclude all line and point meshes from the import. This - * is an integer property, its default value is 0. - */ -#define AI_CONFIG_PP_SBP_REMOVE \ - "PP_SBP_REMOVE" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_FindInvalidData step: - * Specifies the floating-point accuracy for animation values. The step - * checks for animation tracks where all frame values are absolutely equal - * and removes them. This tweakable controls the epsilon for floating-point - * comparisons - two keys are considered equal if the invariant - * abs(n0-n1)>epsilon holds true for all vector respectively quaternion - * components. The default value is 0.f - comparisons are exact then. - */ -#define AI_CONFIG_PP_FID_ANIM_ACCURACY \ - "PP_FID_ANIM_ACCURACY" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_FindInvalidData step: - * Set to true to ignore texture coordinates. This may be useful if you have - * to assign different kind of textures like one for the summer or one for the winter. - */ -#define AI_CONFIG_PP_FID_IGNORE_TEXTURECOORDS \ - "PP_FID_IGNORE_TEXTURECOORDS" - -// TransformUVCoords evaluates UV scalings -#define AI_UVTRAFO_SCALING 0x1 - -// TransformUVCoords evaluates UV rotations -#define AI_UVTRAFO_ROTATION 0x2 - -// TransformUVCoords evaluates UV translation -#define AI_UVTRAFO_TRANSLATION 0x4 - -// Everything baked together -> default value -#define AI_UVTRAFO_ALL (AI_UVTRAFO_SCALING | AI_UVTRAFO_ROTATION | AI_UVTRAFO_TRANSLATION) - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_TransformUVCoords step: - * Specifies which UV transformations are evaluated. - * - * This is a bitwise combination of the AI_UVTRAFO_XXX flags (integer - * property, of course). By default all transformations are enabled - * (AI_UVTRAFO_ALL). - */ -#define AI_CONFIG_PP_TUV_EVALUATE \ - "PP_TUV_EVALUATE" - -// --------------------------------------------------------------------------- -/** @brief A hint to assimp to favour speed against import quality. - * - * Enabling this option may result in faster loading, but it needn't. - * It represents just a hint to loaders and post-processing steps to use - * faster code paths, if possible. - * This property is expected to be an integer, != 0 stands for true. - * The default value is 0. - */ -#define AI_CONFIG_FAVOUR_SPEED \ - "FAVOUR_SPEED" - - -// ########################################################################### -// IMPORTER SETTINGS -// Various stuff to fine-tune the behaviour of specific importer plugins. -// ########################################################################### - - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will merge all geometry layers present - * in the source file or take only the first. 
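The two stripping steps documented above are driven purely by these integer properties: the caller ORs together aiComponent bits for aiProcess_RemoveComponent and aiPrimitiveType bits for aiProcess_SortByPType. A short illustrative sketch; the chosen components and the helper function are assumptions, not part of this patch:

#include <assimp/Importer.hpp>
#include <assimp/config.h>
#include <assimp/mesh.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

bool count_render_meshes(const char *path, unsigned int &r_count) {
    Assimp::Importer importer;
    // Strip lights, cameras and animations during aiProcess_RemoveComponent.
    importer.SetPropertyInteger(AI_CONFIG_PP_RVC_FLAGS,
            aiComponent_LIGHTS | aiComponent_CAMERAS | aiComponent_ANIMATIONS);
    // Drop point and line primitives during aiProcess_SortByPType.
    importer.SetPropertyInteger(AI_CONFIG_PP_SBP_REMOVE,
            aiPrimitiveType_POINT | aiPrimitiveType_LINE);
    const aiScene *scene = importer.ReadFile(path,
            aiProcess_Triangulate | aiProcess_RemoveComponent | aiProcess_SortByPType);
    if (!scene) {
        return false;
    }
    r_count = scene->mNumMeshes; // The scene is owned by the Importer; copy out what you need.
    return true;
}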
- * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS \ - "IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read all materials present in the - * source file or take only the referenced materials. - * - * This is void unless IMPORT_FBX_READ_MATERIALS=1. - * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ALL_MATERIALS \ - "IMPORT_FBX_READ_ALL_MATERIALS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read materials. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_MATERIALS \ - "IMPORT_FBX_READ_MATERIALS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read embedded textures. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_TEXTURES \ - "IMPORT_FBX_READ_TEXTURES" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read cameras. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_CAMERAS \ - "IMPORT_FBX_READ_CAMERAS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read light sources. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_LIGHTS \ - "IMPORT_FBX_READ_LIGHTS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read animations. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ANIMATIONS \ - "IMPORT_FBX_READ_ANIMATIONS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will act in strict mode in which only - * FBX 2013 is supported and any other sub formats are rejected. FBX 2013 - * is the primary target for the importer, so this format is best - * supported and well-tested. - * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_STRICT_MODE \ - "IMPORT_FBX_STRICT_MODE" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will preserve pivot points for - * transformations (as extra nodes). If set to false, pivots and offsets - * will be evaluated whenever possible. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS \ - "IMPORT_FBX_PRESERVE_PIVOTS" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the importer will drop empty animation curves or - * animation curves which match the bind pose transformation over their - * entire defined range. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_OPTIMIZE_EMPTY_ANIMATION_CURVES \ - "IMPORT_FBX_OPTIMIZE_EMPTY_ANIMATION_CURVES" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will use the legacy embedded texture naming. 
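The same FBX switches are reachable from the plain-C interface through a property store, the route the header's introduction points C users to. A hedged sketch of a geometry-only FBX import; the function name and flag choice are assumptions:

#include <assimp/cimport.h>
#include <assimp/config.h>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

int load_fbx_geometry_only(const char *path) {
    aiPropertyStore *props = aiCreatePropertyStore();
    // Skip animation, camera and light parsing; keep geometry and materials.
    aiSetImportPropertyInteger(props, AI_CONFIG_IMPORT_FBX_READ_ANIMATIONS, 0);
    aiSetImportPropertyInteger(props, AI_CONFIG_IMPORT_FBX_READ_CAMERAS, 0);
    aiSetImportPropertyInteger(props, AI_CONFIG_IMPORT_FBX_READ_LIGHTS, 0);
    const aiScene *scene = aiImportFileExWithProperties(
            path, aiProcess_Triangulate, nullptr, props);
    aiReleasePropertyStore(props);
    int ok = (scene != nullptr);
    if (scene) {
        aiReleaseImport(scene);
    }
    return ok;
}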
- * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING \ - "AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING" - -// --------------------------------------------------------------------------- -/** @brief Set whether the importer shall not remove empty bones. - * - * Empty bones are often used to define connections for other models. - */ -#define AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES \ - "AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES" - - -// --------------------------------------------------------------------------- -/** @brief Set whether the FBX importer shall convert the unit from cm to m. - */ -#define AI_CONFIG_FBX_CONVERT_TO_M \ - "AI_CONFIG_FBX_CONVERT_TO_M" - -// --------------------------------------------------------------------------- -/** @brief Set the vertex animation keyframe to be imported - * - * ASSIMP does not support vertex keyframes (only bone animation is supported). - * The library reads only one frame of models with vertex animations. - * By default this is the first frame. - * \note The default value is 0. This option applies to all importers. - * However, it is also possible to override the global setting - * for a specific loader. You can use the AI_CONFIG_IMPORT_XXX_KEYFRAME - * options (where XXX is a placeholder for the file format for which you - * want to override the global setting). - * Property type: integer. - */ -#define AI_CONFIG_IMPORT_GLOBAL_KEYFRAME "IMPORT_GLOBAL_KEYFRAME" - -#define AI_CONFIG_IMPORT_MD3_KEYFRAME "IMPORT_MD3_KEYFRAME" -#define AI_CONFIG_IMPORT_MD2_KEYFRAME "IMPORT_MD2_KEYFRAME" -#define AI_CONFIG_IMPORT_MDL_KEYFRAME "IMPORT_MDL_KEYFRAME" -#define AI_CONFIG_IMPORT_MDC_KEYFRAME "IMPORT_MDC_KEYFRAME" -#define AI_CONFIG_IMPORT_SMD_KEYFRAME "IMPORT_SMD_KEYFRAME" -#define AI_CONFIG_IMPORT_UNREAL_KEYFRAME "IMPORT_UNREAL_KEYFRAME" - -// --------------------------------------------------------------------------- -/** Smd load multiple animations - * - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_SMD_LOAD_ANIMATION_LIST "IMPORT_SMD_LOAD_ANIMATION_LIST" - -// --------------------------------------------------------------------------- -/** @brief Configures the AC loader to collect all surfaces which have the - * "Backface cull" flag set in separate meshes. - * - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_AC_SEPARATE_BFCULL \ - "IMPORT_AC_SEPARATE_BFCULL" - -// --------------------------------------------------------------------------- -/** @brief Configures whether the AC loader evaluates subdivision surfaces ( - * indicated by the presence of the 'subdiv' attribute in the file). By - * default, Assimp performs the subdivision using the standard - * Catmull-Clark algorithm - * - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_AC_EVAL_SUBDIVISION \ - "IMPORT_AC_EVAL_SUBDIVISION" - -// --------------------------------------------------------------------------- -/** @brief Configures the UNREAL 3D loader to separate faces with different - * surface flags (e.g. two-sided vs. single-sided). - * - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_UNREAL_HANDLE_FLAGS \ - "UNREAL_HANDLE_FLAGS" - -// --------------------------------------------------------------------------- -/** @brief Configures the terragen import plugin to compute UVs for - * terrains, if not given. Furthermore a default texture is assigned. 
- * - * UV coordinates for terrains are so simple to compute that you'll usually - * want to compute them on your own, if you need them. This option is intended - * for model viewers which want to offer an easy way to apply textures to - * terrains. - * * Property type: bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_TER_MAKE_UVS \ - "IMPORT_TER_MAKE_UVS" - -// --------------------------------------------------------------------------- -/** @brief Configures the ASE loader to always reconstruct normal vectors - * based on the smoothing groups loaded from the file. - * - * Some ASE files carry invalid normals, others don't. - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_ASE_RECONSTRUCT_NORMALS \ - "IMPORT_ASE_RECONSTRUCT_NORMALS" - -// --------------------------------------------------------------------------- -/** @brief Configures the MD3 loader to detect and process multi-part - * Quake player models. - * - * These models usually consist of 3 files, lower.md3, upper.md3 and - * head.md3. If this property is set to true, Assimp will try to load and - * combine all three files if one of them is loaded. - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_MD3_HANDLE_MULTIPART \ - "IMPORT_MD3_HANDLE_MULTIPART" - -// --------------------------------------------------------------------------- -/** @brief Tells the MD3 loader which skin files to load. - * - * When loading MD3 files, Assimp checks whether a file - * [md3_file_name]_[skin_name].skin exists. These files are used by - * Quake III to be able to assign different skins (e.g. red and blue team) - * to models. 'default', 'red', 'blue' are typical skin names. - * Property type: String. Default value: "default". - */ -#define AI_CONFIG_IMPORT_MD3_SKIN_NAME \ - "IMPORT_MD3_SKIN_NAME" - -// --------------------------------------------------------------------------- -/** @brief Specify the Quake 3 shader file to be used for a particular - * MD3 file. This can also be a search path. - * - * By default Assimp's behaviour is as follows: If a MD3 file - * <tt>any_path/models/any_q3_subdir/model_name/file_name.md3</tt> is - * loaded, the library tries to locate the corresponding shader file in - * <tt>any_path/scripts/model_name.shader</tt>. This property overrides this - * behaviour. It can either specify a full path to the shader to be loaded - * or alternatively the path (relative or absolute) to the directory where - * the shaders for all MD3s to be loaded reside. Assimp attempts to open - * <tt>IMPORT_MD3_SHADER_SRC/model_name.shader</tt> first, <tt>IMPORT_MD3_SHADER_SRC/file_name.shader</tt> - * is the fallback file. Note that IMPORT_MD3_SHADER_SRC should have a terminal (back)slash. - * Property type: String. Default value: n/a. - */ -#define AI_CONFIG_IMPORT_MD3_SHADER_SRC \ - "IMPORT_MD3_SHADER_SRC" - -// --------------------------------------------------------------------------- -/** @brief Configures the LWO loader to load just one layer from the model. - * - * LWO files consist of layers and in some cases it could be useful to load - * only one of them. This property can be either a string - which specifies - * the name of the layer - or an integer - the index of the layer. If the - * property is not set the whole LWO model is loaded. Loading fails if the - * requested layer is not available. The layer index is zero-based and the - * layer name may not be empty.<br> - * Property type: Integer. Default value: all layers are loaded. 
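Both MD3 lookups above are plain string properties, so selecting a non-default skin or pointing the loader at a shader directory is one call each. An illustrative sketch; the skin name and directory are assumptions:

#include <assimp/Importer.hpp>
#include <assimp/config.h>

// Configure Quake III MD3 skin and shader resolution on an existing importer.
void configure_md3_lookup(Assimp::Importer &importer) {
    // Load [model]_blue.skin instead of the "default" skin.
    importer.SetPropertyString(AI_CONFIG_IMPORT_MD3_SKIN_NAME, "blue");
    // Directory holding the Quake 3 shader scripts; note the trailing slash.
    importer.SetPropertyString(AI_CONFIG_IMPORT_MD3_SHADER_SRC, "scripts/");
}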
- */ -#define AI_CONFIG_IMPORT_LWO_ONE_LAYER_ONLY \ - "IMPORT_LWO_ONE_LAYER_ONLY" - -// --------------------------------------------------------------------------- -/** @brief Configures the MD5 loader to not load the MD5ANIM file for - * a MD5MESH file automatically. - * - * The default strategy is to look for a file with the same name but the - * MD5ANIM extension in the same directory. If it is found, it is loaded - * and combined with the MD5MESH file. This configuration option can be - * used to disable this behaviour. - * - * * Property type: bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_MD5_NO_ANIM_AUTOLOAD \ - "IMPORT_MD5_NO_ANIM_AUTOLOAD" - -// --------------------------------------------------------------------------- -/** @brief Defines the beginning of the time range for which the LWS loader - * evaluates animations and computes aiNodeAnim's. - * - * Assimp provides full conversion of LightWave's envelope system, including - * pre and post conditions. The loader computes linearly subsampled animation - * channels with the frame rate given in the LWS file. This property defines - * the start time. Note: animation channels are only generated if a node - * has at least one envelope with more than one key assigned. This property - * is given in frames, '0' is the first frame. By default, if this property - * is not set, the importer takes the animation start from the input LWS - * file ('FirstFrame' line)<br> - * Property type: Integer. Default value: taken from file. - * - * @see AI_CONFIG_IMPORT_LWS_ANIM_END - end of the imported time range - */ -#define AI_CONFIG_IMPORT_LWS_ANIM_START \ - "IMPORT_LWS_ANIM_START" -#define AI_CONFIG_IMPORT_LWS_ANIM_END \ - "IMPORT_LWS_ANIM_END" - -// --------------------------------------------------------------------------- -/** @brief Defines the output frame rate of the IRR loader. - * - * IRR animations are difficult to convert for Assimp and there will - * always be a loss of quality. This setting defines how many keys per second - * are returned by the converter.<br> - * Property type: integer. Default value: 100 - */ -#define AI_CONFIG_IMPORT_IRR_ANIM_FPS \ - "IMPORT_IRR_ANIM_FPS" - -// --------------------------------------------------------------------------- -/** @brief Ogre Importer will try to find referenced materials from this file. - * - * Ogre meshes reference materials by name, which does not tell Assimp the file - * where it is located. Assimp will try to find the source file in the following - * order: <material-name>.material, <mesh-filename-base>.material and - * lastly the material name defined by this config property. - * <br> - * Property type: String. Default value: Scene.material. - */ -#define AI_CONFIG_IMPORT_OGRE_MATERIAL_FILE \ - "IMPORT_OGRE_MATERIAL_FILE" - -// --------------------------------------------------------------------------- -/** @brief Ogre Importer detects the texture usage from its filename. - * - * Ogre material texture units do not define texture type, the texture's usage - * depends on the used shader or Ogre's fixed pipeline. If this config property - * is true Assimp will try to detect the type from the texture's filename postfix: - * _n, _nrm, _nrml, _normal, _normals and _normalmap for normal map, _s, _spec, - * _specular and _specularmap for specular map, _l, _light, _lightmap, _occ - * and _occlusion for light map, _disp and _displacement for displacement map. - * The matching is case insensitive. The postfix is taken between the last - * underscore and the last period. 
- * Default behavior is to detect type from lower cased texture unit name by - * matching against: normalmap, specularmap, lightmap and displacementmap. - * For both cases if no match is found aiTextureType_DIFFUSE is used. - * <br> - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_OGRE_TEXTURETYPE_FROM_FILENAME \ - "IMPORT_OGRE_TEXTURETYPE_FROM_FILENAME" - - /** @brief Specifies whether the Android JNI asset extraction is supported. - * - * Turn on this option if you want to manage assets in native - * Android application without having to keep the internal directory and asset - * manager pointer. - */ - #define AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT "AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the IFC loader skips over IfcSpace elements. - * - * IfcSpace elements (and their geometric representations) are used to - * represent, well, free space in a building storey.<br> - * Property type: Bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_IFC_SKIP_SPACE_REPRESENTATIONS "IMPORT_IFC_SKIP_SPACE_REPRESENTATIONS" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the IFC loader will use its own, custom triangulation - * algorithm to triangulate wall and floor meshes. - * - * If this property is set to false, walls will be either triangulated by - * #aiProcess_Triangulate or will be passed through as huge polygons with - * faked holes (i.e. holes that are connected with the outer boundary using - * a dummy edge). It is highly recommended to set this property to true - * if you want triangulated data because #aiProcess_Triangulate is known to - * have problems with the kind of polygons that the IFC loader spits out for - * complicated meshes. - * Property type: Bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_IFC_CUSTOM_TRIANGULATION "IMPORT_IFC_CUSTOM_TRIANGULATION" - -// --------------------------------------------------------------------------- -/** @brief Set the tessellation conic angle for IFC smoothing curves. - * - * This is used by the IFC importer to determine the tessellation parameter - * for smoothing curves. - * @note The default value is AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE and the - * accepted values are in range [5.0, 120.0]. - * Property type: Float. - */ -#define AI_CONFIG_IMPORT_IFC_SMOOTHING_ANGLE "IMPORT_IFC_SMOOTHING_ANGLE" - -// default value for AI_CONFIG_IMPORT_IFC_SMOOTHING_ANGLE -#if (!defined AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE) -# define AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE 10.0f -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the tessellation for IFC cylindrical shapes. - * - * This is used by the IFC importer to determine the tessellation parameter - * for cylindrical shapes, i.e. the number of segments used to approximate a circle. - * @note The default value is AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION and the - * accepted values are in range [3, 180]. - * Property type: Integer. 
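The IFC keys above follow the same pattern with bool and float payloads. A minimal sketch, assuming an arbitrary angle inside the documented [5.0, 120.0] range:

#include <assimp/Importer.hpp>
#include <assimp/config.h>

// Tune IFC import: keep IfcSpace geometry and coarsen curve tessellation.
void configure_ifc(Assimp::Importer &importer) {
    importer.SetPropertyBool(AI_CONFIG_IMPORT_IFC_SKIP_SPACE_REPRESENTATIONS, false);
    importer.SetPropertyFloat(AI_CONFIG_IMPORT_IFC_SMOOTHING_ANGLE, 30.0f);
}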
- */ -#define AI_CONFIG_IMPORT_IFC_CYLINDRICAL_TESSELLATION "IMPORT_IFC_CYLINDRICAL_TESSELLATION" - -// default value for AI_CONFIG_IMPORT_IFC_CYLINDRICAL_TESSELLATION -#if (!defined AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION) -# define AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION 32 -#endif - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the Collada loader will ignore the provided up direction. - * - * If this property is set to true, the up direction provided in the file header will - * be ignored and the file will be loaded as is. - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION "IMPORT_COLLADA_IGNORE_UP_DIRECTION" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the Collada loader should use Collada names as node names. - * - * If this property is set to true, the Collada names will be used as the - * node name. The default is to use the id tag (resp. sid tag, if no id tag is present) - * instead. - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_COLLADA_USE_COLLADA_NAMES "IMPORT_COLLADA_USE_COLLADA_NAMES" - -// ---------- All the Export defines ------------ - -/** @brief Specifies the xfile use double for real values of float - * - * Property type: Bool. Default value: false. - */ - -#define AI_CONFIG_EXPORT_XFILE_64BIT "EXPORT_XFILE_64BIT" - -/** @brief Specifies whether the assimp export shall be able to export point clouds - * - * When this flag is not defined the render data has to contain valid faces. - * Point clouds are only a collection of vertices which have nor spatial organization - * by a face and the validation process will remove them. Enabling this feature will - * switch off the flag and enable the functionality to export pure point clouds. - */ -#define AI_CONFIG_EXPORT_POINT_CLOUDS "EXPORT_POINT_CLOUDS" - -/** - * @brief Specifies a gobal key factor for scale, float value - */ -#define AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY "GLOBAL_SCALE_FACTOR" - -#if (!defined AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT) -# define AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -#define AI_CONFIG_APP_SCALE_KEY "APP_SCALE_FACTOR" - -#if (!defined AI_CONFIG_APP_SCALE_KEY) -# define AI_CONFIG_APP_SCALE_KEY 1.0 -#endif // AI_CONFIG_APP_SCALE_KEY - - -// ---------- All the Build/Compile-time defines ------------ - -/** @brief Specifies if double precision is supported inside assimp - * - * Property type: Bool. Default value: undefined. - */ - -/* #undef ASSIMP_DOUBLE_PRECISION */ - -#endif // !! AI_CONFIG_H_INC diff --git a/thirdparty/assimp/include/assimp/config.h.in b/thirdparty/assimp/include/assimp/config.h.in deleted file mode 100644 index 3a6379bf40..0000000000 --- a/thirdparty/assimp/include/assimp/config.h.in +++ /dev/null @@ -1,1018 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2018, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file config.h - * @brief Defines constants for configurable properties for the library - * - * Typically these properties are set via - * #Assimp::Importer::SetPropertyFloat, - * #Assimp::Importer::SetPropertyInteger or - * #Assimp::Importer::SetPropertyString, - * depending on the data type of a property. All properties have a - * default value. See the doc for the mentioned methods for more details. - * - * <br><br> - * The corresponding functions for use with the plain-c API are: - * #aiSetImportPropertyInteger, - * #aiSetImportPropertyFloat, - * #aiSetImportPropertyString - */ -#pragma once -#ifndef AI_CONFIG_H_INC -#define AI_CONFIG_H_INC - - -// ########################################################################### -// LIBRARY SETTINGS -// General, global settings -// ########################################################################### - -// --------------------------------------------------------------------------- -/** @brief Enables time measurements. - * - * If enabled, measures the time needed for each part of the loading - * process (i.e. IO time, importing, postprocessing, ..) and dumps - * these timings to the DefaultLogger. See the @link perf Performance - * Page@endlink for more information on this topic. - * - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_GLOB_MEASURE_TIME \ - "GLOB_MEASURE_TIME" - - -// --------------------------------------------------------------------------- -/** @brief Global setting to disable generation of skeleton dummy meshes - * - * Skeleton dummy meshes are generated as a visualization aid in cases which - * the input data contains no geometry, but only animation data. - * Property data type: bool. Default value: false - */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_IMPORT_NO_SKELETON_MESHES \ - "IMPORT_NO_SKELETON_MESHES" - - - -# if 0 // not implemented yet -// --------------------------------------------------------------------------- -/** @brief Set Assimp's multithreading policy. - * - * This setting is ignored if Assimp was built without boost.thread - * support (ASSIMP_BUILD_NO_THREADING, which is implied by ASSIMP_BUILD_BOOST_WORKAROUND). 
- * Possible values are: -1 to let Assimp decide what to do, 0 to disable - * multithreading entirely and any number larger than 0 to force a specific - * number of threads. Assimp is always free to ignore this settings, which is - * merely a hint. Usually, the default value (-1) will be fine. However, if - * Assimp is used concurrently from multiple user threads, it might be useful - * to limit each Importer instance to a specific number of cores. - * - * For more information, see the @link threading Threading page@endlink. - * Property type: int, default value: -1. - */ -#define AI_CONFIG_GLOB_MULTITHREADING \ - "GLOB_MULTITHREADING" -#endif - -// ########################################################################### -// POST PROCESSING SETTINGS -// Various stuff to fine-tune the behavior of a specific post processing step. -// ########################################################################### - - -// --------------------------------------------------------------------------- -/** @brief Maximum bone count per mesh for the SplitbyBoneCount step. - * - * Meshes are split until the maximum number of bones is reached. The default - * value is AI_SBBC_DEFAULT_MAX_BONES, which may be altered at - * compile-time. - * Property data type: integer. - */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_PP_SBBC_MAX_BONES \ - "PP_SBBC_MAX_BONES" - - -// default limit for bone count -#if (!defined AI_SBBC_DEFAULT_MAX_BONES) -# define AI_SBBC_DEFAULT_MAX_BONES 60 -#endif - - -// --------------------------------------------------------------------------- -/** @brief Specifies the maximum angle that may be between two vertex tangents - * that their tangents and bi-tangents are smoothed. - * - * This applies to the CalcTangentSpace-Step. The angle is specified - * in degrees. The maximum value is 175. - * Property type: float. Default value: 45 degrees - */ -#define AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE \ - "PP_CT_MAX_SMOOTHING_ANGLE" - -// --------------------------------------------------------------------------- -/** @brief Source UV channel for tangent space computation. - * - * The specified channel must exist or an error will be raised. - * Property type: integer. Default value: 0 - */ -// --------------------------------------------------------------------------- -#define AI_CONFIG_PP_CT_TEXTURE_CHANNEL_INDEX \ - "PP_CT_TEXTURE_CHANNEL_INDEX" - -// --------------------------------------------------------------------------- -/** @brief Specifies the maximum angle that may be between two face normals - * at the same vertex position that their are smoothed together. - * - * Sometimes referred to as 'crease angle'. - * This applies to the GenSmoothNormals-Step. The angle is specified - * in degrees, so 180 is PI. The default value is 175 degrees (all vertex - * normals are smoothed). The maximum value is 175, too. Property type: float. - * Warning: setting this option may cause a severe loss of performance. The - * performance is unaffected if the #AI_CONFIG_FAVOUR_SPEED flag is set but - * the output quality may be reduced. - */ -#define AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE \ - "PP_GSN_MAX_SMOOTHING_ANGLE" - - -// --------------------------------------------------------------------------- -/** @brief Sets the colormap (= palette) to be used to decode embedded - * textures in MDL (Quake or 3DGS) files. - * - * This must be a valid path to a file. 
The file is 768 (256*3) bytes - * large and contains RGB triplets for each of the 256 palette entries. - * The default value is colormap.lmp. If the file is not found, - * a default palette (from Quake 1) is used. - * Property type: string. - */ -#define AI_CONFIG_IMPORT_MDL_COLORMAP \ - "IMPORT_MDL_COLORMAP" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_RemoveRedundantMaterials step to - * keep materials matching a name in a given list. - * - * This is a list of 1 to n strings, ' ' serves as delimiter character. - * Identifiers containing whitespaces must be enclosed in *single* - * quotation marks. For example:<tt> - * "keep-me and_me_to anotherMaterialToBeKept \'name with whitespace\'"</tt>. - * If a material matches on of these names, it will not be modified or - * removed by the postprocessing step nor will other materials be replaced - * by a reference to it. <br> - * This option might be useful if you are using some magic material names - * to pass additional semantics through the content pipeline. This ensures - * they won't be optimized away, but a general optimization is still - * performed for materials not contained in the list. - * Property type: String. Default value: n/a - * @note Linefeeds, tabs or carriage returns are treated as whitespace. - * Material names are case sensitive. - */ -#define AI_CONFIG_PP_RRM_EXCLUDE_LIST \ - "PP_RRM_EXCLUDE_LIST" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to - * keep the scene hierarchy. Meshes are moved to worldspace, but - * no optimization is performed (read: meshes with equal materials are not - * joined. The total number of meshes won't change). - * - * This option could be of use for you if the scene hierarchy contains - * important additional information which you intend to parse. - * For rendering, you can still render all meshes in the scene without - * any transformations. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_PTV_KEEP_HIERARCHY \ - "PP_PTV_KEEP_HIERARCHY" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to normalize - * all vertex components into the [-1,1] range. That is, a bounding box - * for the whole scene is computed, the maximum component is taken and all - * meshes are scaled appropriately (uniformly of course!). - * This might be useful if you don't know the spatial dimension of the input - * data*/ -#define AI_CONFIG_PP_PTV_NORMALIZE \ - "PP_PTV_NORMALIZE" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to use - * a users defined matrix as the scene root node transformation before - * transforming vertices. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_PTV_ADD_ROOT_TRANSFORMATION \ - "PP_PTV_ADD_ROOT_TRANSFORMATION" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_PreTransformVertices step to use - * a users defined matrix as the scene root node transformation before - * transforming vertices. This property correspond to the 'a1' component - * of the transformation matrix. - * Property type: aiMatrix4x4. 
- */ -#define AI_CONFIG_PP_PTV_ROOT_TRANSFORMATION \ - "PP_PTV_ROOT_TRANSFORMATION" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_FindDegenerates step to - * remove degenerated primitives from the import - immediately. - * - * The default behaviour converts degenerated triangles to lines and - * degenerated lines to points. See the documentation to the - * #aiProcess_FindDegenerates step for a detailed example of the various ways - * to get rid of these lines and points if you don't want them. - * Property type: bool. Default value: false. - */ -#define AI_CONFIG_PP_FD_REMOVE \ - "PP_FD_REMOVE" - -// --------------------------------------------------------------------------- -/** - * @brief Configures the #aiProcess_FindDegenerates to check the area of a - * trinagle to be greates than e-6. If this is not the case the triangle will - * be removed if #AI_CONFIG_PP_FD_REMOVE is set to true. - */ -#define AI_CONFIG_PP_FD_CHECKAREA \ - "PP_FD_CHECKAREA" - -// --------------------------------------------------------------------------- -/** @brief Configures the #aiProcess_OptimizeGraph step to preserve nodes - * matching a name in a given list. - * - * This is a list of 1 to n strings, ' ' serves as delimiter character. - * Identifiers containing whitespaces must be enclosed in *single* - * quotation marks. For example:<tt> - * "keep-me and_me_to anotherNodeToBeKept \'name with whitespace\'"</tt>. - * If a node matches on of these names, it will not be modified or - * removed by the postprocessing step.<br> - * This option might be useful if you are using some magic node names - * to pass additional semantics through the content pipeline. This ensures - * they won't be optimized away, but a general optimization is still - * performed for nodes not contained in the list. - * Property type: String. Default value: n/a - * @note Linefeeds, tabs or carriage returns are treated as whitespace. - * Node names are case sensitive. - */ -#define AI_CONFIG_PP_OG_EXCLUDE_LIST \ - "PP_OG_EXCLUDE_LIST" - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of triangles in a mesh. - * - * This is used by the "SplitLargeMeshes" PostProcess-Step to determine - * whether a mesh must be split or not. - * @note The default value is AI_SLM_DEFAULT_MAX_TRIANGLES - * Property type: integer. - */ -#define AI_CONFIG_PP_SLM_TRIANGLE_LIMIT \ - "PP_SLM_TRIANGLE_LIMIT" - -// default value for AI_CONFIG_PP_SLM_TRIANGLE_LIMIT -#if (!defined AI_SLM_DEFAULT_MAX_TRIANGLES) -# define AI_SLM_DEFAULT_MAX_TRIANGLES 1000000 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of vertices in a mesh. - * - * This is used by the "SplitLargeMeshes" PostProcess-Step to determine - * whether a mesh must be split or not. - * @note The default value is AI_SLM_DEFAULT_MAX_VERTICES - * Property type: integer. - */ -#define AI_CONFIG_PP_SLM_VERTEX_LIMIT \ - "PP_SLM_VERTEX_LIMIT" - -// default value for AI_CONFIG_PP_SLM_VERTEX_LIMIT -#if (!defined AI_SLM_DEFAULT_MAX_VERTICES) -# define AI_SLM_DEFAULT_MAX_VERTICES 1000000 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the maximum number of bones affecting a single vertex - * - * This is used by the #aiProcess_LimitBoneWeights PostProcess-Step. 
- * @note The default value is AI_LMW_MAX_WEIGHTS - * Property type: integer.*/ -#define AI_CONFIG_PP_LBW_MAX_WEIGHTS \ - "PP_LBW_MAX_WEIGHTS" - -// default value for AI_CONFIG_PP_LBW_MAX_WEIGHTS -#if (!defined AI_LMW_MAX_WEIGHTS) -# define AI_LMW_MAX_WEIGHTS 0x4 -#endif // !! AI_LMW_MAX_WEIGHTS - -// --------------------------------------------------------------------------- -/** @brief Lower the deboning threshold in order to remove more bones. - * - * This is used by the #aiProcess_Debone PostProcess-Step. - * @note The default value is AI_DEBONE_THRESHOLD - * Property type: float.*/ -#define AI_CONFIG_PP_DB_THRESHOLD \ - "PP_DB_THRESHOLD" - -// default value for AI_CONFIG_PP_LBW_MAX_WEIGHTS -#if (!defined AI_DEBONE_THRESHOLD) -# define AI_DEBONE_THRESHOLD 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -// --------------------------------------------------------------------------- -/** @brief Require all bones qualify for deboning before removing any - * - * This is used by the #aiProcess_Debone PostProcess-Step. - * @note The default value is 0 - * Property type: bool.*/ -#define AI_CONFIG_PP_DB_ALL_OR_NONE \ - "PP_DB_ALL_OR_NONE" - -/** @brief Default value for the #AI_CONFIG_PP_ICL_PTCACHE_SIZE property - */ -#ifndef PP_ICL_PTCACHE_SIZE -# define PP_ICL_PTCACHE_SIZE 12 -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the size of the post-transform vertex cache to optimize the - * vertices for. This configures the #aiProcess_ImproveCacheLocality step. - * - * The size is given in vertices. Of course you can't know how the vertex - * format will exactly look like after the import returns, but you can still - * guess what your meshes will probably have. - * @note The default value is #PP_ICL_PTCACHE_SIZE. That results in slight - * performance improvements for most nVidia/AMD cards since 2002. - * Property type: integer. - */ -#define AI_CONFIG_PP_ICL_PTCACHE_SIZE "PP_ICL_PTCACHE_SIZE" - -// --------------------------------------------------------------------------- -/** @brief Enumerates components of the aiScene and aiMesh data structures - * that can be excluded from the import using the #aiProcess_RemoveComponent step. - * - * See the documentation to #aiProcess_RemoveComponent for more details. - */ -enum aiComponent -{ - /** Normal vectors */ -#ifdef SWIG - aiComponent_NORMALS = 0x2, -#else - aiComponent_NORMALS = 0x2u, -#endif - - /** Tangents and bitangents go always together ... */ -#ifdef SWIG - aiComponent_TANGENTS_AND_BITANGENTS = 0x4, -#else - aiComponent_TANGENTS_AND_BITANGENTS = 0x4u, -#endif - - /** ALL color sets - * Use aiComponent_COLORn(N) to specify the N'th set */ - aiComponent_COLORS = 0x8, - - /** ALL texture UV sets - * aiComponent_TEXCOORDn(N) to specify the N'th set */ - aiComponent_TEXCOORDS = 0x10, - - /** Removes all bone weights from all meshes. - * The scenegraph nodes corresponding to the bones are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_BONEWEIGHTS = 0x20, - - /** Removes all node animations (aiScene::mAnimations). - * The corresponding scenegraph nodes are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_ANIMATIONS = 0x40, - - /** Removes all embedded textures (aiScene::mTextures) */ - aiComponent_TEXTURES = 0x80, - - /** Removes all light sources (aiScene::mLights). - * The corresponding scenegraph nodes are NOT removed. 
- * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_LIGHTS = 0x100, - - /** Removes all cameras (aiScene::mCameras). - * The corresponding scenegraph nodes are NOT removed. - * use the #aiProcess_OptimizeGraph step to do this */ - aiComponent_CAMERAS = 0x200, - - /** Removes all meshes (aiScene::mMeshes). */ - aiComponent_MESHES = 0x400, - - /** Removes all materials. One default material will - * be generated, so aiScene::mNumMaterials will be 1. */ - aiComponent_MATERIALS = 0x800, - - - /** This value is not used. It is just there to force the - * compiler to map this enum to a 32 Bit integer. */ -#ifndef SWIG - _aiComponent_Force32Bit = 0x9fffffff -#endif -}; - -// Remove a specific color channel 'n' -#define aiComponent_COLORSn(n) (1u << (n+20u)) - -// Remove a specific UV channel 'n' -#define aiComponent_TEXCOORDSn(n) (1u << (n+25u)) - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_RemoveComponent step: - * Specifies the parts of the data structure to be removed. - * - * See the documentation to this step for further details. The property - * is expected to be an integer, a bitwise combination of the - * #aiComponent flags defined above in this header. The default - * value is 0. Important: if no valid mesh is remaining after the - * step has been executed (e.g you thought it was funny to specify ALL - * of the flags defined above) the import FAILS. Mainly because there is - * no data to work on anymore ... - */ -#define AI_CONFIG_PP_RVC_FLAGS \ - "PP_RVC_FLAGS" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_SortByPType step: - * Specifies which primitive types are removed by the step. - * - * This is a bitwise combination of the aiPrimitiveType flags. - * Specifying all of them is illegal, of course. A typical use would - * be to exclude all line and point meshes from the import. This - * is an integer property, its default value is 0. - */ -#define AI_CONFIG_PP_SBP_REMOVE \ - "PP_SBP_REMOVE" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_FindInvalidData step: - * Specifies the floating-point accuracy for animation values. The step - * checks for animation tracks where all frame values are absolutely equal - * and removes them. This tweakable controls the epsilon for floating-point - * comparisons - two keys are considered equal if the invariant - * abs(n0-n1)>epsilon holds true for all vector respectively quaternion - * components. The default value is 0.f - comparisons are exact then. - */ -#define AI_CONFIG_PP_FID_ANIM_ACCURACY \ - "PP_FID_ANIM_ACCURACY" - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_FindInvalidData step: - * Set to true to ignore texture coordinates. This may be useful if you have - * to assign different kind of textures like one for the summer or one for the winter. 
- */ -#define AI_CONFIG_PP_FID_IGNORE_TEXTURECOORDS \ - "PP_FID_IGNORE_TEXTURECOORDS" - -// TransformUVCoords evaluates UV scalings -#define AI_UVTRAFO_SCALING 0x1 - -// TransformUVCoords evaluates UV rotations -#define AI_UVTRAFO_ROTATION 0x2 - -// TransformUVCoords evaluates UV translation -#define AI_UVTRAFO_TRANSLATION 0x4 - -// Everything baked together -> default value -#define AI_UVTRAFO_ALL (AI_UVTRAFO_SCALING | AI_UVTRAFO_ROTATION | AI_UVTRAFO_TRANSLATION) - -// --------------------------------------------------------------------------- -/** @brief Input parameter to the #aiProcess_TransformUVCoords step: - * Specifies which UV transformations are evaluated. - * - * This is a bitwise combination of the AI_UVTRAFO_XXX flags (integer - * property, of course). By default all transformations are enabled - * (AI_UVTRAFO_ALL). - */ -#define AI_CONFIG_PP_TUV_EVALUATE \ - "PP_TUV_EVALUATE" - -// --------------------------------------------------------------------------- -/** @brief A hint to assimp to favour speed against import quality. - * - * Enabling this option may result in faster loading, but it needn't. - * It represents just a hint to loaders and post-processing steps to use - * faster code paths, if possible. - * This property is expected to be an integer, != 0 stands for true. - * The default value is 0. - */ -#define AI_CONFIG_FAVOUR_SPEED \ - "FAVOUR_SPEED" - - -// ########################################################################### -// IMPORTER SETTINGS -// Various stuff to fine-tune the behaviour of specific importer plugins. -// ########################################################################### - - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will merge all geometry layers present - * in the source file or take only the first. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS \ - "IMPORT_FBX_READ_ALL_GEOMETRY_LAYERS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read all materials present in the - * source file or take only the referenced materials. - * - * This is void unless IMPORT_FBX_READ_MATERIALS=1. - * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ALL_MATERIALS \ - "IMPORT_FBX_READ_ALL_MATERIALS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read materials. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_MATERIALS \ - "IMPORT_FBX_READ_MATERIALS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read embedded textures. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_TEXTURES \ - "IMPORT_FBX_READ_TEXTURES" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read cameras. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_CAMERAS \ - "IMPORT_FBX_READ_CAMERAS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read light sources. 
- * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_LIGHTS \ - "IMPORT_FBX_READ_LIGHTS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will read animations. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_READ_ANIMATIONS \ - "IMPORT_FBX_READ_ANIMATIONS" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will act in strict mode in which only - * FBX 2013 is supported and any other sub formats are rejected. FBX 2013 - * is the primary target for the importer, so this format is best - * supported and well-tested. - * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_STRICT_MODE \ - "IMPORT_FBX_STRICT_MODE" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will preserve pivot points for - * transformations (as extra nodes). If set to false, pivots and offsets - * will be evaluated whenever possible. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS \ - "IMPORT_FBX_PRESERVE_PIVOTS" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the importer will drop empty animation curves or - * animation curves which match the bind pose transformation over their - * entire defined range. - * - * The default value is true (1) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_OPTIMIZE_EMPTY_ANIMATION_CURVES \ - "IMPORT_FBX_OPTIMIZE_EMPTY_ANIMATION_CURVES" - -// --------------------------------------------------------------------------- -/** @brief Set whether the fbx importer will use the legacy embedded texture naming. - * - * The default value is false (0) - * Property type: bool - */ -#define AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING \ - "AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING" - -// --------------------------------------------------------------------------- -/** @brief Set wether the importer shall not remove empty bones. - * - * Empty bone are often used to define connections for other models. - */ -#define AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES \ - "AI_CONFIG_IMPORT_REMOVE_EMPTY_BONES" - - -// --------------------------------------------------------------------------- -/** @brief Set wether the FBX importer shall convert the unit from cm to m. - */ -#define AI_CONFIG_FBX_CONVERT_TO_M \ - "AI_CONFIG_FBX_CONVERT_TO_M" - -// --------------------------------------------------------------------------- -/** @brief Set the vertex animation keyframe to be imported - * - * ASSIMP does not support vertex keyframes (only bone animation is supported). - * The library reads only one frame of models with vertex animations. - * By default this is the first frame. - * \note The default value is 0. This option applies to all importers. - * However, it is also possible to override the global setting - * for a specific loader. You can use the AI_CONFIG_IMPORT_XXX_KEYFRAME - * options (where XXX is a placeholder for the file format for which you - * want to override the global setting). - * Property type: integer. 
- */ -#define AI_CONFIG_IMPORT_GLOBAL_KEYFRAME "IMPORT_GLOBAL_KEYFRAME" - -#define AI_CONFIG_IMPORT_MD3_KEYFRAME "IMPORT_MD3_KEYFRAME" -#define AI_CONFIG_IMPORT_MD2_KEYFRAME "IMPORT_MD2_KEYFRAME" -#define AI_CONFIG_IMPORT_MDL_KEYFRAME "IMPORT_MDL_KEYFRAME" -#define AI_CONFIG_IMPORT_MDC_KEYFRAME "IMPORT_MDC_KEYFRAME" -#define AI_CONFIG_IMPORT_SMD_KEYFRAME "IMPORT_SMD_KEYFRAME" -#define AI_CONFIG_IMPORT_UNREAL_KEYFRAME "IMPORT_UNREAL_KEYFRAME" - -// --------------------------------------------------------------------------- -/** Smd load multiple animations - * - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_SMD_LOAD_ANIMATION_LIST "IMPORT_SMD_LOAD_ANIMATION_LIST" - -// --------------------------------------------------------------------------- -/** @brief Configures the AC loader to collect all surfaces which have the - * "Backface cull" flag set in separate meshes. - * - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_AC_SEPARATE_BFCULL \ - "IMPORT_AC_SEPARATE_BFCULL" - -// --------------------------------------------------------------------------- -/** @brief Configures whether the AC loader evaluates subdivision surfaces ( - * indicated by the presence of the 'subdiv' attribute in the file). By - * default, Assimp performs the subdivision using the standard - * Catmull-Clark algorithm - * - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_AC_EVAL_SUBDIVISION \ - "IMPORT_AC_EVAL_SUBDIVISION" - -// --------------------------------------------------------------------------- -/** @brief Configures the UNREAL 3D loader to separate faces with different - * surface flags (e.g. two-sided vs. single-sided). - * - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_UNREAL_HANDLE_FLAGS \ - "UNREAL_HANDLE_FLAGS" - -// --------------------------------------------------------------------------- -/** @brief Configures the terragen import plugin to compute uv's for - * terrains, if not given. Furthermore a default texture is assigned. - * - * UV coordinates for terrains are so simple to compute that you'll usually - * want to compute them on your own, if you need them. This option is intended - * for model viewers which want to offer an easy way to apply textures to - * terrains. - * * Property type: bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_TER_MAKE_UVS \ - "IMPORT_TER_MAKE_UVS" - -// --------------------------------------------------------------------------- -/** @brief Configures the ASE loader to always reconstruct normal vectors - * basing on the smoothing groups loaded from the file. - * - * Some ASE files have carry invalid normals, other don't. - * * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_ASE_RECONSTRUCT_NORMALS \ - "IMPORT_ASE_RECONSTRUCT_NORMALS" - -// --------------------------------------------------------------------------- -/** @brief Configures the M3D loader to detect and process multi-part - * Quake player models. - * - * These models usually consist of 3 files, lower.md3, upper.md3 and - * head.md3. If this property is set to true, Assimp will try to load and - * combine all three files if one of them is loaded. - * Property type: bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_MD3_HANDLE_MULTIPART \ - "IMPORT_MD3_HANDLE_MULTIPART" - -// --------------------------------------------------------------------------- -/** @brief Tells the MD3 loader which skin files to load. 
- * - * When loading MD3 files, Assimp checks whether a file - * [md3_file_name]_[skin_name].skin is existing. These files are used by - * Quake III to be able to assign different skins (e.g. red and blue team) - * to models. 'default', 'red', 'blue' are typical skin names. - * Property type: String. Default value: "default". - */ -#define AI_CONFIG_IMPORT_MD3_SKIN_NAME \ - "IMPORT_MD3_SKIN_NAME" - -// --------------------------------------------------------------------------- -/** @brief Specify the Quake 3 shader file to be used for a particular - * MD3 file. This can also be a search path. - * - * By default Assimp's behaviour is as follows: If a MD3 file - * <tt>any_path/models/any_q3_subdir/model_name/file_name.md3</tt> is - * loaded, the library tries to locate the corresponding shader file in - * <tt>any_path/scripts/model_name.shader</tt>. This property overrides this - * behaviour. It can either specify a full path to the shader to be loaded - * or alternatively the path (relative or absolute) to the directory where - * the shaders for all MD3s to be loaded reside. Assimp attempts to open - * <tt>IMPORT_MD3_SHADER_SRC/model_name.shader</tt> first, <tt>IMPORT_MD3_SHADER_SRC/file_name.shader</tt> - * is the fallback file. Note that IMPORT_MD3_SHADER_SRC should have a terminal (back)slash. - * Property type: String. Default value: n/a. - */ -#define AI_CONFIG_IMPORT_MD3_SHADER_SRC \ - "IMPORT_MD3_SHADER_SRC" - -// --------------------------------------------------------------------------- -/** @brief Configures the LWO loader to load just one layer from the model. - * - * LWO files consist of layers and in some cases it could be useful to load - * only one of them. This property can be either a string - which specifies - * the name of the layer - or an integer - the index of the layer. If the - * property is not set the whole LWO model is loaded. Loading fails if the - * requested layer is not available. The layer index is zero-based and the - * layer name may not be empty.<br> - * Property type: Integer. Default value: all layers are loaded. - */ -#define AI_CONFIG_IMPORT_LWO_ONE_LAYER_ONLY \ - "IMPORT_LWO_ONE_LAYER_ONLY" - -// --------------------------------------------------------------------------- -/** @brief Configures the MD5 loader to not load the MD5ANIM file for - * a MD5MESH file automatically. - * - * The default strategy is to look for a file with the same name but the - * MD5ANIM extension in the same directory. If it is found, it is loaded - * and combined with the MD5MESH file. This configuration option can be - * used to disable this behaviour. - * - * * Property type: bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_MD5_NO_ANIM_AUTOLOAD \ - "IMPORT_MD5_NO_ANIM_AUTOLOAD" - -// --------------------------------------------------------------------------- -/** @brief Defines the begin of the time range for which the LWS loader - * evaluates animations and computes aiNodeAnim's. - * - * Assimp provides full conversion of LightWave's envelope system, including - * pre and post conditions. The loader computes linearly subsampled animation - * chanels with the frame rate given in the LWS file. This property defines - * the start time. Note: animation channels are only generated if a node - * has at least one envelope with more tan one key assigned. This property. - * is given in frames, '0' is the first frame. 
By default, if this property - * is not set, the importer takes the animation start from the input LWS - * file ('FirstFrame' line)<br> - * Property type: Integer. Default value: taken from file. - * - * @see AI_CONFIG_IMPORT_LWS_ANIM_END - end of the imported time range - */ -#define AI_CONFIG_IMPORT_LWS_ANIM_START \ - "IMPORT_LWS_ANIM_START" -#define AI_CONFIG_IMPORT_LWS_ANIM_END \ - "IMPORT_LWS_ANIM_END" - -// --------------------------------------------------------------------------- -/** @brief Defines the output frame rate of the IRR loader. - * - * IRR animations are difficult to convert for Assimp and there will - * always be a loss of quality. This setting defines how many keys per second - * are returned by the converter.<br> - * Property type: integer. Default value: 100 - */ -#define AI_CONFIG_IMPORT_IRR_ANIM_FPS \ - "IMPORT_IRR_ANIM_FPS" - -// --------------------------------------------------------------------------- -/** @brief Ogre Importer will try to find referenced materials from this file. - * - * Ogre meshes reference with material names, this does not tell Assimp the file - * where it is located in. Assimp will try to find the source file in the following - * order: <material-name>.material, <mesh-filename-base>.material and - * lastly the material name defined by this config property. - * <br> - * Property type: String. Default value: Scene.material. - */ -#define AI_CONFIG_IMPORT_OGRE_MATERIAL_FILE \ - "IMPORT_OGRE_MATERIAL_FILE" - -// --------------------------------------------------------------------------- -/** @brief Ogre Importer detect the texture usage from its filename. - * - * Ogre material texture units do not define texture type, the textures usage - * depends on the used shader or Ogre's fixed pipeline. If this config property - * is true Assimp will try to detect the type from the textures filename postfix: - * _n, _nrm, _nrml, _normal, _normals and _normalmap for normal map, _s, _spec, - * _specular and _specularmap for specular map, _l, _light, _lightmap, _occ - * and _occlusion for light map, _disp and _displacement for displacement map. - * The matching is case insensitive. Post fix is taken between the last - * underscore and the last period. - * Default behavior is to detect type from lower cased texture unit name by - * matching against: normalmap, specularmap, lightmap and displacementmap. - * For both cases if no match is found aiTextureType_DIFFUSE is used. - * <br> - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_OGRE_TEXTURETYPE_FROM_FILENAME \ - "IMPORT_OGRE_TEXTURETYPE_FROM_FILENAME" - - /** @brief Specifies whether the Android JNI asset extraction is supported. - * - * Turn on this option if you want to manage assets in native - * Android application without having to keep the internal directory and asset - * manager pointer. - */ - #define AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT "AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the IFC loader skips over IfcSpace elements. - * - * IfcSpace elements (and their geometric representations) are used to - * represent, well, free space in a building storey.<br> - * Property type: Bool. Default value: true. 
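The importer-specific switches above follow the same pattern with bool- and string-typed setters; a hedged sketch with illustrative values only (none of these choices are mandated by the headers being removed):

    #include <assimp/Importer.hpp>
    #include <assimp/config.h>

    // Illustrative only: bake FBX pivots into the node transforms, load the
    // Quake III "red" team skin, and point the Ogre loader at a material script.
    static void tune_importers(Assimp::Importer &importer) {
        importer.SetPropertyBool(AI_CONFIG_IMPORT_FBX_PRESERVE_PIVOTS, false);
        importer.SetPropertyString(AI_CONFIG_IMPORT_MD3_SKIN_NAME, "red");
        importer.SetPropertyString(AI_CONFIG_IMPORT_OGRE_MATERIAL_FILE, "Scene.material");
    }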
- */ -#define AI_CONFIG_IMPORT_IFC_SKIP_SPACE_REPRESENTATIONS "IMPORT_IFC_SKIP_SPACE_REPRESENTATIONS" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the IFC loader will use its own, custom triangulation - * algorithm to triangulate wall and floor meshes. - * - * If this property is set to false, walls will be either triangulated by - * #aiProcess_Triangulate or will be passed through as huge polygons with - * faked holes (i.e. holes that are connected with the outer boundary using - * a dummy edge). It is highly recommended to set this property to true - * if you want triangulated data because #aiProcess_Triangulate is known to - * have problems with the kind of polygons that the IFC loader spits out for - * complicated meshes. - * Property type: Bool. Default value: true. - */ -#define AI_CONFIG_IMPORT_IFC_CUSTOM_TRIANGULATION "IMPORT_IFC_CUSTOM_TRIANGULATION" - -// --------------------------------------------------------------------------- -/** @brief Set the tessellation conic angle for IFC smoothing curves. - * - * This is used by the IFC importer to determine the tessellation parameter - * for smoothing curves. - * @note The default value is AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE and the - * accepted values are in range [5.0, 120.0]. - * Property type: Float. - */ -#define AI_CONFIG_IMPORT_IFC_SMOOTHING_ANGLE "IMPORT_IFC_SMOOTHING_ANGLE" - -// default value for AI_CONFIG_IMPORT_IFC_SMOOTHING_ANGLE -#if (!defined AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE) -# define AI_IMPORT_IFC_DEFAULT_SMOOTHING_ANGLE 10.0f -#endif - -// --------------------------------------------------------------------------- -/** @brief Set the tessellation for IFC cylindrical shapes. - * - * This is used by the IFC importer to determine the tessellation parameter - * for cylindrical shapes, i.e. the number of segments used to approximate a circle. - * @note The default value is AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION and the - * accepted values are in range [3, 180]. - * Property type: Integer. - */ -#define AI_CONFIG_IMPORT_IFC_CYLINDRICAL_TESSELLATION "IMPORT_IFC_CYLINDRICAL_TESSELLATION" - -// default value for AI_CONFIG_IMPORT_IFC_CYLINDRICAL_TESSELLATION -#if (!defined AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION) -# define AI_IMPORT_IFC_DEFAULT_CYLINDRICAL_TESSELLATION 32 -#endif - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the Collada loader will ignore the provided up direction. - * - * If this property is set to true, the up direction provided in the file header will - * be ignored and the file will be loaded as is. - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_COLLADA_IGNORE_UP_DIRECTION "IMPORT_COLLADA_IGNORE_UP_DIRECTION" - -// --------------------------------------------------------------------------- -/** @brief Specifies whether the Collada loader should use Collada names as node names. - * - * If this property is set to true, the Collada names will be used as the - * node name. The default is to use the id tag (resp. sid tag, if no id tag is present) - * instead. - * Property type: Bool. Default value: false. - */ -#define AI_CONFIG_IMPORT_COLLADA_USE_COLLADA_NAMES "IMPORT_COLLADA_USE_COLLADA_NAMES" - -// ---------- All the Export defines ------------ - -/** @brief Specifies the xfile use double for real values of float - * - * Property type: Bool. Default value: false. 
- */ - -#define AI_CONFIG_EXPORT_XFILE_64BIT "EXPORT_XFILE_64BIT" - -/** @brief Specifies whether the assimp export shall be able to export point clouds - * - * When this flag is not defined the render data has to contain valid faces. - * Point clouds are only a collection of vertices which have nor spatial organization - * by a face and the validation process will remove them. Enabling this feature will - * switch off the flag and enable the functionality to export pure point clouds. - */ -#define AI_CONFIG_EXPORT_POINT_CLOUDS "EXPORT_POINT_CLOUDS" - -/** - * @brief Specifies a gobal key factor for scale, float value - */ -#define AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY "GLOBAL_SCALE_FACTOR" - -#if (!defined AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT) -# define AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT 1.0f -#endif // !! AI_DEBONE_THRESHOLD - -#define AI_CONFIG_APP_SCALE_KEY "APP_SCALE_FACTOR" - -#if (!defined AI_CONFIG_APP_SCALE_KEY) -# define AI_CONFIG_APP_SCALE_KEY 1.0 -#endif // AI_CONFIG_APP_SCALE_KEY - - -// ---------- All the Build/Compile-time defines ------------ - -/** @brief Specifies if double precision is supported inside assimp - * - * Property type: Bool. Default value: undefined. - */ - -#cmakedefine ASSIMP_DOUBLE_PRECISION 1 - -#endif // !! AI_CONFIG_H_INC diff --git a/thirdparty/assimp/include/assimp/defs.h b/thirdparty/assimp/include/assimp/defs.h deleted file mode 100644 index 6f2f8ae88b..0000000000 --- a/thirdparty/assimp/include/assimp/defs.h +++ /dev/null @@ -1,327 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file defs.h - * @brief Assimp build configuration setup. See the notes in the comment - * blocks to find out how to customize _your_ Assimp build. 
- */ - -#pragma once -#ifndef AI_DEFINES_H_INC -#define AI_DEFINES_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/config.h> - -////////////////////////////////////////////////////////////////////////// -/* Define ASSIMP_BUILD_NO_XX_IMPORTER to disable a specific - * file format loader. The loader is be excluded from the - * build in this case. 'XX' stands for the most common file - * extension of the file format. E.g.: - * ASSIMP_BUILD_NO_X_IMPORTER disables the X loader. - * - * If you're unsure about that, take a look at the implementation of the - * import plugin you wish to disable. You'll find the right define in the - * first lines of the corresponding unit. - * - * Other (mixed) configuration switches are listed here: - * ASSIMP_BUILD_NO_COMPRESSED_X - * - Disable support for compressed X files (zip) - * ASSIMP_BUILD_NO_COMPRESSED_BLEND - * - Disable support for compressed Blender files (zip) - * ASSIMP_BUILD_NO_COMPRESSED_IFC - * - Disable support for IFCZIP files (unzip) - */ -////////////////////////////////////////////////////////////////////////// - -#ifndef ASSIMP_BUILD_NO_COMPRESSED_X -# define ASSIMP_BUILD_NEED_Z_INFLATE -#endif - -#ifndef ASSIMP_BUILD_NO_COMPRESSED_BLEND -# define ASSIMP_BUILD_NEED_Z_INFLATE -#endif - -#ifndef ASSIMP_BUILD_NO_COMPRESSED_IFC -# define ASSIMP_BUILD_NEED_Z_INFLATE -# define ASSIMP_BUILD_NEED_UNZIP -#endif - -#ifndef ASSIMP_BUILD_NO_Q3BSP_IMPORTER -# define ASSIMP_BUILD_NEED_Z_INFLATE -# define ASSIMP_BUILD_NEED_UNZIP -#endif - -////////////////////////////////////////////////////////////////////////// -/* Define ASSIMP_BUILD_NO_XX_PROCESS to disable a specific - * post processing step. This is the current list of process names ('XX'): - * CALCTANGENTS - * JOINVERTICES - * TRIANGULATE - * DROPFACENORMALS - * GENFACENORMALS - * GENVERTEXNORMALS - * REMOVEVC - * SPLITLARGEMESHES - * PRETRANSFORMVERTICES - * LIMITBONEWEIGHTS - * VALIDATEDS - * IMPROVECACHELOCALITY - * FIXINFACINGNORMALS - * REMOVE_REDUNDANTMATERIALS - * OPTIMIZEGRAPH - * SORTBYPTYPE - * FINDINVALIDDATA - * TRANSFORMTEXCOORDS - * GENUVCOORDS - * ENTITYMESHBUILDER - * EMBEDTEXTURES - * MAKELEFTHANDED - * FLIPUVS - * FLIPWINDINGORDER - * OPTIMIZEMESHES - * OPTIMIZEANIMS - * OPTIMIZEGRAPH - * GENENTITYMESHES - * FIXTEXTUREPATHS - * GENBOUNDINGBOXES */ -////////////////////////////////////////////////////////////////////////// - -#ifdef _WIN32 -# undef ASSIMP_API - ////////////////////////////////////////////////////////////////////////// - /* Define 'ASSIMP_BUILD_DLL_EXPORT' to build a DLL of the library */ - ////////////////////////////////////////////////////////////////////////// -# ifdef ASSIMP_BUILD_DLL_EXPORT -# define ASSIMP_API __declspec(dllexport) -# define ASSIMP_API_WINONLY __declspec(dllexport) - - ////////////////////////////////////////////////////////////////////////// - /* Define 'ASSIMP_DLL' before including Assimp to link to ASSIMP in - * an external DLL under Windows. Default is static linkage. 
*/ - ////////////////////////////////////////////////////////////////////////// -# elif (defined ASSIMP_DLL) -# define ASSIMP_API __declspec(dllimport) -# define ASSIMP_API_WINONLY __declspec(dllimport) -# else -# define ASSIMP_API -# define ASSIMP_API_WINONLY -# endif -#elif defined(SWIG) - - /* Do nothing, the relevant defines are all in AssimpSwigPort.i */ - -#else -# define ASSIMP_API __attribute__ ((visibility("default"))) -# define ASSIMP_API_WINONLY -#endif - -#ifdef _MSC_VER -# ifdef ASSIMP_BUILD_DLL_EXPORT -# pragma warning (disable : 4251) -# endif - /* Force the compiler to inline a function, if possible - */ -# define AI_FORCE_INLINE __forceinline - - /* Tells the compiler that a function never returns. Used in code analysis - * to skip dead paths (e.g. after an assertion evaluated to false). */ -# define AI_WONT_RETURN __declspec(noreturn) -#elif defined(SWIG) - - /* Do nothing, the relevant defines are all in AssimpSwigPort.i */ - -#else -# define AI_WONT_RETURN -# define AI_FORCE_INLINE inline -#endif // (defined _MSC_VER) - -#ifdef __GNUC__ -# define AI_WONT_RETURN_SUFFIX __attribute__((noreturn)) -#else -# define AI_WONT_RETURN_SUFFIX -#endif // (defined __clang__) - -#ifdef __cplusplus - /* No explicit 'struct' and 'enum' tags for C++, this keeps showing up - * in doxydocs. - */ -# define C_STRUCT -# define C_ENUM -#else - ////////////////////////////////////////////////////////////////////////// - /* To build the documentation, make sure ASSIMP_DOXYGEN_BUILD - * is defined by Doxygen's preprocessor. The corresponding - * entries in the DOXYFILE are: */ - ////////////////////////////////////////////////////////////////////////// -#if 0 - ENABLE_PREPROCESSING = YES - MACRO_EXPANSION = YES - EXPAND_ONLY_PREDEF = YES - SEARCH_INCLUDES = YES - INCLUDE_PATH = - INCLUDE_FILE_PATTERNS = - PREDEFINED = ASSIMP_DOXYGEN_BUILD=1 - EXPAND_AS_DEFINED = C_STRUCT C_ENUM - SKIP_FUNCTION_MACROS = YES -#endif - ////////////////////////////////////////////////////////////////////////// - /* Doxygen gets confused if we use c-struct typedefs to avoid - * the explicit 'struct' notation. This trick here has the same - * effect as the TYPEDEF_HIDES_STRUCT option, but we don't need - * to typedef all structs/enums. */ - ////////////////////////////////////////////////////////////////////////// -# if (defined ASSIMP_DOXYGEN_BUILD) -# define C_STRUCT -# define C_ENUM -# else -# define C_STRUCT struct -# define C_ENUM enum -# endif -#endif - -#if (defined(__BORLANDC__) || defined (__BCPLUSPLUS__)) -# error Currently, Borland is unsupported. Feel free to port Assimp. -#endif - - - ////////////////////////////////////////////////////////////////////////// - /* Define ASSIMP_BUILD_SINGLETHREADED to compile assimp - * without threading support. The library doesn't utilize - * threads then and is itself not threadsafe. */ - ////////////////////////////////////////////////////////////////////////// -#ifndef ASSIMP_BUILD_SINGLETHREADED -# define ASSIMP_BUILD_SINGLETHREADED -#endif - -#if defined(_DEBUG) || ! defined(NDEBUG) -# define ASSIMP_BUILD_DEBUG -#endif - - ////////////////////////////////////////////////////////////////////////// - /* Define ASSIMP_DOUBLE_PRECISION to compile assimp - * with double precision support (64-bit). 
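Since ASSIMP_DOUBLE_PRECISION only swaps the typedefs that follow, downstream code stays precision-agnostic by using ai_real instead of a hard-coded float; a small sketch assuming the standard installed header layout:

    #include <assimp/defs.h>
    #include <type_traits>

    static_assert(std::is_floating_point<ai_real>::value,
            "ai_real is float by default, double when ASSIMP_DOUBLE_PRECISION is defined");

    // AI_DEG_TO_RAD below operates on ai_real, so this compiles unchanged
    // for single- and double-precision builds of the library.
    static ai_real half_turn_in_radians() {
        return AI_DEG_TO_RAD(static_cast<ai_real>(180.0));
    }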
*/ - ////////////////////////////////////////////////////////////////////////// - -#ifdef ASSIMP_DOUBLE_PRECISION - typedef double ai_real; - typedef signed long long int ai_int; - typedef unsigned long long int ai_uint; -#ifndef ASSIMP_AI_REAL_TEXT_PRECISION -#define ASSIMP_AI_REAL_TEXT_PRECISION 16 -#endif // ASSIMP_AI_REAL_TEXT_PRECISION -#else // ASSIMP_DOUBLE_PRECISION - typedef float ai_real; - typedef signed int ai_int; - typedef unsigned int ai_uint; -#ifndef ASSIMP_AI_REAL_TEXT_PRECISION -#define ASSIMP_AI_REAL_TEXT_PRECISION 8 -#endif // ASSIMP_AI_REAL_TEXT_PRECISION -#endif // ASSIMP_DOUBLE_PRECISION - - ////////////////////////////////////////////////////////////////////////// - /* Useful constants */ - ////////////////////////////////////////////////////////////////////////// - -/* This is PI. Hi PI. */ -#define AI_MATH_PI (3.141592653589793238462643383279 ) -#define AI_MATH_TWO_PI (AI_MATH_PI * 2.0) -#define AI_MATH_HALF_PI (AI_MATH_PI * 0.5) - -/* And this is to avoid endless casts to float */ -#define AI_MATH_PI_F (3.1415926538f) -#define AI_MATH_TWO_PI_F (AI_MATH_PI_F * 2.0f) -#define AI_MATH_HALF_PI_F (AI_MATH_PI_F * 0.5f) - -/* Tiny macro to convert from radians to degrees and back */ -#define AI_DEG_TO_RAD(x) ((x)*(ai_real)0.0174532925) -#define AI_RAD_TO_DEG(x) ((x)*(ai_real)57.2957795) - -/* Numerical limits */ -static const ai_real ai_epsilon = (ai_real) 0.00001; - -/* Support for big-endian builds */ -#if defined(__BYTE_ORDER__) -# if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# if !defined(__BIG_ENDIAN__) -# define __BIG_ENDIAN__ -# endif -# else /* little endian */ -# if defined (__BIG_ENDIAN__) -# undef __BIG_ENDIAN__ -# endif -# endif -#endif -#if defined(__BIG_ENDIAN__) -# define AI_BUILD_BIG_ENDIAN -#endif - - -/** - * To avoid running out of memory - * This can be adjusted for specific use cases - * It's NOT a total limit, just a limit for individual allocations - */ -#define AI_MAX_ALLOC(type) ((256U * 1024 * 1024) / sizeof(type)) - -#ifndef _MSC_VER -# define AI_NO_EXCEPT noexcept -#else -# if (_MSC_VER >= 1915 ) -# define AI_NO_EXCEPT noexcept -# else -# define AI_NO_EXCEPT -# endif -#endif // _MSC_VER - -/** - * Helper macro to set a pointer to NULL in debug builds - */ -#if (defined ASSIMP_BUILD_DEBUG) -# define AI_DEBUG_INVALIDATE_PTR(x) x = NULL; -#else -# define AI_DEBUG_INVALIDATE_PTR(x) -#endif - -#endif // !! AI_DEFINES_H_INC diff --git a/thirdparty/assimp/include/assimp/fast_atof.h b/thirdparty/assimp/include/assimp/fast_atof.h deleted file mode 100644 index 6e9a1bba7a..0000000000 --- a/thirdparty/assimp/include/assimp/fast_atof.h +++ /dev/null @@ -1,377 +0,0 @@ -#pragma once - -// Copyright (C) 2002-2007 Nikolaus Gebhardt -// This file is part of the "Irrlicht Engine" and the "irrXML" project. -// For conditions of distribution and use, see copyright notice in irrlicht.h and irrXML.h - -// ------------------------------------------------------------------------------------ -// Original description: (Schrompf) -// Adapted to the ASSIMP library because the builtin atof indeed takes AGES to parse a -// float inside a large string. Before parsing, it does a strlen on the given point. 
-// Changes: -// 22nd October 08 (Aramis_acg): Added temporary cast to double, added strtoul10_64 -// to ensure long numbers are handled correctly -// ------------------------------------------------------------------------------------ - -#pragma once -#ifndef FAST_A_TO_F_H_INCLUDED -#define FAST_A_TO_F_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <cmath> -#include <limits> -#include <stdint.h> -#include <stdexcept> -#include <assimp/defs.h> - -#include "StringComparison.h" -#include <assimp/DefaultLogger.hpp> - -#ifdef _MSC_VER -# include <stdint.h> -#else -# include <assimp/Compiler/pstdint.h> -#endif - -namespace Assimp { - -const double fast_atof_table[16] = { // we write [16] here instead of [] to work around a swig bug - 0.0, - 0.1, - 0.01, - 0.001, - 0.0001, - 0.00001, - 0.000001, - 0.0000001, - 0.00000001, - 0.000000001, - 0.0000000001, - 0.00000000001, - 0.000000000001, - 0.0000000000001, - 0.00000000000001, - 0.000000000000001 -}; - - -// ------------------------------------------------------------------------------------ -// Convert a string in decimal format to a number -// ------------------------------------------------------------------------------------ -inline -unsigned int strtoul10( const char* in, const char** out=0) { - unsigned int value = 0; - - for ( ;; ) { - if ( *in < '0' || *in > '9' ) { - break; - } - - value = ( value * 10 ) + ( *in - '0' ); - ++in; - } - if ( out ) { - *out = in; - } - return value; -} - -// ------------------------------------------------------------------------------------ -// Convert a string in octal format to a number -// ------------------------------------------------------------------------------------ -inline -unsigned int strtoul8( const char* in, const char** out=0) { - unsigned int value( 0 ); - for ( ;; ) { - if ( *in < '0' || *in > '7' ) { - break; - } - - value = ( value << 3 ) + ( *in - '0' ); - ++in; - } - if ( out ) { - *out = in; - } - return value; -} - -// ------------------------------------------------------------------------------------ -// Convert a string in hex format to a number -// ------------------------------------------------------------------------------------ -inline -unsigned int strtoul16( const char* in, const char** out=0) { - unsigned int value( 0 ); - for ( ;; ) { - if ( *in >= '0' && *in <= '9' ) { - value = ( value << 4u ) + ( *in - '0' ); - } else if (*in >= 'A' && *in <= 'F') { - value = ( value << 4u ) + ( *in - 'A' ) + 10; - } else if (*in >= 'a' && *in <= 'f') { - value = ( value << 4u ) + ( *in - 'a' ) + 10; - } else { - break; - } - ++in; - } - if ( out ) { - *out = in; - } - return value; -} - -// ------------------------------------------------------------------------------------ -// Convert just one hex digit -// Return value is UINT_MAX if the input character is not a hex digit. -// ------------------------------------------------------------------------------------ -inline -unsigned int HexDigitToDecimal(char in) { - unsigned int out( UINT_MAX ); - if ( in >= '0' && in <= '9' ) { - out = in - '0'; - } else if ( in >= 'a' && in <= 'f' ) { - out = 10u + in - 'a'; - } else if ( in >= 'A' && in <= 'F' ) { - out = 10u + in - 'A'; - } - - // return value is UINT_MAX if the input is not a hex digit - return out; -} - -// ------------------------------------------------------------------------------------ -// Convert a hex-encoded octet (2 characters, i.e. df or 1a). 
-// ------------------------------------------------------------------------------------ -inline -uint8_t HexOctetToDecimal(const char* in) { - return ((uint8_t)HexDigitToDecimal(in[0])<<4)+(uint8_t)HexDigitToDecimal(in[1]); -} - -// ------------------------------------------------------------------------------------ -// signed variant of strtoul10 -// ------------------------------------------------------------------------------------ -inline -int strtol10( const char* in, const char** out=0) { - bool inv = (*in=='-'); - if ( inv || *in == '+' ) { - ++in; - } - - int value = strtoul10(in,out); - if (inv) { - value = -value; - } - return value; -} - -// ------------------------------------------------------------------------------------ -// Parse a C++-like integer literal - hex and oct prefixes. -// 0xNNNN - hex -// 0NNN - oct -// NNN - dec -// ------------------------------------------------------------------------------------ -inline -unsigned int strtoul_cppstyle( const char* in, const char** out=0) { - if ('0' == in[0]) { - return 'x' == in[1] ? strtoul16(in+2,out) : strtoul8(in+1,out); - } - return strtoul10(in, out); -} - -// ------------------------------------------------------------------------------------ -// Special version of the function, providing higher accuracy and safety -// It is mainly used by fast_atof to prevent ugly and unwanted integer overflows. -// ------------------------------------------------------------------------------------ -inline -uint64_t strtoul10_64( const char* in, const char** out=0, unsigned int* max_inout=0) { - unsigned int cur = 0; - uint64_t value = 0; - - if ( *in < '0' || *in > '9' ) { - throw std::invalid_argument( std::string( "The string \"" ) + in + "\" cannot be converted into a value." ); - } - - for ( ;; ) { - if ( *in < '0' || *in > '9' ) { - break; - } - - const uint64_t new_value = ( value * (uint64_t) 10 ) + ( (uint64_t) ( *in - '0' ) ); - - // numeric overflow, we rely on you - if ( new_value < value ) { - ASSIMP_LOG_WARN_F( "Converting the string \"", in, "\" into a value resulted in overflow." ); - return 0; - } - - value = new_value; - - ++in; - ++cur; - - if (max_inout && *max_inout == cur) { - if (out) { /* skip to end */ - while ( *in >= '0' && *in <= '9' ) { - ++in; - } - *out = in; - } - - return value; - } - } - if ( out ) { - *out = in; - } - - if ( max_inout ) { - *max_inout = cur; - } - - return value; -} - -// ------------------------------------------------------------------------------------ -// signed variant of strtoul10_64 -// ------------------------------------------------------------------------------------ -inline -int64_t strtol10_64(const char* in, const char** out = 0, unsigned int* max_inout = 0) { - bool inv = (*in == '-'); - if ( inv || *in == '+' ) { - ++in; - } - - int64_t value = strtoul10_64(in, out, max_inout); - if (inv) { - value = -value; - } - return value; -} - -// Number of relevant decimals for floating-point parsing. -#define AI_FAST_ATOF_RELAVANT_DECIMALS 15 - -// ------------------------------------------------------------------------------------ -//! Provides a fast function for converting a string into a float, -//! about 6 times faster than atof in win32. -// If you find any bugs, please send them to me, niko (at) irrlicht3d.org. 
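A short usage sketch for the integer-parsing helpers above; the out-cursor overloads are what let the text-based loaders walk a line token by token (input strings are illustrative):

    #include <assimp/fast_atof.h>
    #include <cstdint>

    static void parse_tokens() {
        const char *cursor = "0x1A 42";
        unsigned int hex = Assimp::strtoul_cppstyle(cursor, &cursor); // 26, cursor left on the space
        ++cursor;                                                     // skip the separator
        unsigned int dec = Assimp::strtoul10(cursor, &cursor);        // 42
        uint8_t octet = Assimp::HexOctetToDecimal("df");              // 0xDF
        (void)hex; (void)dec; (void)octet;
    }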
-// ------------------------------------------------------------------------------------ -template<typename Real> -inline -const char* fast_atoreal_move(const char* c, Real& out, bool check_comma = true) { - Real f = 0; - - bool inv = (*c == '-'); - if (inv || *c == '+') { - ++c; - } - - if ((c[0] == 'N' || c[0] == 'n') && ASSIMP_strincmp(c, "nan", 3) == 0) { - out = std::numeric_limits<Real>::quiet_NaN(); - c += 3; - return c; - } - - if ((c[0] == 'I' || c[0] == 'i') && ASSIMP_strincmp(c, "inf", 3) == 0) { - out = std::numeric_limits<Real>::infinity(); - if (inv) { - out = -out; - } - c += 3; - if ((c[0] == 'I' || c[0] == 'i') && ASSIMP_strincmp(c, "inity", 5) == 0) { - c += 5; - } - return c; - } - - if (!(c[0] >= '0' && c[0] <= '9') && - !((c[0] == '.' || (check_comma && c[0] == ',')) && c[1] >= '0' && c[1] <= '9')) { - throw std::invalid_argument("Cannot parse string " - "as real number: does not start with digit " - "or decimal point followed by digit."); - } - - if (*c != '.' && (! check_comma || c[0] != ',')) { - f = static_cast<Real>( strtoul10_64 ( c, &c) ); - } - - if ((*c == '.' || (check_comma && c[0] == ',')) && c[1] >= '0' && c[1] <= '9') { - ++c; - - // NOTE: The original implementation is highly inaccurate here. The precision of a single - // IEEE 754 float is not high enough, everything behind the 6th digit tends to be more - // inaccurate than it would need to be. Casting to double seems to solve the problem. - // strtol_64 is used to prevent integer overflow. - - // Another fix: this tends to become 0 for long numbers if we don't limit the maximum - // number of digits to be read. AI_FAST_ATOF_RELAVANT_DECIMALS can be a value between - // 1 and 15. - unsigned int diff = AI_FAST_ATOF_RELAVANT_DECIMALS; - double pl = static_cast<double>( strtoul10_64 ( c, &c, &diff )); - - pl *= fast_atof_table[diff]; - f += static_cast<Real>( pl ); - } - // For backwards compatibility: eat trailing dots, but not trailing commas. - else if (*c == '.') { - ++c; - } - - // A major 'E' must be allowed. Necessary for proper reading of some DXF files. - // Thanks to Zhao Lei to point out that this if() must be outside the if (*c == '.' ..) - if (*c == 'e' || *c == 'E') { - ++c; - const bool einv = (*c=='-'); - if (einv || *c=='+') { - ++c; - } - - // The reason float constants are used here is that we've seen cases where compilers - // would perform such casts on compile-time constants at runtime, which would be - // bad considering how frequently fast_atoreal_move<float> is called in Assimp. - Real exp = static_cast<Real>( strtoul10_64(c, &c) ); - if (einv) { - exp = -exp; - } - f *= std::pow(static_cast<Real>(10.0), exp); - } - - if (inv) { - f = -f; - } - out = f; - return c; -} - -// ------------------------------------------------------------------------------------ -// The same but more human. -inline -ai_real fast_atof(const char* c) { - ai_real ret(0.0); - fast_atoreal_move<ai_real>(c, ret); - - return ret; -} - -inline -ai_real fast_atof( const char* c, const char** cout) { - ai_real ret(0.0); - *cout = fast_atoreal_move<ai_real>(c, ret); - - return ret; -} - -inline -ai_real fast_atof( const char** inout) { - ai_real ret(0.0); - *inout = fast_atoreal_move<ai_real>(*inout, ret); - - return ret; -} - -} //! 
namespace Assimp - -#endif // FAST_A_TO_F_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/importerdesc.h b/thirdparty/assimp/include/assimp/importerdesc.h deleted file mode 100644 index 0a6919c1ae..0000000000 --- a/thirdparty/assimp/include/assimp/importerdesc.h +++ /dev/null @@ -1,148 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file importerdesc.h - * @brief #aiImporterFlags, aiImporterDesc implementation. - */ -#pragma once -#ifndef AI_IMPORTER_DESC_H_INC -#define AI_IMPORTER_DESC_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - - -/** Mixed set of flags for #aiImporterDesc, indicating some features - * common to many importers*/ -enum aiImporterFlags { - /** Indicates that there is a textual encoding of the - * file format; and that it is supported.*/ - aiImporterFlags_SupportTextFlavour = 0x1, - - /** Indicates that there is a binary encoding of the - * file format; and that it is supported.*/ - aiImporterFlags_SupportBinaryFlavour = 0x2, - - /** Indicates that there is a compressed encoding of the - * file format; and that it is supported.*/ - aiImporterFlags_SupportCompressedFlavour = 0x4, - - /** Indicates that the importer reads only a very particular - * subset of the file format. This happens commonly for - * declarative or procedural formats which cannot easily - * be mapped to #aiScene */ - aiImporterFlags_LimitedSupport = 0x8, - - /** Indicates that the importer is highly experimental and - * should be used with care. This only happens for trunk - * (i.e. SVN) versions, experimental code is not included - * in releases. */ - aiImporterFlags_Experimental = 0x10 -}; - - -/** Meta information about a particular importer. 
Importers need to fill - * this structure, but they can freely decide how talkative they are. - * A common use case for loader meta info is a user interface - * in which the user can choose between various import/export file - * formats. Building such an UI by hand means a lot of maintenance - * as importers/exporters are added to Assimp, so it might be useful - * to have a common mechanism to query some rough importer - * characteristics. */ -struct aiImporterDesc { - /** Full name of the importer (i.e. Blender3D importer)*/ - const char* mName; - - /** Original author (left blank if unknown or whole assimp team) */ - const char* mAuthor; - - /** Current maintainer, left blank if the author maintains */ - const char* mMaintainer; - - /** Implementation comments, i.e. unimplemented features*/ - const char* mComments; - - /** These flags indicate some characteristics common to many - importers. */ - unsigned int mFlags; - - /** Minimum format version that can be loaded im major.minor format, - both are set to 0 if there is either no version scheme - or if the loader doesn't care. */ - unsigned int mMinMajor; - unsigned int mMinMinor; - - /** Maximum format version that can be loaded im major.minor format, - both are set to 0 if there is either no version scheme - or if the loader doesn't care. Loaders that expect to be - forward-compatible to potential future format versions should - indicate zero, otherwise they should specify the current - maximum version.*/ - unsigned int mMaxMajor; - unsigned int mMaxMinor; - - /** List of file extensions this importer can handle. - List entries are separated by space characters. - All entries are lower case without a leading dot (i.e. - "xml dae" would be a valid value. Note that multiple - importers may respond to the same file extension - - assimp calls all importers in the order in which they - are registered and each importer gets the opportunity - to load the file until one importer "claims" the file. Apart - from file extension checks, importers typically use - other methods to quickly reject files (i.e. magic - words) so this does not mean that common or generic - file extensions such as XML would be tediously slow. */ - const char* mFileExtensions; -}; - -/** \brief Returns the Importer description for a given extension. - -Will return a NULL-pointer if no assigned importer desc. was found for the given extension - \param extension [in] The extension to look for - \return A pointer showing to the ImporterDesc, \see aiImporterDesc. -*/ -ASSIMP_API const C_STRUCT aiImporterDesc* aiGetImporterDesc( const char *extension ); - -#endif // AI_IMPORTER_DESC_H_INC diff --git a/thirdparty/assimp/include/assimp/light.h b/thirdparty/assimp/include/assimp/light.h deleted file mode 100644 index bdb2368c4f..0000000000 --- a/thirdparty/assimp/include/assimp/light.h +++ /dev/null @@ -1,263 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
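For the importer-description header removed above, the metadata is queried at runtime through the single C entry point it declares; a minimal sketch:

    #include <assimp/importerdesc.h>
    #include <cstdio>

    static void print_importer_info(const char *extension) {
        // The extension is passed without a leading dot, e.g. "fbx" or "dae".
        const aiImporterDesc *desc = aiGetImporterDesc(extension);
        if (desc) {
            std::printf("%s handles: %s\n", desc->mName, desc->mFileExtensions);
        }
    }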
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file light.h - * @brief Defines the aiLight data structure - */ - -#pragma once -#ifndef AI_LIGHT_H_INC -#define AI_LIGHT_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -/** Enumerates all supported types of light sources. - */ -enum aiLightSourceType -{ - aiLightSource_UNDEFINED = 0x0, - - //! A directional light source has a well-defined direction - //! but is infinitely far away. That's quite a good - //! approximation for sun light. - aiLightSource_DIRECTIONAL = 0x1, - - //! A point light source has a well-defined position - //! in space but no direction - it emits light in all - //! directions. A normal bulb is a point light. - aiLightSource_POINT = 0x2, - - //! A spot light source emits light in a specific - //! angle. It has a position and a direction it is pointing to. - //! A good example for a spot light is a light spot in - //! sport arenas. - aiLightSource_SPOT = 0x3, - - //! The generic light level of the world, including the bounces - //! of all other light sources. - //! Typically, there's at most one ambient light in a scene. - //! This light type doesn't have a valid position, direction, or - //! other properties, just a color. - aiLightSource_AMBIENT = 0x4, - - //! An area light is a rectangle with predefined size that uniformly - //! emits light from one of its sides. The position is center of the - //! rectangle and direction is its normal vector. - aiLightSource_AREA = 0x5, - - /** This value is not used. It is just there to force the - * compiler to map this enum to a 32 Bit integer. - */ -#ifndef SWIG - _aiLightSource_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** Helper structure to describe a light source. - * - * Assimp supports multiple sorts of light sources, including - * directional, point and spot lights. All of them are defined with just - * a single structure and distinguished by their parameters. - * Note - some file formats (such as 3DS, ASE) export a "target point" - - * the point a spot light is looking at (it can even be animated). 
Assimp - * writes the target point as a subnode of a spotlights's main node, - * called "<spotName>.Target". However, this is just additional information - * then, the transformation tracks of the main node make the - * spot light already point in the right direction. -*/ -struct aiLight -{ - /** The name of the light source. - * - * There must be a node in the scenegraph with the same name. - * This node specifies the position of the light in the scene - * hierarchy and can be animated. - */ - C_STRUCT aiString mName; - - /** The type of the light source. - * - * aiLightSource_UNDEFINED is not a valid value for this member. - */ - C_ENUM aiLightSourceType mType; - - /** Position of the light source in space. Relative to the - * transformation of the node corresponding to the light. - * - * The position is undefined for directional lights. - */ - C_STRUCT aiVector3D mPosition; - - /** Direction of the light source in space. Relative to the - * transformation of the node corresponding to the light. - * - * The direction is undefined for point lights. The vector - * may be normalized, but it needn't. - */ - C_STRUCT aiVector3D mDirection; - - /** Up direction of the light source in space. Relative to the - * transformation of the node corresponding to the light. - * - * The direction is undefined for point lights. The vector - * may be normalized, but it needn't. - */ - C_STRUCT aiVector3D mUp; - - /** Constant light attenuation factor. - * - * The intensity of the light source at a given distance 'd' from - * the light's position is - * @code - * Atten = 1/( att0 + att1 * d + att2 * d*d) - * @endcode - * This member corresponds to the att0 variable in the equation. - * Naturally undefined for directional lights. - */ - float mAttenuationConstant; - - /** Linear light attenuation factor. - * - * The intensity of the light source at a given distance 'd' from - * the light's position is - * @code - * Atten = 1/( att0 + att1 * d + att2 * d*d) - * @endcode - * This member corresponds to the att1 variable in the equation. - * Naturally undefined for directional lights. - */ - float mAttenuationLinear; - - /** Quadratic light attenuation factor. - * - * The intensity of the light source at a given distance 'd' from - * the light's position is - * @code - * Atten = 1/( att0 + att1 * d + att2 * d*d) - * @endcode - * This member corresponds to the att2 variable in the equation. - * Naturally undefined for directional lights. - */ - float mAttenuationQuadratic; - - /** Diffuse color of the light source - * - * The diffuse light color is multiplied with the diffuse - * material color to obtain the final color that contributes - * to the diffuse shading term. - */ - C_STRUCT aiColor3D mColorDiffuse; - - /** Specular color of the light source - * - * The specular light color is multiplied with the specular - * material color to obtain the final color that contributes - * to the specular shading term. - */ - C_STRUCT aiColor3D mColorSpecular; - - /** Ambient color of the light source - * - * The ambient light color is multiplied with the ambient - * material color to obtain the final color that contributes - * to the ambient shading term. Most renderers will ignore - * this value it, is just a remaining of the fixed-function pipeline - * that is still supported by quite many file formats. - */ - C_STRUCT aiColor3D mColorAmbient; - - /** Inner angle of a spot light's light cone. - * - * The spot light has maximum influence on objects inside this - * angle. The angle is given in radians. 
It is 2PI for point - * lights and undefined for directional lights. - */ - float mAngleInnerCone; - - /** Outer angle of a spot light's light cone. - * - * The spot light does not affect objects outside this angle. - * The angle is given in radians. It is 2PI for point lights and - * undefined for directional lights. The outer angle must be - * greater than or equal to the inner angle. - * It is assumed that the application uses a smooth - * interpolation between the inner and the outer cone of the - * spot light. - */ - float mAngleOuterCone; - - /** Size of area light source. */ - C_STRUCT aiVector2D mSize; - -#ifdef __cplusplus - - aiLight() AI_NO_EXCEPT - : mType (aiLightSource_UNDEFINED) - , mAttenuationConstant (0.f) - , mAttenuationLinear (1.f) - , mAttenuationQuadratic (0.f) - , mAngleInnerCone ((float)AI_MATH_TWO_PI) - , mAngleOuterCone ((float)AI_MATH_TWO_PI) - , mSize (0.f, 0.f) - { - } - -#endif -}; - -#ifdef __cplusplus -} -#endif - - -#endif // !! AI_LIGHT_H_INC diff --git a/thirdparty/assimp/include/assimp/material.h b/thirdparty/assimp/include/assimp/material.h deleted file mode 100644 index 19a7c69709..0000000000 --- a/thirdparty/assimp/include/assimp/material.h +++ /dev/null @@ -1,1604 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
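The attenuation model documented for aiLight above reduces to a one-liner on the consumer side; a sketch using the three factors exactly as they appear in the removed header:

    #include <assimp/light.h>

    // Atten = 1 / (att0 + att1 * d + att2 * d * d); undefined for directional lights.
    static float light_attenuation_at(const aiLight &light, float distance) {
        return 1.0f / (light.mAttenuationConstant +
                       light.mAttenuationLinear * distance +
                       light.mAttenuationQuadratic * distance * distance);
    }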
---------------------------------------------------------------------------- -*/ - -/** @file material.h - * @brief Defines the material system of the library - */ -#pragma once -#ifndef AI_MATERIAL_H_INC -#define AI_MATERIAL_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// Name for default materials (2nd is used if meshes have UV coords) -#define AI_DEFAULT_MATERIAL_NAME "DefaultMaterial" - -// --------------------------------------------------------------------------- -/** @brief Defines how the Nth texture of a specific type is combined with - * the result of all previous layers. - * - * Example (left: key, right: value): <br> - * @code - * DiffColor0 - gray - * DiffTextureOp0 - aiTextureOpMultiply - * DiffTexture0 - tex1.png - * DiffTextureOp0 - aiTextureOpAdd - * DiffTexture1 - tex2.png - * @endcode - * Written as equation, the final diffuse term for a specific pixel would be: - * @code - * diffFinal = DiffColor0 * sampleTex(DiffTexture0,UV0) + - * sampleTex(DiffTexture1,UV0) * diffContrib; - * @endcode - * where 'diffContrib' is the intensity of the incoming light for that pixel. - */ -enum aiTextureOp -{ - /** T = T1 * T2 */ - aiTextureOp_Multiply = 0x0, - - /** T = T1 + T2 */ - aiTextureOp_Add = 0x1, - - /** T = T1 - T2 */ - aiTextureOp_Subtract = 0x2, - - /** T = T1 / T2 */ - aiTextureOp_Divide = 0x3, - - /** T = (T1 + T2) - (T1 * T2) */ - aiTextureOp_SmoothAdd = 0x4, - - /** T = T1 + (T2-0.5) */ - aiTextureOp_SignedAdd = 0x5, - - -#ifndef SWIG - _aiTextureOp_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** @brief Defines how UV coordinates outside the [0...1] range are handled. - * - * Commonly referred to as 'wrapping mode'. - */ -enum aiTextureMapMode -{ - /** A texture coordinate u|v is translated to u%1|v%1 - */ - aiTextureMapMode_Wrap = 0x0, - - /** Texture coordinates outside [0...1] - * are clamped to the nearest valid value. - */ - aiTextureMapMode_Clamp = 0x1, - - /** If the texture coordinates for a pixel are outside [0...1] - * the texture is not applied to that pixel - */ - aiTextureMapMode_Decal = 0x3, - - /** A texture coordinate u|v becomes u%1|v%1 if (u-(u%1))%2 is zero and - * 1-(u%1)|1-(v%1) otherwise - */ - aiTextureMapMode_Mirror = 0x2, - -#ifndef SWIG - _aiTextureMapMode_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** @brief Defines how the mapping coords for a texture are generated. - * - * Real-time applications typically require full UV coordinates, so the use of - * the aiProcess_GenUVCoords step is highly recommended. It generates proper - * UV channels for non-UV mapped objects, as long as an accurate description - * how the mapping should look like (e.g spherical) is given. - * See the #AI_MATKEY_MAPPING property for more details. - */ -enum aiTextureMapping -{ - /** The mapping coordinates are taken from an UV channel. - * - * The #AI_MATKEY_UVWSRC key specifies from which UV channel - * the texture coordinates are to be taken from (remember, - * meshes can have more than one UV channel). - */ - aiTextureMapping_UV = 0x0, - - /** Spherical mapping */ - aiTextureMapping_SPHERE = 0x1, - - /** Cylindrical mapping */ - aiTextureMapping_CYLINDER = 0x2, - - /** Cubic mapping */ - aiTextureMapping_BOX = 0x3, - - /** Planar mapping */ - aiTextureMapping_PLANE = 0x4, - - /** Undefined mapping. Have fun. 
*/ - aiTextureMapping_OTHER = 0x5, - - -#ifndef SWIG - _aiTextureMapping_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** @brief Defines the purpose of a texture - * - * This is a very difficult topic. Different 3D packages support different - * kinds of textures. For very common texture types, such as bumpmaps, the - * rendering results depend on implementation details in the rendering - * pipelines of these applications. Assimp loads all texture references from - * the model file and tries to determine which of the predefined texture - * types below is the best choice to match the original use of the texture - * as closely as possible.<br> - * - * In content pipelines you'll usually define how textures have to be handled, - * and the artists working on models have to conform to this specification, - * regardless which 3D tool they're using. - */ -enum aiTextureType -{ - /** Dummy value. - * - * No texture, but the value to be used as 'texture semantic' - * (#aiMaterialProperty::mSemantic) for all material properties - * *not* related to textures. - */ - aiTextureType_NONE = 0, - - /** LEGACY API MATERIALS - * Legacy refers to materials which - * Were originally implemented in the specifications around 2000. - * These must never be removed, as most engines support them. - */ - - /** The texture is combined with the result of the diffuse - * lighting equation. - */ - aiTextureType_DIFFUSE = 1, - - /** The texture is combined with the result of the specular - * lighting equation. - */ - aiTextureType_SPECULAR = 2, - - /** The texture is combined with the result of the ambient - * lighting equation. - */ - aiTextureType_AMBIENT = 3, - - /** The texture is added to the result of the lighting - * calculation. It isn't influenced by incoming light. - */ - aiTextureType_EMISSIVE = 4, - - /** The texture is a height map. - * - * By convention, higher gray-scale values stand for - * higher elevations from the base height. - */ - aiTextureType_HEIGHT = 5, - - /** The texture is a (tangent space) normal-map. - * - * Again, there are several conventions for tangent-space - * normal maps. Assimp does (intentionally) not - * distinguish here. - */ - aiTextureType_NORMALS = 6, - - /** The texture defines the glossiness of the material. - * - * The glossiness is in fact the exponent of the specular - * (phong) lighting equation. Usually there is a conversion - * function defined to map the linear color values in the - * texture to a suitable exponent. Have fun. - */ - aiTextureType_SHININESS = 7, - - /** The texture defines per-pixel opacity. - * - * Usually 'white' means opaque and 'black' means - * 'transparency'. Or quite the opposite. Have fun. - */ - aiTextureType_OPACITY = 8, - - /** Displacement texture - * - * The exact purpose and format is application-dependent. - * Higher color values stand for higher vertex displacements. - */ - aiTextureType_DISPLACEMENT = 9, - - /** Lightmap texture (aka Ambient Occlusion) - * - * Both 'Lightmaps' and dedicated 'ambient occlusion maps' are - * covered by this material property. The texture contains a - * scaling value for the final color value of a pixel. Its - * intensity is not affected by incoming light. - */ - aiTextureType_LIGHTMAP = 10, - - /** Reflection texture - * - * Contains the color of a perfect mirror reflection. - * Rarely used, almost never for real-time applications. 
- */ - aiTextureType_REFLECTION = 11, - - /** PBR Materials - * PBR definitions from maya and other modelling packages now use this standard. - * This was originally introduced around 2012. - * Support for this is in game engines like Godot, Unreal or Unity3D. - * Modelling packages which use this are very common now. - */ - - aiTextureType_BASE_COLOR = 12, - aiTextureType_NORMAL_CAMERA = 13, - aiTextureType_EMISSION_COLOR = 14, - aiTextureType_METALNESS = 15, - aiTextureType_DIFFUSE_ROUGHNESS = 16, - aiTextureType_AMBIENT_OCCLUSION = 17, - - /** Unknown texture - * - * A texture reference that does not match any of the definitions - * above is considered to be 'unknown'. It is still imported, - * but is excluded from any further post-processing. - */ - aiTextureType_UNKNOWN = 18, - - -#ifndef SWIG - _aiTextureType_Force32Bit = INT_MAX -#endif -}; - -#define AI_TEXTURE_TYPE_MAX aiTextureType_UNKNOWN - -// --------------------------------------------------------------------------- -/** @brief Defines all shading models supported by the library - * - * The list of shading modes has been taken from Blender. - * See Blender documentation for more information. The API does - * not distinguish between "specular" and "diffuse" shaders (thus the - * specular term for diffuse shading models like Oren-Nayar remains - * undefined). <br> - * Again, this value is just a hint. Assimp tries to select the shader whose - * most common implementation matches the original rendering results of the - * 3D modeller which wrote a particular model as closely as possible. - */ -enum aiShadingMode -{ - /** Flat shading. Shading is done on per-face base, - * diffuse only. Also known as 'faceted shading'. - */ - aiShadingMode_Flat = 0x1, - - /** Simple Gouraud shading. - */ - aiShadingMode_Gouraud = 0x2, - - /** Phong-Shading - - */ - aiShadingMode_Phong = 0x3, - - /** Phong-Blinn-Shading - */ - aiShadingMode_Blinn = 0x4, - - /** Toon-Shading per pixel - * - * Also known as 'comic' shader. - */ - aiShadingMode_Toon = 0x5, - - /** OrenNayar-Shading per pixel - * - * Extension to standard Lambertian shading, taking the - * roughness of the material into account - */ - aiShadingMode_OrenNayar = 0x6, - - /** Minnaert-Shading per pixel - * - * Extension to standard Lambertian shading, taking the - * "darkness" of the material into account - */ - aiShadingMode_Minnaert = 0x7, - - /** CookTorrance-Shading per pixel - * - * Special shader for metallic surfaces. - */ - aiShadingMode_CookTorrance = 0x8, - - /** No shading at all. Constant light influence of 1.0. - */ - aiShadingMode_NoShading = 0x9, - - /** Fresnel shading - */ - aiShadingMode_Fresnel = 0xa, - - -#ifndef SWIG - _aiShadingMode_Force32Bit = INT_MAX -#endif -}; - - -// --------------------------------------------------------------------------- -/** @brief Defines some mixed flags for a particular texture. - * - * Usually you'll instruct your cg artists how textures have to look like ... - * and how they will be processed in your application. However, if you use - * Assimp for completely generic loading purposes you might also need to - * process these flags in order to display as many 'unknown' 3D models as - * possible correctly. - * - * This corresponds to the #AI_MATKEY_TEXFLAGS property. -*/ -enum aiTextureFlags -{ - /** The texture's color values have to be inverted (component-wise 1-n) - */ - aiTextureFlags_Invert = 0x1, - - /** Explicit request to the application to process the alpha channel - * of the texture. 
- * - * Mutually exclusive with #aiTextureFlags_IgnoreAlpha. These - * flags are set if the library can say for sure that the alpha - * channel is used/is not used. If the model format does not - * define this, it is left to the application to decide whether - * the texture alpha channel - if any - is evaluated or not. - */ - aiTextureFlags_UseAlpha = 0x2, - - /** Explicit request to the application to ignore the alpha channel - * of the texture. - * - * Mutually exclusive with #aiTextureFlags_UseAlpha. - */ - aiTextureFlags_IgnoreAlpha = 0x4, - -#ifndef SWIG - _aiTextureFlags_Force32Bit = INT_MAX -#endif -}; - - -// --------------------------------------------------------------------------- -/** @brief Defines alpha-blend flags. - * - * If you're familiar with OpenGL or D3D, these flags aren't new to you. - * They define *how* the final color value of a pixel is computed, basing - * on the previous color at that pixel and the new color value from the - * material. - * The blend formula is: - * @code - * SourceColor * SourceBlend + DestColor * DestBlend - * @endcode - * where DestColor is the previous color in the framebuffer at this - * position and SourceColor is the material color before the transparency - * calculation.<br> - * This corresponds to the #AI_MATKEY_BLEND_FUNC property. -*/ -enum aiBlendMode -{ - /** - * Formula: - * @code - * SourceColor*SourceAlpha + DestColor*(1-SourceAlpha) - * @endcode - */ - aiBlendMode_Default = 0x0, - - /** Additive blending - * - * Formula: - * @code - * SourceColor*1 + DestColor*1 - * @endcode - */ - aiBlendMode_Additive = 0x1, - - // we don't need more for the moment, but we might need them - // in future versions ... - -#ifndef SWIG - _aiBlendMode_Force32Bit = INT_MAX -#endif -}; - - -#include "./Compiler/pushpack1.h" - -// --------------------------------------------------------------------------- -/** @brief Defines how an UV channel is transformed. - * - * This is just a helper structure for the #AI_MATKEY_UVTRANSFORM key. - * See its documentation for more details. - * - * Typically you'll want to build a matrix of this information. However, - * we keep separate scaling/translation/rotation values to make it - * easier to process and optimize UV transformations internally. - */ -struct aiUVTransform -{ - /** Translation on the u and v axes. - * - * The default value is (0|0). - */ - C_STRUCT aiVector2D mTranslation; - - /** Scaling on the u and v axes. - * - * The default value is (1|1). - */ - C_STRUCT aiVector2D mScaling; - - /** Rotation - in counter-clockwise direction. - * - * The rotation angle is specified in radians. The - * rotation center is 0.5f|0.5f. The default value - * 0.f. - */ - ai_real mRotation; - - -#ifdef __cplusplus - aiUVTransform() AI_NO_EXCEPT - : mTranslation (0.0,0.0) - , mScaling (1.0,1.0) - , mRotation (0.0) - { - // nothing to be done here ... - } -#endif - -}; - -#include "./Compiler/poppack1.h" - -//! @cond AI_DOX_INCLUDE_INTERNAL -// --------------------------------------------------------------------------- -/** @brief A very primitive RTTI system for the contents of material - * properties. - */ -enum aiPropertyTypeInfo -{ - /** Array of single-precision (32 Bit) floats - * - * It is possible to use aiGetMaterialInteger[Array]() (or the C++-API - * aiMaterial::Get()) to query properties stored in floating-point format. - * The material system performs the type conversion automatically. 
- */ - aiPTI_Float = 0x1, - - /** Array of double-precision (64 Bit) floats - * - * It is possible to use aiGetMaterialInteger[Array]() (or the C++-API - * aiMaterial::Get()) to query properties stored in floating-point format. - * The material system performs the type conversion automatically. - */ - aiPTI_Double = 0x2, - - /** The material property is an aiString. - * - * Arrays of strings aren't possible, aiGetMaterialString() (or the - * C++-API aiMaterial::Get()) *must* be used to query a string property. - */ - aiPTI_String = 0x3, - - /** Array of (32 Bit) integers - * - * It is possible to use aiGetMaterialFloat[Array]() (or the C++-API - * aiMaterial::Get()) to query properties stored in integer format. - * The material system performs the type conversion automatically. - */ - aiPTI_Integer = 0x4, - - - /** Simple binary buffer, content undefined. Not convertible to anything. - */ - aiPTI_Buffer = 0x5, - - - /** This value is not used. It is just there to force the - * compiler to map this enum to a 32 Bit integer. - */ -#ifndef SWIG - _aiPTI_Force32Bit = INT_MAX -#endif -}; - -// --------------------------------------------------------------------------- -/** @brief Data structure for a single material property - * - * As an user, you'll probably never need to deal with this data structure. - * Just use the provided aiGetMaterialXXX() or aiMaterial::Get() family - * of functions to query material properties easily. Processing them - * manually is faster, but it is not the recommended way. It isn't worth - * the effort. <br> - * Material property names follow a simple scheme: - * @code - * $<name> - * ?<name> - * A public property, there must be corresponding AI_MATKEY_XXX define - * 2nd: Public, but ignored by the #aiProcess_RemoveRedundantMaterials - * post-processing step. - * ~<name> - * A temporary property for internal use. - * @endcode - * @see aiMaterial - */ -struct aiMaterialProperty -{ - /** Specifies the name of the property (key) - * Keys are generally case insensitive. - */ - C_STRUCT aiString mKey; - - /** Textures: Specifies their exact usage semantic. - * For non-texture properties, this member is always 0 - * (or, better-said, #aiTextureType_NONE). - */ - unsigned int mSemantic; - - /** Textures: Specifies the index of the texture. - * For non-texture properties, this member is always 0. - */ - unsigned int mIndex; - - /** Size of the buffer mData is pointing to, in bytes. - * This value may not be 0. - */ - unsigned int mDataLength; - - /** Type information for the property. - * - * Defines the data layout inside the data buffer. This is used - * by the library internally to perform debug checks and to - * utilize proper type conversions. - * (It's probably a hacky solution, but it works.) - */ - C_ENUM aiPropertyTypeInfo mType; - - /** Binary buffer to hold the property's value. - * The size of the buffer is always mDataLength. - */ - char* mData; - -#ifdef __cplusplus - - aiMaterialProperty() AI_NO_EXCEPT - : mSemantic( 0 ) - , mIndex( 0 ) - , mDataLength( 0 ) - , mType( aiPTI_Float ) - , mData(nullptr) { - // empty - } - - ~aiMaterialProperty() { - delete[] mData; - mData = nullptr; - } - -#endif -}; -//! @endcond - -#ifdef __cplusplus -} // We need to leave the "C" block here to allow template member functions -#endif - -// --------------------------------------------------------------------------- -/** @brief Data structure for a material -* -* Material data is stored using a key-value structure. A single key-value -* pair is called a 'material property'. 
C++ users should use the provided -* member functions of aiMaterial to process material properties, C users -* have to stick with the aiMaterialGetXXX family of unbound functions. -* The library defines a set of standard keys (AI_MATKEY_XXX). -*/ -#ifdef __cplusplus -struct ASSIMP_API aiMaterial -#else -struct aiMaterial -#endif -{ - -#ifdef __cplusplus - -public: - - aiMaterial(); - ~aiMaterial(); - - // ------------------------------------------------------------------- - /** - * @brief Returns the name of the material. - * @return The name of the material. - */ - // ------------------------------------------------------------------- - aiString GetName(); - - // ------------------------------------------------------------------- - /** @brief Retrieve an array of Type values with a specific key - * from the material - * - * @param pKey Key to search for. One of the AI_MATKEY_XXX constants. - * @param type .. set by AI_MATKEY_XXX - * @param idx .. set by AI_MATKEY_XXX - * @param pOut Pointer to a buffer to receive the result. - * @param pMax Specifies the size of the given buffer, in Type's. - * Receives the number of values (not bytes!) read. - * NULL is a valid value for this parameter. - */ - template <typename Type> - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, Type* pOut, unsigned int* pMax) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, int* pOut, unsigned int* pMax) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, ai_real* pOut, unsigned int* pMax) const; - - // ------------------------------------------------------------------- - /** @brief Retrieve a Type value with a specific key - * from the material - * - * @param pKey Key to search for. One of the AI_MATKEY_XXX constants. - * @param type Specifies the type of the texture to be retrieved ( - * e.g. diffuse, specular, height map ...) - * @param idx Index of the texture to be retrieved. - * @param pOut Reference to receive the output value - */ - template <typename Type> - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx,Type& pOut) const; - - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, int& pOut) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, ai_real& pOut) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, aiString& pOut) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, aiColor3D& pOut) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, aiColor4D& pOut) const; - - aiReturn Get(const char* pKey,unsigned int type, - unsigned int idx, aiUVTransform& pOut) const; - - // ------------------------------------------------------------------- - /** Get the number of textures for a particular texture type. - * @param type Texture type to check for - * @return Number of textures for this type. - * @note A texture can be easily queried using #GetTexture() */ - unsigned int GetTextureCount(aiTextureType type) const; - - // ------------------------------------------------------------------- - /** Helper function to get all parameters pertaining to a - * particular texture slot from a material. - * - * This function is provided just for convenience, you could also - * read the single material properties manually. - * @param type Specifies the type of the texture to be retrieved ( - * e.g. diffuse, specular, height map ...) - * @param index Index of the texture to be retrieved. 
The function fails - * if there is no texture of that type with this index. - * #GetTextureCount() can be used to determine the number of textures - * per texture type. - * @param path Receives the path to the texture. - * If the texture is embedded, receives a '*' followed by the id of - * the texture (for the textures stored in the corresponding scene) which - * can be converted to an int using a function like atoi. - * NULL is a valid value. - * @param mapping The texture mapping. - * NULL is allowed as value. - * @param uvindex Receives the UV index of the texture. - * NULL is a valid value. - * @param blend Receives the blend factor for the texture - * NULL is a valid value. - * @param op Receives the texture operation to be performed between - * this texture and the previous texture. NULL is allowed as value. - * @param mapmode Receives the mapping modes to be used for the texture. - * The parameter may be NULL but if it is a valid pointer it MUST - * point to an array of 3 aiTextureMapMode's (one for each - * axis: UVW order (=XYZ)). - */ - // ------------------------------------------------------------------- - aiReturn GetTexture(aiTextureType type, - unsigned int index, - C_STRUCT aiString* path, - aiTextureMapping* mapping = NULL, - unsigned int* uvindex = NULL, - ai_real* blend = NULL, - aiTextureOp* op = NULL, - aiTextureMapMode* mapmode = NULL) const; - - - // Setters - - - // ------------------------------------------------------------------------------ - /** @brief Add a property with a given key and type info to the material - * structure - * - * @param pInput Pointer to input data - * @param pSizeInBytes Size of input data - * @param pKey Key/Usage of the property (AI_MATKEY_XXX) - * @param type Set by the AI_MATKEY_XXX macro - * @param index Set by the AI_MATKEY_XXX macro - * @param pType Type information hint */ - aiReturn AddBinaryProperty (const void* pInput, - unsigned int pSizeInBytes, - const char* pKey, - unsigned int type , - unsigned int index , - aiPropertyTypeInfo pType); - - // ------------------------------------------------------------------------------ - /** @brief Add a string property with a given key and type info to the - * material structure - * - * @param pInput Input string - * @param pKey Key/Usage of the property (AI_MATKEY_XXX) - * @param type Set by the AI_MATKEY_XXX macro - * @param index Set by the AI_MATKEY_XXX macro */ - aiReturn AddProperty (const aiString* pInput, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - // ------------------------------------------------------------------------------ - /** @brief Add a property with a given key to the material structure - * @param pInput Pointer to the input data - * @param pNumValues Number of values in the array - * @param pKey Key/Usage of the property (AI_MATKEY_XXX) - * @param type Set by the AI_MATKEY_XXX macro - * @param index Set by the AI_MATKEY_XXX macro */ - template<class TYPE> - aiReturn AddProperty (const TYPE* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const aiVector3D* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const aiColor3D* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const aiColor4D* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 
0); - - aiReturn AddProperty (const int* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const float* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const double* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - aiReturn AddProperty (const aiUVTransform* pInput, - unsigned int pNumValues, - const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - // ------------------------------------------------------------------------------ - /** @brief Remove a given key from the list. - * - * The function fails if the key isn't found - * @param pKey Key to be deleted - * @param type Set by the AI_MATKEY_XXX macro - * @param index Set by the AI_MATKEY_XXX macro */ - aiReturn RemoveProperty (const char* pKey, - unsigned int type = 0, - unsigned int index = 0); - - // ------------------------------------------------------------------------------ - /** @brief Removes all properties from the material. - * - * The data array remains allocated so adding new properties is quite fast. */ - void Clear(); - - // ------------------------------------------------------------------------------ - /** Copy the property list of a material - * @param pcDest Destination material - * @param pcSrc Source material - */ - static void CopyPropertyList(aiMaterial* pcDest, - const aiMaterial* pcSrc); - - -#endif - - /** List of all material properties loaded. */ - C_STRUCT aiMaterialProperty** mProperties; - - /** Number of properties in the data base */ - unsigned int mNumProperties; - - /** Storage allocated */ - unsigned int mNumAllocated; -}; - -// Go back to extern "C" again -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -#define AI_MATKEY_NAME "?mat.name",0,0 -#define AI_MATKEY_TWOSIDED "$mat.twosided",0,0 -#define AI_MATKEY_SHADING_MODEL "$mat.shadingm",0,0 -#define AI_MATKEY_ENABLE_WIREFRAME "$mat.wireframe",0,0 -#define AI_MATKEY_BLEND_FUNC "$mat.blend",0,0 -#define AI_MATKEY_OPACITY "$mat.opacity",0,0 -#define AI_MATKEY_TRANSPARENCYFACTOR "$mat.transparencyfactor",0,0 -#define AI_MATKEY_BUMPSCALING "$mat.bumpscaling",0,0 -#define AI_MATKEY_SHININESS "$mat.shininess",0,0 -#define AI_MATKEY_REFLECTIVITY "$mat.reflectivity",0,0 -#define AI_MATKEY_SHININESS_STRENGTH "$mat.shinpercent",0,0 -#define AI_MATKEY_REFRACTI "$mat.refracti",0,0 -#define AI_MATKEY_COLOR_DIFFUSE "$clr.diffuse",0,0 -#define AI_MATKEY_COLOR_AMBIENT "$clr.ambient",0,0 -#define AI_MATKEY_COLOR_SPECULAR "$clr.specular",0,0 -#define AI_MATKEY_COLOR_EMISSIVE "$clr.emissive",0,0 -#define AI_MATKEY_COLOR_TRANSPARENT "$clr.transparent",0,0 -#define AI_MATKEY_COLOR_REFLECTIVE "$clr.reflective",0,0 -#define AI_MATKEY_GLOBAL_BACKGROUND_IMAGE "?bg.global",0,0 -#define AI_MATKEY_GLOBAL_SHADERLANG "?sh.lang",0,0 -#define AI_MATKEY_SHADER_VERTEX "?sh.vs",0,0 -#define AI_MATKEY_SHADER_FRAGMENT "?sh.fs",0,0 -#define AI_MATKEY_SHADER_GEO "?sh.gs",0,0 -#define AI_MATKEY_SHADER_TESSELATION "?sh.ts",0,0 -#define AI_MATKEY_SHADER_PRIMITIVE "?sh.ps",0,0 -#define AI_MATKEY_SHADER_COMPUTE "?sh.cs",0,0 - -// --------------------------------------------------------------------------- -// Pure key names for all texture-related properties -//! 
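For reference, a minimal hedged sketch of how the AI_MATKEY_* keys above were typically consumed through the aiMaterial::Get() overloads and GetTexture() declared earlier in this header; the helper name describe_material is hypothetical and only for illustration, not part of the Assimp API.

#include <assimp/material.h>

// Hypothetical helper: reads a few common properties from an aiMaterial
// using the key/value material API declared in this header.
static void describe_material(const aiMaterial *mat) {
    aiString name;
    mat->Get(AI_MATKEY_NAME, name); // expands to "?mat.name", 0, 0

    aiColor3D diffuse(1.f, 1.f, 1.f); // left at this default if the key is absent
    mat->Get(AI_MATKEY_COLOR_DIFFUSE, diffuse);

    ai_real opacity = 1.f;
    mat->Get(AI_MATKEY_OPACITY, opacity);

    // Per-texture properties take a (key, type, index) triple; the
    // AI_MATKEY_TEXTURE(type, N) macros below expand to exactly that.
    aiString path;
    if (mat->GetTexture(aiTextureType_DIFFUSE, 0, &path) == AI_SUCCESS) {
        // A leading '*' in path denotes an embedded texture index
        // (see the GetTexture documentation above).
    }
}
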
@cond MATS_DOC_FULL -#define _AI_MATKEY_TEXTURE_BASE "$tex.file" -#define _AI_MATKEY_UVWSRC_BASE "$tex.uvwsrc" -#define _AI_MATKEY_TEXOP_BASE "$tex.op" -#define _AI_MATKEY_MAPPING_BASE "$tex.mapping" -#define _AI_MATKEY_TEXBLEND_BASE "$tex.blend" -#define _AI_MATKEY_MAPPINGMODE_U_BASE "$tex.mapmodeu" -#define _AI_MATKEY_MAPPINGMODE_V_BASE "$tex.mapmodev" -#define _AI_MATKEY_TEXMAP_AXIS_BASE "$tex.mapaxis" -#define _AI_MATKEY_UVTRANSFORM_BASE "$tex.uvtrafo" -#define _AI_MATKEY_TEXFLAGS_BASE "$tex.flags" -//! @endcond - -// --------------------------------------------------------------------------- -#define AI_MATKEY_TEXTURE(type, N) _AI_MATKEY_TEXTURE_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_TEXTURE_DIFFUSE(N) \ - AI_MATKEY_TEXTURE(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_TEXTURE_SPECULAR(N) \ - AI_MATKEY_TEXTURE(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_TEXTURE_AMBIENT(N) \ - AI_MATKEY_TEXTURE(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_TEXTURE_EMISSIVE(N) \ - AI_MATKEY_TEXTURE(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_TEXTURE_NORMALS(N) \ - AI_MATKEY_TEXTURE(aiTextureType_NORMALS,N) - -#define AI_MATKEY_TEXTURE_HEIGHT(N) \ - AI_MATKEY_TEXTURE(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_TEXTURE_SHININESS(N) \ - AI_MATKEY_TEXTURE(aiTextureType_SHININESS,N) - -#define AI_MATKEY_TEXTURE_OPACITY(N) \ - AI_MATKEY_TEXTURE(aiTextureType_OPACITY,N) - -#define AI_MATKEY_TEXTURE_DISPLACEMENT(N) \ - AI_MATKEY_TEXTURE(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_TEXTURE_LIGHTMAP(N) \ - AI_MATKEY_TEXTURE(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_TEXTURE_REFLECTION(N) \ - AI_MATKEY_TEXTURE(aiTextureType_REFLECTION,N) - -//! @endcond - -// --------------------------------------------------------------------------- -#define AI_MATKEY_UVWSRC(type, N) _AI_MATKEY_UVWSRC_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_UVWSRC_DIFFUSE(N) \ - AI_MATKEY_UVWSRC(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_UVWSRC_SPECULAR(N) \ - AI_MATKEY_UVWSRC(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_UVWSRC_AMBIENT(N) \ - AI_MATKEY_UVWSRC(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_UVWSRC_EMISSIVE(N) \ - AI_MATKEY_UVWSRC(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_UVWSRC_NORMALS(N) \ - AI_MATKEY_UVWSRC(aiTextureType_NORMALS,N) - -#define AI_MATKEY_UVWSRC_HEIGHT(N) \ - AI_MATKEY_UVWSRC(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_UVWSRC_SHININESS(N) \ - AI_MATKEY_UVWSRC(aiTextureType_SHININESS,N) - -#define AI_MATKEY_UVWSRC_OPACITY(N) \ - AI_MATKEY_UVWSRC(aiTextureType_OPACITY,N) - -#define AI_MATKEY_UVWSRC_DISPLACEMENT(N) \ - AI_MATKEY_UVWSRC(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_UVWSRC_LIGHTMAP(N) \ - AI_MATKEY_UVWSRC(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_UVWSRC_REFLECTION(N) \ - AI_MATKEY_UVWSRC(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_TEXOP(type, N) _AI_MATKEY_TEXOP_BASE,type,N - -// For backward compatibility and simplicity -//! 
@cond MATS_DOC_FULL -#define AI_MATKEY_TEXOP_DIFFUSE(N) \ - AI_MATKEY_TEXOP(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_TEXOP_SPECULAR(N) \ - AI_MATKEY_TEXOP(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_TEXOP_AMBIENT(N) \ - AI_MATKEY_TEXOP(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_TEXOP_EMISSIVE(N) \ - AI_MATKEY_TEXOP(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_TEXOP_NORMALS(N) \ - AI_MATKEY_TEXOP(aiTextureType_NORMALS,N) - -#define AI_MATKEY_TEXOP_HEIGHT(N) \ - AI_MATKEY_TEXOP(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_TEXOP_SHININESS(N) \ - AI_MATKEY_TEXOP(aiTextureType_SHININESS,N) - -#define AI_MATKEY_TEXOP_OPACITY(N) \ - AI_MATKEY_TEXOP(aiTextureType_OPACITY,N) - -#define AI_MATKEY_TEXOP_DISPLACEMENT(N) \ - AI_MATKEY_TEXOP(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_TEXOP_LIGHTMAP(N) \ - AI_MATKEY_TEXOP(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_TEXOP_REFLECTION(N) \ - AI_MATKEY_TEXOP(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_MAPPING(type, N) _AI_MATKEY_MAPPING_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_MAPPING_DIFFUSE(N) \ - AI_MATKEY_MAPPING(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_MAPPING_SPECULAR(N) \ - AI_MATKEY_MAPPING(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_MAPPING_AMBIENT(N) \ - AI_MATKEY_MAPPING(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_MAPPING_EMISSIVE(N) \ - AI_MATKEY_MAPPING(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_MAPPING_NORMALS(N) \ - AI_MATKEY_MAPPING(aiTextureType_NORMALS,N) - -#define AI_MATKEY_MAPPING_HEIGHT(N) \ - AI_MATKEY_MAPPING(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_MAPPING_SHININESS(N) \ - AI_MATKEY_MAPPING(aiTextureType_SHININESS,N) - -#define AI_MATKEY_MAPPING_OPACITY(N) \ - AI_MATKEY_MAPPING(aiTextureType_OPACITY,N) - -#define AI_MATKEY_MAPPING_DISPLACEMENT(N) \ - AI_MATKEY_MAPPING(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_MAPPING_LIGHTMAP(N) \ - AI_MATKEY_MAPPING(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_MAPPING_REFLECTION(N) \ - AI_MATKEY_MAPPING(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_TEXBLEND(type, N) _AI_MATKEY_TEXBLEND_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_TEXBLEND_DIFFUSE(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_TEXBLEND_SPECULAR(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_TEXBLEND_AMBIENT(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_TEXBLEND_EMISSIVE(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_TEXBLEND_NORMALS(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_NORMALS,N) - -#define AI_MATKEY_TEXBLEND_HEIGHT(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_TEXBLEND_SHININESS(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_SHININESS,N) - -#define AI_MATKEY_TEXBLEND_OPACITY(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_OPACITY,N) - -#define AI_MATKEY_TEXBLEND_DISPLACEMENT(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_TEXBLEND_LIGHTMAP(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_TEXBLEND_REFLECTION(N) \ - AI_MATKEY_TEXBLEND(aiTextureType_REFLECTION,N) - -//! 
@endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_MAPPINGMODE_U(type, N) _AI_MATKEY_MAPPINGMODE_U_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_MAPPINGMODE_U_DIFFUSE(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_MAPPINGMODE_U_SPECULAR(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_MAPPINGMODE_U_AMBIENT(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_MAPPINGMODE_U_EMISSIVE(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_MAPPINGMODE_U_NORMALS(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_NORMALS,N) - -#define AI_MATKEY_MAPPINGMODE_U_HEIGHT(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_MAPPINGMODE_U_SHININESS(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_SHININESS,N) - -#define AI_MATKEY_MAPPINGMODE_U_OPACITY(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_OPACITY,N) - -#define AI_MATKEY_MAPPINGMODE_U_DISPLACEMENT(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_MAPPINGMODE_U_LIGHTMAP(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_MAPPINGMODE_U_REFLECTION(N) \ - AI_MATKEY_MAPPINGMODE_U(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_MAPPINGMODE_V(type, N) _AI_MATKEY_MAPPINGMODE_V_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_MAPPINGMODE_V_DIFFUSE(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_MAPPINGMODE_V_SPECULAR(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_MAPPINGMODE_V_AMBIENT(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_MAPPINGMODE_V_EMISSIVE(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_MAPPINGMODE_V_NORMALS(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_NORMALS,N) - -#define AI_MATKEY_MAPPINGMODE_V_HEIGHT(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_MAPPINGMODE_V_SHININESS(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_SHININESS,N) - -#define AI_MATKEY_MAPPINGMODE_V_OPACITY(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_OPACITY,N) - -#define AI_MATKEY_MAPPINGMODE_V_DISPLACEMENT(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_MAPPINGMODE_V_LIGHTMAP(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_MAPPINGMODE_V_REFLECTION(N) \ - AI_MATKEY_MAPPINGMODE_V(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_TEXMAP_AXIS(type, N) _AI_MATKEY_TEXMAP_AXIS_BASE,type,N - -// For backward compatibility and simplicity -//! 
@cond MATS_DOC_FULL -#define AI_MATKEY_TEXMAP_AXIS_DIFFUSE(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_TEXMAP_AXIS_SPECULAR(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_TEXMAP_AXIS_AMBIENT(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_TEXMAP_AXIS_EMISSIVE(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_TEXMAP_AXIS_NORMALS(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_NORMALS,N) - -#define AI_MATKEY_TEXMAP_AXIS_HEIGHT(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_TEXMAP_AXIS_SHININESS(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_SHININESS,N) - -#define AI_MATKEY_TEXMAP_AXIS_OPACITY(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_OPACITY,N) - -#define AI_MATKEY_TEXMAP_AXIS_DISPLACEMENT(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_TEXMAP_AXIS_LIGHTMAP(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_TEXMAP_AXIS_REFLECTION(N) \ - AI_MATKEY_TEXMAP_AXIS(aiTextureType_REFLECTION,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_UVTRANSFORM(type, N) _AI_MATKEY_UVTRANSFORM_BASE,type,N - -// For backward compatibility and simplicity -//! @cond MATS_DOC_FULL -#define AI_MATKEY_UVTRANSFORM_DIFFUSE(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_UVTRANSFORM_SPECULAR(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_UVTRANSFORM_AMBIENT(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_UVTRANSFORM_EMISSIVE(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_UVTRANSFORM_NORMALS(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_NORMALS,N) - -#define AI_MATKEY_UVTRANSFORM_HEIGHT(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_UVTRANSFORM_SHININESS(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_SHININESS,N) - -#define AI_MATKEY_UVTRANSFORM_OPACITY(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_OPACITY,N) - -#define AI_MATKEY_UVTRANSFORM_DISPLACEMENT(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_UVTRANSFORM_LIGHTMAP(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_UVTRANSFORM_REFLECTION(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_REFLECTION,N) - -#define AI_MATKEY_UVTRANSFORM_UNKNOWN(N) \ - AI_MATKEY_UVTRANSFORM(aiTextureType_UNKNOWN,N) - -//! @endcond -// --------------------------------------------------------------------------- -#define AI_MATKEY_TEXFLAGS(type, N) _AI_MATKEY_TEXFLAGS_BASE,type,N - -// For backward compatibility and simplicity -//! 
@cond MATS_DOC_FULL -#define AI_MATKEY_TEXFLAGS_DIFFUSE(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_DIFFUSE,N) - -#define AI_MATKEY_TEXFLAGS_SPECULAR(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_SPECULAR,N) - -#define AI_MATKEY_TEXFLAGS_AMBIENT(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_AMBIENT,N) - -#define AI_MATKEY_TEXFLAGS_EMISSIVE(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_EMISSIVE,N) - -#define AI_MATKEY_TEXFLAGS_NORMALS(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_NORMALS,N) - -#define AI_MATKEY_TEXFLAGS_HEIGHT(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_HEIGHT,N) - -#define AI_MATKEY_TEXFLAGS_SHININESS(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_SHININESS,N) - -#define AI_MATKEY_TEXFLAGS_OPACITY(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_OPACITY,N) - -#define AI_MATKEY_TEXFLAGS_DISPLACEMENT(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_DISPLACEMENT,N) - -#define AI_MATKEY_TEXFLAGS_LIGHTMAP(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_LIGHTMAP,N) - -#define AI_MATKEY_TEXFLAGS_REFLECTION(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_REFLECTION,N) - -#define AI_MATKEY_TEXFLAGS_UNKNOWN(N) \ - AI_MATKEY_TEXFLAGS(aiTextureType_UNKNOWN,N) - -//! @endcond -//! -// --------------------------------------------------------------------------- -/** @brief Retrieve a material property with a specific key from the material - * - * @param pMat Pointer to the input material. May not be NULL - * @param pKey Key to search for. One of the AI_MATKEY_XXX constants. - * @param type Specifies the type of the texture to be retrieved ( - * e.g. diffuse, specular, height map ...) - * @param index Index of the texture to be retrieved. - * @param pPropOut Pointer to receive a pointer to a valid aiMaterialProperty - * structure or NULL if the key has not been found. */ -// --------------------------------------------------------------------------- -ASSIMP_API C_ENUM aiReturn aiGetMaterialProperty( - const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - const C_STRUCT aiMaterialProperty** pPropOut); - -// --------------------------------------------------------------------------- -/** @brief Retrieve an array of float values with a specific key - * from the material - * - * Pass one of the AI_MATKEY_XXX constants for the last three parameters (the - * example reads the #AI_MATKEY_UVTRANSFORM property of the first diffuse texture) - * @code - * aiUVTransform trafo; - * unsigned int max = sizeof(aiUVTransform); - * if (AI_SUCCESS != aiGetMaterialFloatArray(mat, AI_MATKEY_UVTRANSFORM(aiTextureType_DIFFUSE,0), - * (float*)&trafo, &max) || sizeof(aiUVTransform) != max) - * { - * // error handling - * } - * @endcode - * - * @param pMat Pointer to the input material. May not be NULL - * @param pKey Key to search for. One of the AI_MATKEY_XXX constants. - * @param pOut Pointer to a buffer to receive the result. - * @param pMax Specifies the size of the given buffer, in float's. - * Receives the number of values (not bytes!) read. - * @param type (see the code sample above) - * @param index (see the code sample above) - * @return Specifies whether the key has been found. 
If not, the output - * arrays remains unmodified and pMax is set to 0.*/ -// --------------------------------------------------------------------------- -ASSIMP_API C_ENUM aiReturn aiGetMaterialFloatArray( - const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - ai_real* pOut, - unsigned int* pMax); - - -#ifdef __cplusplus - -// --------------------------------------------------------------------------- -/** @brief Retrieve a single float property with a specific key from the material. -* -* Pass one of the AI_MATKEY_XXX constants for the last three parameters (the -* example reads the #AI_MATKEY_SHININESS_STRENGTH property of the first diffuse texture) -* @code -* float specStrength = 1.f; // default value, remains unmodified if we fail. -* aiGetMaterialFloat(mat, AI_MATKEY_SHININESS_STRENGTH, -* (float*)&specStrength); -* @endcode -* -* @param pMat Pointer to the input material. May not be NULL -* @param pKey Key to search for. One of the AI_MATKEY_XXX constants. -* @param pOut Receives the output float. -* @param type (see the code sample above) -* @param index (see the code sample above) -* @return Specifies whether the key has been found. If not, the output -* float remains unmodified.*/ -// --------------------------------------------------------------------------- -inline aiReturn aiGetMaterialFloat(const aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - ai_real* pOut) -{ - return aiGetMaterialFloatArray(pMat,pKey,type,index,pOut,(unsigned int*)0x0); -} - -#else - -// Use our friend, the C preprocessor -#define aiGetMaterialFloat (pMat, type, index, pKey, pOut) \ - aiGetMaterialFloatArray(pMat, type, index, pKey, pOut, NULL) - -#endif //!__cplusplus - - -// --------------------------------------------------------------------------- -/** @brief Retrieve an array of integer values with a specific key - * from a material - * - * See the sample for aiGetMaterialFloatArray for more information.*/ -ASSIMP_API C_ENUM aiReturn aiGetMaterialIntegerArray(const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - int* pOut, - unsigned int* pMax); - - -#ifdef __cplusplus - -// --------------------------------------------------------------------------- -/** @brief Retrieve an integer property with a specific key from a material - * - * See the sample for aiGetMaterialFloat for more information.*/ -// --------------------------------------------------------------------------- -inline aiReturn aiGetMaterialInteger(const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - int* pOut) -{ - return aiGetMaterialIntegerArray(pMat,pKey,type,index,pOut,(unsigned int*)0x0); -} - -#else - -// use our friend, the C preprocessor -#define aiGetMaterialInteger (pMat, type, index, pKey, pOut) \ - aiGetMaterialIntegerArray(pMat, type, index, pKey, pOut, NULL) - -#endif //!__cplusplus - -// --------------------------------------------------------------------------- -/** @brief Retrieve a color value from the material property table -* -* See the sample for aiGetMaterialFloat for more information*/ -// --------------------------------------------------------------------------- -ASSIMP_API C_ENUM aiReturn aiGetMaterialColor(const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - C_STRUCT aiColor4D* pOut); - - -// --------------------------------------------------------------------------- -/** @brief Retrieve a 
aiUVTransform value from the material property table -* -* See the sample for aiGetMaterialFloat for more information*/ -// --------------------------------------------------------------------------- -ASSIMP_API C_ENUM aiReturn aiGetMaterialUVTransform(const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - C_STRUCT aiUVTransform* pOut); - - -// --------------------------------------------------------------------------- -/** @brief Retrieve a string from the material property table -* -* See the sample for aiGetMaterialFloat for more information.*/ -// --------------------------------------------------------------------------- -ASSIMP_API C_ENUM aiReturn aiGetMaterialString(const C_STRUCT aiMaterial* pMat, - const char* pKey, - unsigned int type, - unsigned int index, - C_STRUCT aiString* pOut); - -// --------------------------------------------------------------------------- -/** Get the number of textures for a particular texture type. - * @param[in] pMat Pointer to the input material. May not be NULL - * @param type Texture type to check for - * @return Number of textures for this type. - * @note A texture can be easily queried using #aiGetMaterialTexture() */ -// --------------------------------------------------------------------------- -ASSIMP_API unsigned int aiGetMaterialTextureCount(const C_STRUCT aiMaterial* pMat, - C_ENUM aiTextureType type); - -// --------------------------------------------------------------------------- -/** @brief Helper function to get all values pertaining to a particular - * texture slot from a material structure. - * - * This function is provided just for convenience. You could also read the - * texture by parsing all of its properties manually. This function bundles - * all of them in a huge function monster. - * - * @param[in] mat Pointer to the input material. May not be NULL - * @param[in] type Specifies the texture stack to read from (e.g. diffuse, - * specular, height map ...). - * @param[in] index Index of the texture. The function fails if the - * requested index is not available for this texture type. - * #aiGetMaterialTextureCount() can be used to determine the number of - * textures in a particular texture stack. - * @param[out] path Receives the output path - * If the texture is embedded, receives a '*' followed by the id of - * the texture (for the textures stored in the corresponding scene) which - * can be converted to an int using a function like atoi. - * This parameter must be non-null. - * @param mapping The texture mapping mode to be used. - * Pass NULL if you're not interested in this information. - * @param[out] uvindex For UV-mapped textures: receives the index of the UV - * source channel. Unmodified otherwise. - * Pass NULL if you're not interested in this information. - * @param[out] blend Receives the blend factor for the texture - * Pass NULL if you're not interested in this information. - * @param[out] op Receives the texture blend operation to be perform between - * this texture and the previous texture. - * Pass NULL if you're not interested in this information. - * @param[out] mapmode Receives the mapping modes to be used for the texture. - * Pass NULL if you're not interested in this information. Otherwise, - * pass a pointer to an array of two aiTextureMapMode's (one for each - * axis, UV order). - * @param[out] flags Receives the the texture flags. - * @return AI_SUCCESS on success, otherwise something else. 
Have fun.*/ -// --------------------------------------------------------------------------- -#ifdef __cplusplus -ASSIMP_API aiReturn aiGetMaterialTexture(const C_STRUCT aiMaterial* mat, - aiTextureType type, - unsigned int index, - aiString* path, - aiTextureMapping* mapping = NULL, - unsigned int* uvindex = NULL, - ai_real* blend = NULL, - aiTextureOp* op = NULL, - aiTextureMapMode* mapmode = NULL, - unsigned int* flags = NULL); -#else -C_ENUM aiReturn aiGetMaterialTexture(const C_STRUCT aiMaterial* mat, - C_ENUM aiTextureType type, - unsigned int index, - C_STRUCT aiString* path, - C_ENUM aiTextureMapping* mapping /*= NULL*/, - unsigned int* uvindex /*= NULL*/, - ai_real* blend /*= NULL*/, - C_ENUM aiTextureOp* op /*= NULL*/, - C_ENUM aiTextureMapMode* mapmode /*= NULL*/, - unsigned int* flags /*= NULL*/); -#endif // !#ifdef __cplusplus - - -#ifdef __cplusplus -} - -#include "material.inl" - -#endif //!__cplusplus - -#endif //!!AI_MATERIAL_H_INC diff --git a/thirdparty/assimp/include/assimp/material.inl b/thirdparty/assimp/include/assimp/material.inl deleted file mode 100644 index 8ae6b88d3e..0000000000 --- a/thirdparty/assimp/include/assimp/material.inl +++ /dev/null @@ -1,389 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file material.inl - * @brief Defines the C++ getters for the material system - */ - -#pragma once -#ifndef AI_MATERIAL_INL_INC -#define AI_MATERIAL_INL_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiPropertyTypeInfo ai_real_to_property_type_info(float) { - return aiPTI_Float; -} - -AI_FORCE_INLINE -aiPropertyTypeInfo ai_real_to_property_type_info(double) { - return aiPTI_Double; -} -// --------------------------------------------------------------------------- - -//! @cond never - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::GetTexture( aiTextureType type, - unsigned int index, - C_STRUCT aiString* path, - aiTextureMapping* mapping /*= NULL*/, - unsigned int* uvindex /*= NULL*/, - ai_real* blend /*= NULL*/, - aiTextureOp* op /*= NULL*/, - aiTextureMapMode* mapmode /*= NULL*/) const { - return ::aiGetMaterialTexture(this,type,index,path,mapping,uvindex,blend,op,mapmode); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -unsigned int aiMaterial::GetTextureCount(aiTextureType type) const { - return ::aiGetMaterialTextureCount(this,type); -} - -// --------------------------------------------------------------------------- -template <typename Type> -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx, Type* pOut, - unsigned int* pMax) const { - unsigned int iNum = pMax ? *pMax : 1; - - const aiMaterialProperty* prop; - const aiReturn ret = ::aiGetMaterialProperty(this,pKey,type,idx, - (const aiMaterialProperty**)&prop); - if ( AI_SUCCESS == ret ) { - - if (prop->mDataLength < sizeof(Type)*iNum) { - return AI_FAILURE; - } - - if (prop->mType != aiPTI_Buffer) { - return AI_FAILURE; - } - - iNum = std::min((size_t)iNum,prop->mDataLength / sizeof(Type)); - ::memcpy(pOut,prop->mData,iNum * sizeof(Type)); - if (pMax) { - *pMax = iNum; - } - } - return ret; -} - -// --------------------------------------------------------------------------- -template <typename Type> -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,Type& pOut) const { - const aiMaterialProperty* prop; - const aiReturn ret = ::aiGetMaterialProperty(this,pKey,type,idx, - (const aiMaterialProperty**)&prop); - if ( AI_SUCCESS == ret ) { - - if (prop->mDataLength < sizeof(Type)) { - return AI_FAILURE; - } - - if (prop->mType != aiPTI_Buffer) { - return AI_FAILURE; - } - - ::memcpy( &pOut, prop->mData, sizeof( Type ) ); - } - return ret; -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,ai_real* pOut, - unsigned int* pMax) const { - return ::aiGetMaterialFloatArray(this,pKey,type,idx,pOut,pMax); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,int* pOut, - unsigned int* pMax) const { - return ::aiGetMaterialIntegerArray(this,pKey,type,idx,pOut,pMax); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,ai_real& pOut) const { - return 
aiGetMaterialFloat(this,pKey,type,idx,&pOut); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,int& pOut) const { - return aiGetMaterialInteger(this,pKey,type,idx,&pOut); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,aiColor4D& pOut) const { - return aiGetMaterialColor(this,pKey,type,idx,&pOut); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,aiColor3D& pOut) const { - aiColor4D c; - const aiReturn ret = aiGetMaterialColor(this,pKey,type,idx,&c); - pOut = aiColor3D(c.r,c.g,c.b); - return ret; -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,aiString& pOut) const { - return aiGetMaterialString(this,pKey,type,idx,&pOut); -} -// --------------------------------------------------------------------------- -AI_FORCE_INLINE aiReturn aiMaterial::Get(const char* pKey,unsigned int type, - unsigned int idx,aiUVTransform& pOut) const { - return aiGetMaterialUVTransform(this,pKey,type,idx,&pOut); -} - -// --------------------------------------------------------------------------- -template<class TYPE> -aiReturn aiMaterial::AddProperty (const TYPE* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) -{ - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(TYPE), - pKey,type,index,aiPTI_Buffer); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE aiReturn aiMaterial::AddProperty(const float* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(float), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const double* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(double), - pKey,type,index,aiPTI_Double); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const aiUVTransform* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiUVTransform), - pKey,type,index,ai_real_to_property_type_info(pInput->mRotation)); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const aiColor4D* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiColor4D), - pKey,type,index,ai_real_to_property_type_info(pInput->a)); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const aiColor3D* pInput, - const unsigned int pNumValues, - const 
char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiColor3D), - pKey,type,index,ai_real_to_property_type_info(pInput->b)); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const aiVector3D* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiVector3D), - pKey,type,index,ai_real_to_property_type_info(pInput->x)); -} - -// --------------------------------------------------------------------------- -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty(const int* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(int), - pKey,type,index,aiPTI_Integer); -} - - -// --------------------------------------------------------------------------- -// The template specializations below are for backwards compatibility. -// The recommended way to add material properties is using the non-template -// overloads. -// --------------------------------------------------------------------------- - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<float>(const float* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(float), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<double>(const double* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(double), - pKey,type,index,aiPTI_Double); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<aiUVTransform>(const aiUVTransform* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiUVTransform), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<aiColor4D>(const aiColor4D* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiColor4D), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<aiColor3D>(const aiColor3D* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiColor3D), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<aiVector3D>(const aiVector3D* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - 
return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(aiVector3D), - pKey,type,index,aiPTI_Float); -} - -// --------------------------------------------------------------------------- -template<> -AI_FORCE_INLINE -aiReturn aiMaterial::AddProperty<int>(const int* pInput, - const unsigned int pNumValues, - const char* pKey, - unsigned int type, - unsigned int index) { - return AddBinaryProperty((const void*)pInput, - pNumValues * sizeof(int), - pKey,type,index,aiPTI_Integer); -} - -//! @endcond - -#endif //! AI_MATERIAL_INL_INC diff --git a/thirdparty/assimp/include/assimp/matrix3x3.h b/thirdparty/assimp/include/assimp/matrix3x3.h deleted file mode 100644 index 2c26cf92bb..0000000000 --- a/thirdparty/assimp/include/assimp/matrix3x3.h +++ /dev/null @@ -1,180 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file matrix3x3.h - * @brief Definition of a 3x3 matrix, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_MATRIX3X3_H_INC -#define AI_MATRIX3X3_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -#ifdef __cplusplus - -template <typename T> class aiMatrix4x4t; -template <typename T> class aiVector2t; - -// --------------------------------------------------------------------------- -/** @brief Represents a row-major 3x3 matrix - * - * There's much confusion about matrix layouts (column vs. row order). - * This is *always* a row-major matrix. Not even with the - * #aiProcess_ConvertToLeftHanded flag, which absolutely does not affect - * matrix order - it just affects the handedness of the coordinate system - * defined thereby. 
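The typed Get() overloads and the GetTexture()/GetTextureCount() wrappers removed from material.inl above form assimp's public material-query API. A minimal usage sketch of those getters follows; it is illustrative only, assuming an assimp installation still provides these headers (this patch only drops Godot's bundled copy), and describe_material is a hypothetical helper name, not an API from either project. The AI_MATKEY_* macros expand to the key/type/index triple, which is why a single macro fills the three leading parameters of Get().

    #include <assimp/material.h>
    #include <assimp/types.h>

    // Hypothetical helper (not part of assimp or Godot): query a few common
    // properties from a material owned by an aiScene. Returns true when a
    // diffuse texture path was found and written to r_diffuse_tex.
    bool describe_material(const aiMaterial *mat, aiString &r_diffuse_tex) {
        aiString name;
        mat->Get(AI_MATKEY_NAME, name); // leaves `name` empty if the key is absent

        aiColor3D diffuse(1.0f, 1.0f, 1.0f); // default stays in place on failure
        mat->Get(AI_MATKEY_COLOR_DIFFUSE, diffuse);

        // GetTexture() is the inline wrapper over aiGetMaterialTexture() shown above;
        // the optional mapping/uvindex/blend/op/mapmode out-parameters default to NULL.
        return mat->GetTextureCount(aiTextureType_DIFFUSE) > 0 &&
               mat->GetTexture(aiTextureType_DIFFUSE, 0, &r_diffuse_tex) == AI_SUCCESS;
    }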
- */ -template <typename TReal> -class aiMatrix3x3t { -public: - aiMatrix3x3t() AI_NO_EXCEPT : - a1(static_cast<TReal>(1.0f)), a2(), a3(), - b1(), b2(static_cast<TReal>(1.0f)), b3(), - c1(), c2(), c3(static_cast<TReal>(1.0f)) {} - - aiMatrix3x3t ( TReal _a1, TReal _a2, TReal _a3, - TReal _b1, TReal _b2, TReal _b3, - TReal _c1, TReal _c2, TReal _c3) : - a1(_a1), a2(_a2), a3(_a3), - b1(_b1), b2(_b2), b3(_b3), - c1(_c1), c2(_c2), c3(_c3) - {} - - // matrix multiplication. - aiMatrix3x3t& operator *= (const aiMatrix3x3t& m); - aiMatrix3x3t operator * (const aiMatrix3x3t& m) const; - - // array access operators - TReal* operator[] (unsigned int p_iIndex); - const TReal* operator[] (unsigned int p_iIndex) const; - - // comparison operators - bool operator== (const aiMatrix4x4t<TReal>& m) const; - bool operator!= (const aiMatrix4x4t<TReal>& m) const; - - bool Equal(const aiMatrix4x4t<TReal>& m, TReal epsilon = 1e-6) const; - - template <typename TOther> - operator aiMatrix3x3t<TOther> () const; - - // ------------------------------------------------------------------- - /** @brief Construction from a 4x4 matrix. The remaining parts - * of the matrix are ignored. - */ - explicit aiMatrix3x3t( const aiMatrix4x4t<TReal>& pMatrix); - - // ------------------------------------------------------------------- - /** @brief Transpose the matrix - */ - aiMatrix3x3t& Transpose(); - - // ------------------------------------------------------------------- - /** @brief Invert the matrix. - * If the matrix is not invertible all elements are set to qnan. - * Beware, use (f != f) to check whether a TReal f is qnan. - */ - aiMatrix3x3t& Inverse(); - TReal Determinant() const; - - // ------------------------------------------------------------------- - /** @brief Returns a rotation matrix for a rotation around z - * @param a Rotation angle, in radians - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix3x3t& RotationZ(TReal a, aiMatrix3x3t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a rotation matrix for a rotation around - * an arbitrary axis. - * - * @param a Rotation angle, in radians - * @param axis Axis to rotate around - * @param out To be filled - */ - static aiMatrix3x3t& Rotation( TReal a, - const aiVector3t<TReal>& axis, aiMatrix3x3t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a translation matrix - * @param v Translation vector - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix3x3t& Translation( const aiVector2t<TReal>& v, aiMatrix3x3t& out); - - // ------------------------------------------------------------------- - /** @brief A function for creating a rotation matrix that rotates a - * vector called "from" into another vector called "to". 
- * Input : from[3], to[3] which both must be *normalized* non-zero vectors - * Output: mtx[3][3] -- a 3x3 matrix in column-major form - * Authors: Tomas Möller, John Hughes - * "Efficiently Building a Matrix to Rotate One Vector to Another" - * Journal of Graphics Tools, 4(4):1-4, 1999 - */ - static aiMatrix3x3t& FromToMatrix(const aiVector3t<TReal>& from, - const aiVector3t<TReal>& to, aiMatrix3x3t& out); - -public: - TReal a1, a2, a3; - TReal b1, b2, b3; - TReal c1, c2, c3; -}; - -typedef aiMatrix3x3t<ai_real> aiMatrix3x3; - -#else - -struct aiMatrix3x3 { - ai_real a1, a2, a3; - ai_real b1, b2, b3; - ai_real c1, c2, c3; -}; - -#endif // __cplusplus - -#endif // AI_MATRIX3X3_H_INC diff --git a/thirdparty/assimp/include/assimp/matrix3x3.inl b/thirdparty/assimp/include/assimp/matrix3x3.inl deleted file mode 100644 index 1ce8c9691c..0000000000 --- a/thirdparty/assimp/include/assimp/matrix3x3.inl +++ /dev/null @@ -1,361 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file matrix3x3.inl - * @brief Inline implementation of the 3x3 matrix operators - */ -#pragma once -#ifndef AI_MATRIX3X3_INL_INC -#define AI_MATRIX3X3_INL_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -#include <assimp/matrix3x3.h> -#include <assimp/matrix4x4.h> - -#include <algorithm> -#include <cmath> -#include <limits> - -// ------------------------------------------------------------------------------------------------ -// Construction from a 4x4 matrix. The remaining parts of the matrix are ignored. 
-template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>::aiMatrix3x3t( const aiMatrix4x4t<TReal>& pMatrix) { - a1 = pMatrix.a1; a2 = pMatrix.a2; a3 = pMatrix.a3; - b1 = pMatrix.b1; b2 = pMatrix.b2; b3 = pMatrix.b3; - c1 = pMatrix.c1; c2 = pMatrix.c2; c3 = pMatrix.c3; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::operator *= (const aiMatrix3x3t<TReal>& m) { - *this = aiMatrix3x3t<TReal>(m.a1 * a1 + m.b1 * a2 + m.c1 * a3, - m.a2 * a1 + m.b2 * a2 + m.c2 * a3, - m.a3 * a1 + m.b3 * a2 + m.c3 * a3, - m.a1 * b1 + m.b1 * b2 + m.c1 * b3, - m.a2 * b1 + m.b2 * b2 + m.c2 * b3, - m.a3 * b1 + m.b3 * b2 + m.c3 * b3, - m.a1 * c1 + m.b1 * c2 + m.c1 * c3, - m.a2 * c1 + m.b2 * c2 + m.c2 * c3, - m.a3 * c1 + m.b3 * c2 + m.c3 * c3); - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -template <typename TOther> -aiMatrix3x3t<TReal>::operator aiMatrix3x3t<TOther> () const { - return aiMatrix3x3t<TOther>(static_cast<TOther>(a1),static_cast<TOther>(a2),static_cast<TOther>(a3), - static_cast<TOther>(b1),static_cast<TOther>(b2),static_cast<TOther>(b3), - static_cast<TOther>(c1),static_cast<TOther>(c2),static_cast<TOther>(c3)); -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal> aiMatrix3x3t<TReal>::operator* (const aiMatrix3x3t<TReal>& m) const { - aiMatrix3x3t<TReal> temp( *this); - temp *= m; - return temp; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal* aiMatrix3x3t<TReal>::operator[] (unsigned int p_iIndex) { - switch ( p_iIndex ) { - case 0: - return &a1; - case 1: - return &b1; - case 2: - return &c1; - default: - break; - } - return &a1; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const TReal* aiMatrix3x3t<TReal>::operator[] (unsigned int p_iIndex) const { - switch ( p_iIndex ) { - case 0: - return &a1; - case 1: - return &b1; - case 2: - return &c1; - default: - break; - } - return &a1; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiMatrix3x3t<TReal>::operator== (const aiMatrix4x4t<TReal>& m) const { - return a1 == m.a1 && a2 == m.a2 && a3 == m.a3 && - b1 == m.b1 && b2 == m.b2 && b3 == m.b3 && - c1 == m.c1 && c2 == m.c2 && c3 == m.c3; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiMatrix3x3t<TReal>::operator!= (const aiMatrix4x4t<TReal>& m) const { - return !(*this == m); -} - -// --------------------------------------------------------------------------- -template<typename TReal> -AI_FORCE_INLINE -bool aiMatrix3x3t<TReal>::Equal(const aiMatrix4x4t<TReal>& m, TReal epsilon) const { - return - std::abs(a1 - m.a1) <= epsilon && - std::abs(a2 - m.a2) <= epsilon && - std::abs(a3 - m.a3) <= epsilon && - std::abs(b1 - m.b1) <= epsilon && - std::abs(b2 - m.b2) <= epsilon && - std::abs(b3 - m.b3) <= epsilon && - std::abs(c1 - m.c1) <= epsilon && - std::abs(c2 - m.c2) <= epsilon && - std::abs(c3 - m.c3) <= epsilon; -} - 
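The constructors, operator*, and the row-pointer operator[] implemented above follow the row-major convention documented in matrix3x3.h. A minimal sketch of that behaviour, under the same assumption that a standalone assimp installation provides the headers; matrix3x3.inl is included explicitly here so the inline definitions are visible without relying on transitive includes.

    #include <cassert>
    #include <assimp/matrix3x3.h>
    #include <assimp/matrix3x3.inl> // inline implementations, included explicitly for this sketch

    int main() {
        // Row-major storage: a1..a3 form the first row, b1..b3 the second, c1..c3 the third.
        aiMatrix3x3 m(1.0f, 2.0f, 3.0f,
                      4.0f, 5.0f, 6.0f,
                      7.0f, 8.0f, 9.0f);

        // operator[] hands back a pointer to a row, so m[row][col] addresses one element.
        assert(m[1][2] == 6.0f);

        // The default constructor builds the identity; multiplying by it is a no-op.
        aiMatrix3x3 identity;
        aiMatrix3x3 p = m * identity;
        assert(p[2][0] == 7.0f);

        // This particular matrix is singular: Determinant() reports 0, and Inverse()
        // would turn every element into qNaN, as the header documentation warns.
        assert(m.Determinant() == 0.0f);
        return 0;
    }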
-// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::Transpose() { - // (TReal&) don't remove, GCC complains cause of packed fields - std::swap( (TReal&)a2, (TReal&)b1); - std::swap( (TReal&)a3, (TReal&)c1); - std::swap( (TReal&)b3, (TReal&)c2); - return *this; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -TReal aiMatrix3x3t<TReal>::Determinant() const { - return a1*b2*c3 - a1*b3*c2 + a2*b3*c1 - a2*b1*c3 + a3*b1*c2 - a3*b2*c1; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::Inverse() { - // Compute the reciprocal determinant - TReal det = Determinant(); - if(det == static_cast<TReal>(0.0)) - { - // Matrix not invertible. Setting all elements to nan is not really - // correct in a mathematical sense; but at least qnans are easy to - // spot. XXX we might throw an exception instead, which would - // be even much better to spot :/. - const TReal nan = std::numeric_limits<TReal>::quiet_NaN(); - *this = aiMatrix3x3t<TReal>( nan,nan,nan,nan,nan,nan,nan,nan,nan); - - return *this; - } - - TReal invdet = static_cast<TReal>(1.0) / det; - - aiMatrix3x3t<TReal> res; - res.a1 = invdet * (b2 * c3 - b3 * c2); - res.a2 = -invdet * (a2 * c3 - a3 * c2); - res.a3 = invdet * (a2 * b3 - a3 * b2); - res.b1 = -invdet * (b1 * c3 - b3 * c1); - res.b2 = invdet * (a1 * c3 - a3 * c1); - res.b3 = -invdet * (a1 * b3 - a3 * b1); - res.c1 = invdet * (b1 * c2 - b2 * c1); - res.c2 = -invdet * (a1 * c2 - a2 * c1); - res.c3 = invdet * (a1 * b2 - a2 * b1); - *this = res; - - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::RotationZ(TReal a, aiMatrix3x3t<TReal>& out) { - out.a1 = out.b2 = std::cos(a); - out.b1 = std::sin(a); - out.a2 = - out.b1; - - out.a3 = out.b3 = out.c1 = out.c2 = 0.f; - out.c3 = 1.f; - - return out; -} - -// ------------------------------------------------------------------------------------------------ -// Returns a rotation matrix for a rotation around an arbitrary axis. -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::Rotation( TReal a, const aiVector3t<TReal>& axis, aiMatrix3x3t<TReal>& out) { - TReal c = std::cos( a), s = std::sin( a), t = 1 - c; - TReal x = axis.x, y = axis.y, z = axis.z; - - // Many thanks to MathWorld and Wikipedia - out.a1 = t*x*x + c; out.a2 = t*x*y - s*z; out.a3 = t*x*z + s*y; - out.b1 = t*x*y + s*z; out.b2 = t*y*y + c; out.b3 = t*y*z - s*x; - out.c1 = t*x*z - s*y; out.c2 = t*y*z + s*x; out.c3 = t*z*z + c; - - return out; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::Translation( const aiVector2t<TReal>& v, aiMatrix3x3t<TReal>& out) { - out = aiMatrix3x3t<TReal>(); - out.a3 = v.x; - out.b3 = v.y; - return out; -} - -// ---------------------------------------------------------------------------------------- -/** A function for creating a rotation matrix that rotates a vector called - * "from" into another vector called "to". 
- * Input : from[3], to[3] which both must be *normalized* non-zero vectors - * Output: mtx[3][3] -- a 3x3 matrix in colum-major form - * Authors: Tomas Möller, John Hughes - * "Efficiently Building a Matrix to Rotate One Vector to Another" - * Journal of Graphics Tools, 4(4):1-4, 1999 - */ -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE aiMatrix3x3t<TReal>& aiMatrix3x3t<TReal>::FromToMatrix(const aiVector3t<TReal>& from, - const aiVector3t<TReal>& to, aiMatrix3x3t<TReal>& mtx) { - const TReal e = from * to; - const TReal f = (e < 0)? -e:e; - - if (f > static_cast<TReal>(1.0) - static_cast<TReal>(0.00001)) /* "from" and "to"-vector almost parallel */ - { - aiVector3D u,v; /* temporary storage vectors */ - aiVector3D x; /* vector most nearly orthogonal to "from" */ - - x.x = (from.x > 0.0)? from.x : -from.x; - x.y = (from.y > 0.0)? from.y : -from.y; - x.z = (from.z > 0.0)? from.z : -from.z; - - if (x.x < x.y) - { - if (x.x < x.z) - { - x.x = static_cast<TReal>(1.0); - x.y = x.z = static_cast<TReal>(0.0); - } - else - { - x.z = static_cast<TReal>(1.0); - x.x = x.y = static_cast<TReal>(0.0); - } - } - else - { - if (x.y < x.z) - { - x.y = static_cast<TReal>(1.0); - x.x = x.z = static_cast<TReal>(0.0); - } - else - { - x.z = static_cast<TReal>(1.0); - x.x = x.y = static_cast<TReal>(0.0); - } - } - - u.x = x.x - from.x; u.y = x.y - from.y; u.z = x.z - from.z; - v.x = x.x - to.x; v.y = x.y - to.y; v.z = x.z - to.z; - - const TReal c1_ = static_cast<TReal>(2.0) / (u * u); - const TReal c2_ = static_cast<TReal>(2.0) / (v * v); - const TReal c3_ = c1_ * c2_ * (u * v); - - for (unsigned int i = 0; i < 3; i++) - { - for (unsigned int j = 0; j < 3; j++) - { - mtx[i][j] = - c1_ * u[i] * u[j] - c2_ * v[i] * v[j] - + c3_ * v[i] * u[j]; - } - mtx[i][i] += static_cast<TReal>(1.0); - } - } - else /* the most common case, unless "from"="to", or "from"=-"to" */ - { - const aiVector3D v = from ^ to; - /* ... use this hand optimized version (9 mults less) */ - const TReal h = static_cast<TReal>(1.0)/(static_cast<TReal>(1.0) + e); /* optimization by Gottfried Chen */ - const TReal hvx = h * v.x; - const TReal hvz = h * v.z; - const TReal hvxy = hvx * v.y; - const TReal hvxz = hvx * v.z; - const TReal hvyz = hvz * v.y; - mtx[0][0] = e + hvx * v.x; - mtx[0][1] = hvxy - v.z; - mtx[0][2] = hvxz + v.y; - - mtx[1][0] = hvxy + v.z; - mtx[1][1] = e + h * v.y * v.y; - mtx[1][2] = hvyz - v.x; - - mtx[2][0] = hvxz - v.y; - mtx[2][1] = hvyz + v.x; - mtx[2][2] = e + hvz * v.z; - } - return mtx; -} - -#endif // __cplusplus -#endif // AI_MATRIX3X3_INL_INC diff --git a/thirdparty/assimp/include/assimp/matrix4x4.h b/thirdparty/assimp/include/assimp/matrix4x4.h deleted file mode 100644 index 8fc216f669..0000000000 --- a/thirdparty/assimp/include/assimp/matrix4x4.h +++ /dev/null @@ -1,276 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file matrix4x4.h - * @brief 4x4 matrix structure, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_MATRIX4X4_H_INC -#define AI_MATRIX4X4_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/vector3.h> -#include <assimp/defs.h> - -#ifdef __cplusplus - -template<typename TReal> class aiMatrix3x3t; -template<typename TReal> class aiQuaterniont; - -// --------------------------------------------------------------------------- -/** @brief Represents a row-major 4x4 matrix, use this for homogeneous - * coordinates. - * - * There's much confusion about matrix layouts (column vs. row order). - * This is *always* a row-major matrix. Not even with the - * #aiProcess_ConvertToLeftHanded flag, which absolutely does not affect - * matrix order - it just affects the handedness of the coordinate system - * defined thereby. - */ -template<typename TReal> -class aiMatrix4x4t { -public: - - /** set to identity */ - aiMatrix4x4t() AI_NO_EXCEPT; - - /** construction from single values */ - aiMatrix4x4t ( TReal _a1, TReal _a2, TReal _a3, TReal _a4, - TReal _b1, TReal _b2, TReal _b3, TReal _b4, - TReal _c1, TReal _c2, TReal _c3, TReal _c4, - TReal _d1, TReal _d2, TReal _d3, TReal _d4); - - - /** construction from 3x3 matrix, remaining elements are set to identity */ - explicit aiMatrix4x4t( const aiMatrix3x3t<TReal>& m); - - /** construction from position, rotation and scaling components - * @param scaling The scaling for the x,y,z axes - * @param rotation The rotation as a hamilton quaternion - * @param position The position for the x,y,z axes - */ - aiMatrix4x4t(const aiVector3t<TReal>& scaling, const aiQuaterniont<TReal>& rotation, - const aiVector3t<TReal>& position); - - // array access operators - /** @fn TReal* operator[] (unsigned int p_iIndex) - * @param [in] p_iIndex - index of the row. - * @return pointer to pointed row. 
- */ - TReal* operator[] (unsigned int p_iIndex); - - /** @fn const TReal* operator[] (unsigned int p_iIndex) const - * @overload TReal* operator[] (unsigned int p_iIndex) - */ - const TReal* operator[] (unsigned int p_iIndex) const; - - // comparison operators - bool operator== (const aiMatrix4x4t& m) const; - bool operator!= (const aiMatrix4x4t& m) const; - - bool Equal(const aiMatrix4x4t& m, TReal epsilon = 1e-6) const; - - // matrix multiplication. - aiMatrix4x4t& operator *= (const aiMatrix4x4t& m); - aiMatrix4x4t operator * (const aiMatrix4x4t& m) const; - aiMatrix4x4t operator * (const TReal& aFloat) const; - aiMatrix4x4t operator + (const aiMatrix4x4t& aMatrix) const; - - template <typename TOther> - operator aiMatrix4x4t<TOther> () const; - - // ------------------------------------------------------------------- - /** @brief Transpose the matrix */ - aiMatrix4x4t& Transpose(); - - // ------------------------------------------------------------------- - /** @brief Invert the matrix. - * If the matrix is not invertible all elements are set to qnan. - * Beware, use (f != f) to check whether a TReal f is qnan. - */ - aiMatrix4x4t& Inverse(); - TReal Determinant() const; - - - // ------------------------------------------------------------------- - /** @brief Returns true of the matrix is the identity matrix. - * The check is performed against a not so small epsilon. - */ - inline bool IsIdentity() const; - - // ------------------------------------------------------------------- - /** @brief Decompose a trafo matrix into its original components - * @param scaling Receives the output scaling for the x,y,z axes - * @param rotation Receives the output rotation as a hamilton - * quaternion - * @param position Receives the output position for the x,y,z axes - */ - void Decompose (aiVector3t<TReal>& scaling, aiQuaterniont<TReal>& rotation, - aiVector3t<TReal>& position) const; - - // ------------------------------------------------------------------- - /** @fn void Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotation, aiVector3t<TReal>& pPosition) const - * @brief Decompose a trafo matrix into its original components. - * Thx to good FAQ at http://www.gamedev.ru/code/articles/faq_matrix_quat - * @param [out] pScaling - Receives the output scaling for the x,y,z axes. - * @param [out] pRotation - Receives the output rotation as a Euler angles. - * @param [out] pPosition - Receives the output position for the x,y,z axes. - */ - void Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotation, aiVector3t<TReal>& pPosition) const; - - // ------------------------------------------------------------------- - /** @fn void Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotationAxis, TReal& pRotationAngle, aiVector3t<TReal>& pPosition) const - * @brief Decompose a trafo matrix into its original components - * Thx to good FAQ at http://www.gamedev.ru/code/articles/faq_matrix_quat - * @param [out] pScaling - Receives the output scaling for the x,y,z axes. - * @param [out] pRotationAxis - Receives the output rotation axis. - * @param [out] pRotationAngle - Receives the output rotation angle for @ref pRotationAxis. - * @param [out] pPosition - Receives the output position for the x,y,z axes. 
- */ - void Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotationAxis, TReal& pRotationAngle, aiVector3t<TReal>& pPosition) const; - - // ------------------------------------------------------------------- - /** @brief Decompose a trafo matrix with no scaling into its - * original components - * @param rotation Receives the output rotation as a hamilton - * quaternion - * @param position Receives the output position for the x,y,z axes - */ - void DecomposeNoScaling (aiQuaterniont<TReal>& rotation, - aiVector3t<TReal>& position) const; - - // ------------------------------------------------------------------- - /** @brief Creates a trafo matrix from a set of euler angles - * @param x Rotation angle for the x-axis, in radians - * @param y Rotation angle for the y-axis, in radians - * @param z Rotation angle for the z-axis, in radians - */ - aiMatrix4x4t& FromEulerAnglesXYZ(TReal x, TReal y, TReal z); - aiMatrix4x4t& FromEulerAnglesXYZ(const aiVector3t<TReal>& blubb); - - // ------------------------------------------------------------------- - /** @brief Returns a rotation matrix for a rotation around the x axis - * @param a Rotation angle, in radians - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& RotationX(TReal a, aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a rotation matrix for a rotation around the y axis - * @param a Rotation angle, in radians - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& RotationY(TReal a, aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a rotation matrix for a rotation around the z axis - * @param a Rotation angle, in radians - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& RotationZ(TReal a, aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** Returns a rotation matrix for a rotation around an arbitrary axis. - * @param a Rotation angle, in radians - * @param axis Rotation axis, should be a normalized vector. - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& Rotation(TReal a, const aiVector3t<TReal>& axis, - aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a translation matrix - * @param v Translation vector - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& Translation( const aiVector3t<TReal>& v, - aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** @brief Returns a scaling matrix - * @param v Scaling vector - * @param out Receives the output matrix - * @return Reference to the output matrix - */ - static aiMatrix4x4t& Scaling( const aiVector3t<TReal>& v, aiMatrix4x4t& out); - - // ------------------------------------------------------------------- - /** @brief A function for creating a rotation matrix that rotates a - * vector called "from" into another vector called "to". 
- * Input : from[3], to[3] which both must be *normalized* non-zero vectors - * Output: mtx[3][3] -- a 3x3 matrix in column-major form - * Authors: Tomas Mueller, John Hughes - * "Efficiently Building a Matrix to Rotate One Vector to Another" - * Journal of Graphics Tools, 4(4):1-4, 1999 - */ - static aiMatrix4x4t& FromToMatrix(const aiVector3t<TReal>& from, - const aiVector3t<TReal>& to, aiMatrix4x4t& out); - - TReal a1, a2, a3, a4; - TReal b1, b2, b3, b4; - TReal c1, c2, c3, c4; - TReal d1, d2, d3, d4; -}; - -typedef aiMatrix4x4t<ai_real> aiMatrix4x4; - -#else - -struct aiMatrix4x4 { - ai_real a1, a2, a3, a4; - ai_real b1, b2, b3, b4; - ai_real c1, c2, c3, c4; - ai_real d1, d2, d3, d4; -}; - - -#endif // __cplusplus - -#endif // AI_MATRIX4X4_H_INC diff --git a/thirdparty/assimp/include/assimp/matrix4x4.inl b/thirdparty/assimp/include/assimp/matrix4x4.inl deleted file mode 100644 index 84079974f7..0000000000 --- a/thirdparty/assimp/include/assimp/matrix4x4.inl +++ /dev/null @@ -1,677 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file matrix4x4.inl - * @brief Inline implementation of the 4x4 matrix operators - */ -#pragma once -#ifndef AI_MATRIX4X4_INL_INC -#define AI_MATRIX4X4_INL_INC - -#ifdef __cplusplus - -#include "matrix4x4.h" -#include "matrix3x3.h" -#include "quaternion.h" -#include "MathFunctions.h" - -#include <algorithm> -#include <limits> -#include <cmath> - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -aiMatrix4x4t<TReal>::aiMatrix4x4t() AI_NO_EXCEPT : - a1(1.0f), a2(), a3(), a4(), - b1(), b2(1.0f), b3(), b4(), - c1(), c2(), c3(1.0f), c4(), - d1(), d2(), d3(), d4(1.0f) { - // empty -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -aiMatrix4x4t<TReal>::aiMatrix4x4t (TReal _a1, TReal _a2, TReal _a3, TReal _a4, - TReal _b1, TReal _b2, TReal _b3, TReal _b4, - TReal _c1, TReal _c2, TReal _c3, TReal _c4, - TReal _d1, TReal _d2, TReal _d3, TReal _d4) : - a1(_a1), a2(_a2), a3(_a3), a4(_a4), - b1(_b1), b2(_b2), b3(_b3), b4(_b4), - c1(_c1), c2(_c2), c3(_c3), c4(_c4), - d1(_d1), d2(_d2), d3(_d3), d4(_d4) { - // empty -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -template <typename TOther> -aiMatrix4x4t<TReal>::operator aiMatrix4x4t<TOther> () const { - return aiMatrix4x4t<TOther>(static_cast<TOther>(a1),static_cast<TOther>(a2),static_cast<TOther>(a3),static_cast<TOther>(a4), - static_cast<TOther>(b1),static_cast<TOther>(b2),static_cast<TOther>(b3),static_cast<TOther>(b4), - static_cast<TOther>(c1),static_cast<TOther>(c2),static_cast<TOther>(c3),static_cast<TOther>(c4), - static_cast<TOther>(d1),static_cast<TOther>(d2),static_cast<TOther>(d3),static_cast<TOther>(d4)); -} - - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>::aiMatrix4x4t (const aiMatrix3x3t<TReal>& m) { - a1 = m.a1; a2 = m.a2; a3 = m.a3; a4 = static_cast<TReal>(0.0); - b1 = m.b1; b2 = m.b2; b3 = m.b3; b4 = static_cast<TReal>(0.0); - c1 = m.c1; c2 = m.c2; c3 = m.c3; c4 = static_cast<TReal>(0.0); - d1 = static_cast<TReal>(0.0); d2 = static_cast<TReal>(0.0); d3 = static_cast<TReal>(0.0); d4 = static_cast<TReal>(1.0); -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>::aiMatrix4x4t (const aiVector3t<TReal>& scaling, const aiQuaterniont<TReal>& rotation, const aiVector3t<TReal>& position) { - // build a 3x3 rotation matrix - aiMatrix3x3t<TReal> m = rotation.GetMatrix(); - - a1 = m.a1 * scaling.x; - a2 = m.a2 * scaling.x; - a3 = m.a3 * scaling.x; - a4 = position.x; - - b1 = m.b1 * scaling.y; - b2 = m.b2 * scaling.y; - b3 = m.b3 * scaling.y; - b4 = position.y; - - c1 = m.c1 * scaling.z; - c2 = m.c2 * scaling.z; - c3 = m.c3 * scaling.z; - c4= position.z; - - d1 = static_cast<TReal>(0.0); - d2 = static_cast<TReal>(0.0); - d3 = static_cast<TReal>(0.0); - d4 = static_cast<TReal>(1.0); -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::operator *= (const aiMatrix4x4t<TReal>& m) { - *this = aiMatrix4x4t<TReal>( - m.a1 * a1 + m.b1 * a2 + m.c1 * a3 + m.d1 * a4, - m.a2 * a1 + m.b2 * a2 + m.c2 * 
a3 + m.d2 * a4, - m.a3 * a1 + m.b3 * a2 + m.c3 * a3 + m.d3 * a4, - m.a4 * a1 + m.b4 * a2 + m.c4 * a3 + m.d4 * a4, - m.a1 * b1 + m.b1 * b2 + m.c1 * b3 + m.d1 * b4, - m.a2 * b1 + m.b2 * b2 + m.c2 * b3 + m.d2 * b4, - m.a3 * b1 + m.b3 * b2 + m.c3 * b3 + m.d3 * b4, - m.a4 * b1 + m.b4 * b2 + m.c4 * b3 + m.d4 * b4, - m.a1 * c1 + m.b1 * c2 + m.c1 * c3 + m.d1 * c4, - m.a2 * c1 + m.b2 * c2 + m.c2 * c3 + m.d2 * c4, - m.a3 * c1 + m.b3 * c2 + m.c3 * c3 + m.d3 * c4, - m.a4 * c1 + m.b4 * c2 + m.c4 * c3 + m.d4 * c4, - m.a1 * d1 + m.b1 * d2 + m.c1 * d3 + m.d1 * d4, - m.a2 * d1 + m.b2 * d2 + m.c2 * d3 + m.d2 * d4, - m.a3 * d1 + m.b3 * d2 + m.c3 * d3 + m.d3 * d4, - m.a4 * d1 + m.b4 * d2 + m.c4 * d3 + m.d4 * d4); - return *this; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE aiMatrix4x4t<TReal> aiMatrix4x4t<TReal>::operator* (const TReal& aFloat) const { - aiMatrix4x4t<TReal> temp( - a1 * aFloat, - a2 * aFloat, - a3 * aFloat, - a4 * aFloat, - b1 * aFloat, - b2 * aFloat, - b3 * aFloat, - b4 * aFloat, - c1 * aFloat, - c2 * aFloat, - c3 * aFloat, - c4 * aFloat, - d1 * aFloat, - d2 * aFloat, - d3 * aFloat, - d4 * aFloat); - return temp; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal> aiMatrix4x4t<TReal>::operator+ (const aiMatrix4x4t<TReal>& m) const { - aiMatrix4x4t<TReal> temp( - m.a1 + a1, - m.a2 + a2, - m.a3 + a3, - m.a4 + a4, - m.b1 + b1, - m.b2 + b2, - m.b3 + b3, - m.b4 + b4, - m.c1 + c1, - m.c2 + c2, - m.c3 + c3, - m.c4 + c4, - m.d1 + d1, - m.d2 + d2, - m.d3 + d3, - m.d4 + d4); - return temp; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal> aiMatrix4x4t<TReal>::operator* (const aiMatrix4x4t<TReal>& m) const { - aiMatrix4x4t<TReal> temp( *this); - temp *= m; - return temp; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::Transpose() { - // (TReal&) don't remove, GCC complains cause of packed fields - std::swap( (TReal&)b1, (TReal&)a2); - std::swap( (TReal&)c1, (TReal&)a3); - std::swap( (TReal&)c2, (TReal&)b3); - std::swap( (TReal&)d1, (TReal&)a4); - std::swap( (TReal&)d2, (TReal&)b4); - std::swap( (TReal&)d3, (TReal&)c4); - return *this; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -TReal aiMatrix4x4t<TReal>::Determinant() const { - return a1*b2*c3*d4 - a1*b2*c4*d3 + a1*b3*c4*d2 - a1*b3*c2*d4 - + a1*b4*c2*d3 - a1*b4*c3*d2 - a2*b3*c4*d1 + a2*b3*c1*d4 - - a2*b4*c1*d3 + a2*b4*c3*d1 - a2*b1*c3*d4 + a2*b1*c4*d3 - + a3*b4*c1*d2 - a3*b4*c2*d1 + a3*b1*c2*d4 - a3*b1*c4*d2 - + a3*b2*c4*d1 - a3*b2*c1*d4 - a4*b1*c2*d3 + a4*b1*c3*d2 - - a4*b2*c3*d1 + a4*b2*c1*d3 - a4*b3*c1*d2 + a4*b3*c2*d1; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::Inverse() { - // Compute the reciprocal determinant - const TReal det = Determinant(); - if(det == static_cast<TReal>(0.0)) - { - // Matrix not invertible. Setting all elements to nan is not really - // correct in a mathematical sense but it is easy to debug for the - // programmer. 
- const TReal nan = std::numeric_limits<TReal>::quiet_NaN(); - *this = aiMatrix4x4t<TReal>( - nan,nan,nan,nan, - nan,nan,nan,nan, - nan,nan,nan,nan, - nan,nan,nan,nan); - - return *this; - } - - const TReal invdet = static_cast<TReal>(1.0) / det; - - aiMatrix4x4t<TReal> res; - res.a1 = invdet * (b2 * (c3 * d4 - c4 * d3) + b3 * (c4 * d2 - c2 * d4) + b4 * (c2 * d3 - c3 * d2)); - res.a2 = -invdet * (a2 * (c3 * d4 - c4 * d3) + a3 * (c4 * d2 - c2 * d4) + a4 * (c2 * d3 - c3 * d2)); - res.a3 = invdet * (a2 * (b3 * d4 - b4 * d3) + a3 * (b4 * d2 - b2 * d4) + a4 * (b2 * d3 - b3 * d2)); - res.a4 = -invdet * (a2 * (b3 * c4 - b4 * c3) + a3 * (b4 * c2 - b2 * c4) + a4 * (b2 * c3 - b3 * c2)); - res.b1 = -invdet * (b1 * (c3 * d4 - c4 * d3) + b3 * (c4 * d1 - c1 * d4) + b4 * (c1 * d3 - c3 * d1)); - res.b2 = invdet * (a1 * (c3 * d4 - c4 * d3) + a3 * (c4 * d1 - c1 * d4) + a4 * (c1 * d3 - c3 * d1)); - res.b3 = -invdet * (a1 * (b3 * d4 - b4 * d3) + a3 * (b4 * d1 - b1 * d4) + a4 * (b1 * d3 - b3 * d1)); - res.b4 = invdet * (a1 * (b3 * c4 - b4 * c3) + a3 * (b4 * c1 - b1 * c4) + a4 * (b1 * c3 - b3 * c1)); - res.c1 = invdet * (b1 * (c2 * d4 - c4 * d2) + b2 * (c4 * d1 - c1 * d4) + b4 * (c1 * d2 - c2 * d1)); - res.c2 = -invdet * (a1 * (c2 * d4 - c4 * d2) + a2 * (c4 * d1 - c1 * d4) + a4 * (c1 * d2 - c2 * d1)); - res.c3 = invdet * (a1 * (b2 * d4 - b4 * d2) + a2 * (b4 * d1 - b1 * d4) + a4 * (b1 * d2 - b2 * d1)); - res.c4 = -invdet * (a1 * (b2 * c4 - b4 * c2) + a2 * (b4 * c1 - b1 * c4) + a4 * (b1 * c2 - b2 * c1)); - res.d1 = -invdet * (b1 * (c2 * d3 - c3 * d2) + b2 * (c3 * d1 - c1 * d3) + b3 * (c1 * d2 - c2 * d1)); - res.d2 = invdet * (a1 * (c2 * d3 - c3 * d2) + a2 * (c3 * d1 - c1 * d3) + a3 * (c1 * d2 - c2 * d1)); - res.d3 = -invdet * (a1 * (b2 * d3 - b3 * d2) + a2 * (b3 * d1 - b1 * d3) + a3 * (b1 * d2 - b2 * d1)); - res.d4 = invdet * (a1 * (b2 * c3 - b3 * c2) + a2 * (b3 * c1 - b1 * c3) + a3 * (b1 * c2 - b2 * c1)); - *this = res; - - return *this; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -TReal* aiMatrix4x4t<TReal>::operator[](unsigned int p_iIndex) { - if (p_iIndex > 3) { - return nullptr; - } - switch ( p_iIndex ) { - case 0: - return &a1; - case 1: - return &b1; - case 2: - return &c1; - case 3: - return &d1; - default: - break; - } - return &a1; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -const TReal* aiMatrix4x4t<TReal>::operator[](unsigned int p_iIndex) const { - if (p_iIndex > 3) { - return nullptr; - } - - switch ( p_iIndex ) { - case 0: - return &a1; - case 1: - return &b1; - case 2: - return &c1; - case 3: - return &d1; - default: - break; - } - return &a1; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -bool aiMatrix4x4t<TReal>::operator== (const aiMatrix4x4t<TReal>& m) const { - return (a1 == m.a1 && a2 == m.a2 && a3 == m.a3 && a4 == m.a4 && - b1 == m.b1 && b2 == m.b2 && b3 == m.b3 && b4 == m.b4 && - c1 == m.c1 && c2 == m.c2 && c3 == m.c3 && c4 == m.c4 && - d1 == m.d1 && d2 == m.d2 && d3 == m.d3 && d4 == m.d4); -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -bool aiMatrix4x4t<TReal>::operator!= (const aiMatrix4x4t<TReal>& m) const { - return !(*this == m); -} - -// 
--------------------------------------------------------------------------- -template<typename TReal> -AI_FORCE_INLINE -bool aiMatrix4x4t<TReal>::Equal(const aiMatrix4x4t<TReal>& m, TReal epsilon) const { - return - std::abs(a1 - m.a1) <= epsilon && - std::abs(a2 - m.a2) <= epsilon && - std::abs(a3 - m.a3) <= epsilon && - std::abs(a4 - m.a4) <= epsilon && - std::abs(b1 - m.b1) <= epsilon && - std::abs(b2 - m.b2) <= epsilon && - std::abs(b3 - m.b3) <= epsilon && - std::abs(b4 - m.b4) <= epsilon && - std::abs(c1 - m.c1) <= epsilon && - std::abs(c2 - m.c2) <= epsilon && - std::abs(c3 - m.c3) <= epsilon && - std::abs(c4 - m.c4) <= epsilon && - std::abs(d1 - m.d1) <= epsilon && - std::abs(d2 - m.d2) <= epsilon && - std::abs(d3 - m.d3) <= epsilon && - std::abs(d4 - m.d4) <= epsilon; -} - -// ---------------------------------------------------------------------------------------- - -#define ASSIMP_MATRIX4_4_DECOMPOSE_PART \ - const aiMatrix4x4t<TReal>& _this = *this;/* Create alias for conveniance. */ \ - \ - /* extract translation */ \ - pPosition.x = _this[0][3]; \ - pPosition.y = _this[1][3]; \ - pPosition.z = _this[2][3]; \ - \ - /* extract the columns of the matrix. */ \ - aiVector3t<TReal> vCols[3] = { \ - aiVector3t<TReal>(_this[0][0],_this[1][0],_this[2][0]), \ - aiVector3t<TReal>(_this[0][1],_this[1][1],_this[2][1]), \ - aiVector3t<TReal>(_this[0][2],_this[1][2],_this[2][2]) \ - }; \ - \ - /* extract the scaling factors */ \ - pScaling.x = vCols[0].Length(); \ - pScaling.y = vCols[1].Length(); \ - pScaling.z = vCols[2].Length(); \ - \ - /* and the sign of the scaling */ \ - if (Determinant() < 0) pScaling = -pScaling; \ - \ - /* and remove all scaling from the matrix */ \ - if(pScaling.x) vCols[0] /= pScaling.x; \ - if(pScaling.y) vCols[1] /= pScaling.y; \ - if(pScaling.z) vCols[2] /= pScaling.z; \ - \ - do {} while(false) - -template <typename TReal> -AI_FORCE_INLINE -void aiMatrix4x4t<TReal>::Decompose (aiVector3t<TReal>& pScaling, aiQuaterniont<TReal>& pRotation, - aiVector3t<TReal>& pPosition) const { - ASSIMP_MATRIX4_4_DECOMPOSE_PART; - - // build a 3x3 rotation matrix - aiMatrix3x3t<TReal> m(vCols[0].x,vCols[1].x,vCols[2].x, - vCols[0].y,vCols[1].y,vCols[2].y, - vCols[0].z,vCols[1].z,vCols[2].z); - - // and generate the rotation quaternion from it - pRotation = aiQuaterniont<TReal>(m); -} - -template <typename TReal> -AI_FORCE_INLINE -void aiMatrix4x4t<TReal>::Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotation, aiVector3t<TReal>& pPosition) const { - ASSIMP_MATRIX4_4_DECOMPOSE_PART; - - /* - assuming a right-handed coordinate system - and post-multiplication of column vectors, - the rotation matrix for an euler XYZ rotation is M = Rz * Ry * Rx. - combining gives: - - | CE BDE-AF ADE+BF 0 | - M = | CF BDF+AE ADF-BE 0 | - | -D CB AC 0 | - | 0 0 0 1 | - - where - A = cos(angle_x), B = sin(angle_x); - C = cos(angle_y), D = sin(angle_y); - E = cos(angle_z), F = sin(angle_z); - */ - - // Use a small epsilon to solve floating-point inaccuracies - const TReal epsilon = Assimp::Math::getEpsilon<TReal>(); - - pRotation.y = std::asin(-vCols[0].z);// D. Angle around oY. - - TReal C = std::cos(pRotation.y); - - if(std::fabs(C) > epsilon) - { - // Finding angle around oX. - TReal tan_x = vCols[2].z / C;// A - TReal tan_y = vCols[1].z / C;// B - - pRotation.x = std::atan2(tan_y, tan_x); - // Finding angle around oZ. - tan_x = vCols[0].x / C;// E - tan_y = vCols[0].y / C;// F - pRotation.z = std::atan2(tan_y, tan_x); - } - else - {// oY is fixed. 
- pRotation.x = 0;// Set angle around oX to 0. => A == 1, B == 0, C == 0, D == 1. - - // And finding angle around oZ. - TReal tan_x = vCols[1].y;// BDF+AE => E - TReal tan_y = -vCols[1].x;// BDE-AF => F - - pRotation.z = std::atan2(tan_y, tan_x); - } -} - -#undef ASSIMP_MATRIX4_4_DECOMPOSE_PART - -template <typename TReal> -AI_FORCE_INLINE -void aiMatrix4x4t<TReal>::Decompose(aiVector3t<TReal>& pScaling, aiVector3t<TReal>& pRotationAxis, TReal& pRotationAngle, - aiVector3t<TReal>& pPosition) const { - aiQuaterniont<TReal> pRotation; - - Decompose(pScaling, pRotation, pPosition); - pRotation.Normalize(); - - TReal angle_cos = pRotation.w; - TReal angle_sin = std::sqrt(1.0f - angle_cos * angle_cos); - - pRotationAngle = std::acos(angle_cos) * 2; - - // Use a small epsilon to solve floating-point inaccuracies - const TReal epsilon = 10e-3f; - - if(std::fabs(angle_sin) < epsilon) angle_sin = 1; - - pRotationAxis.x = pRotation.x / angle_sin; - pRotationAxis.y = pRotation.y / angle_sin; - pRotationAxis.z = pRotation.z / angle_sin; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -void aiMatrix4x4t<TReal>::DecomposeNoScaling (aiQuaterniont<TReal>& rotation, - aiVector3t<TReal>& position) const { - const aiMatrix4x4t<TReal>& _this = *this; - - // extract translation - position.x = _this[0][3]; - position.y = _this[1][3]; - position.z = _this[2][3]; - - // extract rotation - rotation = aiQuaterniont<TReal>((aiMatrix3x3t<TReal>)_this); -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::FromEulerAnglesXYZ(const aiVector3t<TReal>& blubb) { - return FromEulerAnglesXYZ(blubb.x,blubb.y,blubb.z); -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::FromEulerAnglesXYZ(TReal x, TReal y, TReal z) { - aiMatrix4x4t<TReal>& _this = *this; - - TReal cx = std::cos(x); - TReal sx = std::sin(x); - TReal cy = std::cos(y); - TReal sy = std::sin(y); - TReal cz = std::cos(z); - TReal sz = std::sin(z); - - // mz*my*mx - _this.a1 = cz * cy; - _this.a2 = cz * sy * sx - sz * cx; - _this.a3 = sz * sx + cz * sy * cx; - - _this.b1 = sz * cy; - _this.b2 = cz * cx + sz * sy * sx; - _this.b3 = sz * sy * cx - cz * sx; - - _this.c1 = -sy; - _this.c2 = cy * sx; - _this.c3 = cy * cx; - - return *this; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -bool aiMatrix4x4t<TReal>::IsIdentity() const { - // Use a small epsilon to solve floating-point inaccuracies - const static TReal epsilon = 10e-3f; - - return (a2 <= epsilon && a2 >= -epsilon && - a3 <= epsilon && a3 >= -epsilon && - a4 <= epsilon && a4 >= -epsilon && - b1 <= epsilon && b1 >= -epsilon && - b3 <= epsilon && b3 >= -epsilon && - b4 <= epsilon && b4 >= -epsilon && - c1 <= epsilon && c1 >= -epsilon && - c2 <= epsilon && c2 >= -epsilon && - c4 <= epsilon && c4 >= -epsilon && - d1 <= epsilon && d1 >= -epsilon && - d2 <= epsilon && d2 >= -epsilon && - d3 <= epsilon && d3 >= -epsilon && - a1 <= 1.f+epsilon && a1 >= 1.f-epsilon && - b2 <= 1.f+epsilon && b2 >= 1.f-epsilon && - c3 <= 1.f+epsilon && c3 >= 1.f-epsilon && - d4 <= 1.f+epsilon && d4 >= 1.f-epsilon); -} - -// 
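The transform helpers declared in matrix4x4.h and implemented around this point (Translation(), RotationX(), Scaling(), operator*, Inverse(), IsIdentity()) cover the usual compose-and-invert workflow for node transforms. A minimal sketch under the same assumptions as above (standalone assimp headers, the .inl included explicitly); the exact-equality asserts only touch zeros and ones in the translation column, so they hold despite float arithmetic, and the round-trip check relies on the generous epsilon IsIdentity() uses.

    #include <cassert>
    #include <assimp/matrix4x4.h>
    #include <assimp/matrix4x4.inl> // inline implementations, included explicitly for this sketch

    int main() {
        aiMatrix4x4 t, r, s;
        aiMatrix4x4::Translation(aiVector3D(1.0f, 2.0f, 3.0f), t);
        aiMatrix4x4::RotationX(1.5707963f, r); // angle in radians (~90 degrees)
        aiMatrix4x4::Scaling(aiVector3D(2.0f, 2.0f, 2.0f), s);

        // Row-major layout with the translation in the fourth column (a4, b4, c4).
        assert(t.a4 == 1.0f && t.b4 == 2.0f && t.c4 == 3.0f);

        // Concatenate with operator*; the rotation and scale carry no translation,
        // so the product keeps t's translation column exactly.
        aiMatrix4x4 trs = t * r * s;
        assert(trs.a4 == 1.0f);

        // Inverse() works in place; the product with the original is the identity
        // within the small epsilon that IsIdentity() checks against.
        aiMatrix4x4 inv = trs;
        inv.Inverse();
        assert((trs * inv).IsIdentity());
        return 0;
    }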
---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::RotationX(TReal a, aiMatrix4x4t<TReal>& out) { - /* - | 1 0 0 0 | - M = | 0 cos(A) -sin(A) 0 | - | 0 sin(A) cos(A) 0 | - | 0 0 0 1 | */ - out = aiMatrix4x4t<TReal>(); - out.b2 = out.c3 = std::cos(a); - out.b3 = -(out.c2 = std::sin(a)); - return out; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::RotationY(TReal a, aiMatrix4x4t<TReal>& out) { - /* - | cos(A) 0 sin(A) 0 | - M = | 0 1 0 0 | - | -sin(A) 0 cos(A) 0 | - | 0 0 0 1 | - */ - out = aiMatrix4x4t<TReal>(); - out.a1 = out.c3 = std::cos(a); - out.c1 = -(out.a3 = std::sin(a)); - return out; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::RotationZ(TReal a, aiMatrix4x4t<TReal>& out) { - /* - | cos(A) -sin(A) 0 0 | - M = | sin(A) cos(A) 0 0 | - | 0 0 1 0 | - | 0 0 0 1 | */ - out = aiMatrix4x4t<TReal>(); - out.a1 = out.b2 = std::cos(a); - out.a2 = -(out.b1 = std::sin(a)); - return out; -} - -// ---------------------------------------------------------------------------------------- -// Returns a rotation matrix for a rotation around an arbitrary axis. -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::Rotation( TReal a, const aiVector3t<TReal>& axis, aiMatrix4x4t<TReal>& out) { - TReal c = std::cos( a), s = std::sin( a), t = 1 - c; - TReal x = axis.x, y = axis.y, z = axis.z; - - // Many thanks to MathWorld and Wikipedia - out.a1 = t*x*x + c; out.a2 = t*x*y - s*z; out.a3 = t*x*z + s*y; - out.b1 = t*x*y + s*z; out.b2 = t*y*y + c; out.b3 = t*y*z - s*x; - out.c1 = t*x*z - s*y; out.c2 = t*y*z + s*x; out.c3 = t*z*z + c; - out.a4 = out.b4 = out.c4 = static_cast<TReal>(0.0); - out.d1 = out.d2 = out.d3 = static_cast<TReal>(0.0); - out.d4 = static_cast<TReal>(1.0); - - return out; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::Translation( const aiVector3t<TReal>& v, aiMatrix4x4t<TReal>& out) { - out = aiMatrix4x4t<TReal>(); - out.a4 = v.x; - out.b4 = v.y; - out.c4 = v.z; - return out; -} - -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::Scaling( const aiVector3t<TReal>& v, aiMatrix4x4t<TReal>& out) { - out = aiMatrix4x4t<TReal>(); - out.a1 = v.x; - out.b2 = v.y; - out.c3 = v.z; - return out; -} - -// ---------------------------------------------------------------------------------------- -/** A function for creating a rotation matrix that rotates a vector called - * "from" into another vector called "to". 
- * Input : from[3], to[3] which both must be *normalized* non-zero vectors - * Output: mtx[3][3] -- a 3x3 matrix in colum-major form - * Authors: Tomas Möller, John Hughes - * "Efficiently Building a Matrix to Rotate One Vector to Another" - * Journal of Graphics Tools, 4(4):1-4, 1999 - */ -// ---------------------------------------------------------------------------------------- -template <typename TReal> -AI_FORCE_INLINE -aiMatrix4x4t<TReal>& aiMatrix4x4t<TReal>::FromToMatrix(const aiVector3t<TReal>& from, - const aiVector3t<TReal>& to, aiMatrix4x4t<TReal>& mtx) { - aiMatrix3x3t<TReal> m3; - aiMatrix3x3t<TReal>::FromToMatrix(from,to,m3); - mtx = aiMatrix4x4t<TReal>(m3); - return mtx; -} - -#endif // __cplusplus -#endif // AI_MATRIX4X4_INL_INC diff --git a/thirdparty/assimp/include/assimp/mesh.h b/thirdparty/assimp/include/assimp/mesh.h deleted file mode 100644 index fbf2a857ad..0000000000 --- a/thirdparty/assimp/include/assimp/mesh.h +++ /dev/null @@ -1,879 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file mesh.h - * @brief Declares the data structures in which the imported geometry is - returned by ASSIMP: aiMesh, aiFace and aiBone data structures. - */ -#pragma once -#ifndef AI_MESH_H_INC -#define AI_MESH_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <assimp/aabb.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -// Limits. These values are required to match the settings Assimp was -// compiled against. Therefore, do not redefine them unless you build the -// library from source using the same definitions. 
-// --------------------------------------------------------------------------- - -/** @def AI_MAX_FACE_INDICES - * Maximum number of indices per face (polygon). */ - -#ifndef AI_MAX_FACE_INDICES -# define AI_MAX_FACE_INDICES 0x7fff -#endif - -/** @def AI_MAX_BONE_WEIGHTS - * Maximum number of indices per face (polygon). */ - -#ifndef AI_MAX_BONE_WEIGHTS -# define AI_MAX_BONE_WEIGHTS 0x7fffffff -#endif - -/** @def AI_MAX_VERTICES - * Maximum number of vertices per mesh. */ - -#ifndef AI_MAX_VERTICES -# define AI_MAX_VERTICES 0x7fffffff -#endif - -/** @def AI_MAX_FACES - * Maximum number of faces per mesh. */ - -#ifndef AI_MAX_FACES -# define AI_MAX_FACES 0x7fffffff -#endif - -/** @def AI_MAX_NUMBER_OF_COLOR_SETS - * Supported number of vertex color sets per mesh. */ - -#ifndef AI_MAX_NUMBER_OF_COLOR_SETS -# define AI_MAX_NUMBER_OF_COLOR_SETS 0x8 -#endif // !! AI_MAX_NUMBER_OF_COLOR_SETS - -/** @def AI_MAX_NUMBER_OF_TEXTURECOORDS - * Supported number of texture coord sets (UV(W) channels) per mesh */ - -#ifndef AI_MAX_NUMBER_OF_TEXTURECOORDS -# define AI_MAX_NUMBER_OF_TEXTURECOORDS 0x8 -#endif // !! AI_MAX_NUMBER_OF_TEXTURECOORDS - -// --------------------------------------------------------------------------- -/** @brief A single face in a mesh, referring to multiple vertices. - * - * If mNumIndices is 3, we call the face 'triangle', for mNumIndices > 3 - * it's called 'polygon' (hey, that's just a definition!). - * <br> - * aiMesh::mPrimitiveTypes can be queried to quickly examine which types of - * primitive are actually present in a mesh. The #aiProcess_SortByPType flag - * executes a special post-processing algorithm which splits meshes with - * *different* primitive types mixed up (e.g. lines and triangles) in several - * 'clean' submeshes. Furthermore there is a configuration option ( - * #AI_CONFIG_PP_SBP_REMOVE) to force #aiProcess_SortByPType to remove - * specific kinds of primitives from the imported scene, completely and forever. - * In many cases you'll probably want to set this setting to - * @code - * aiPrimitiveType_LINE|aiPrimitiveType_POINT - * @endcode - * Together with the #aiProcess_Triangulate flag you can then be sure that - * #aiFace::mNumIndices is always 3. - * @note Take a look at the @link data Data Structures page @endlink for - * more information on the layout and winding order of a face. - */ -struct aiFace -{ - //! Number of indices defining this face. - //! The maximum value for this member is #AI_MAX_FACE_INDICES. - unsigned int mNumIndices; - - //! Pointer to the indices array. Size of the array is given in numIndices. - unsigned int* mIndices; - -#ifdef __cplusplus - - //! Default constructor - aiFace() AI_NO_EXCEPT - : mNumIndices( 0 ) - , mIndices( nullptr ) { - // empty - } - - //! Default destructor. Delete the index array - ~aiFace() - { - delete [] mIndices; - } - - //! Copy constructor. Copy the index array - aiFace( const aiFace& o) - : mNumIndices(0) - , mIndices( nullptr ) { - *this = o; - } - - //! Assignment operator. Copy the index array - aiFace& operator = ( const aiFace& o) { - if (&o == this) { - return *this; - } - - delete[] mIndices; - mNumIndices = o.mNumIndices; - if (mNumIndices) { - mIndices = new unsigned int[mNumIndices]; - ::memcpy( mIndices, o.mIndices, mNumIndices * sizeof( unsigned int)); - } else { - mIndices = nullptr; - } - - return *this; - } - - //! Comparison operator. Checks whether the index array - //! 
of two faces is identical - bool operator== (const aiFace& o) const { - if (mIndices == o.mIndices) { - return true; - } - - if (nullptr != mIndices && mNumIndices != o.mNumIndices) { - return false; - } - - if (nullptr == mIndices) { - return false; - } - - for (unsigned int i = 0; i < this->mNumIndices; ++i) { - if (mIndices[i] != o.mIndices[i]) { - return false; - } - } - - return true; - } - - //! Inverse comparison operator. Checks whether the index - //! array of two faces is NOT identical - bool operator != (const aiFace& o) const { - return !(*this == o); - } -#endif // __cplusplus -}; // struct aiFace - - -// --------------------------------------------------------------------------- -/** @brief A single influence of a bone on a vertex. - */ -struct aiVertexWeight { - //! Index of the vertex which is influenced by the bone. - unsigned int mVertexId; - - //! The strength of the influence in the range (0...1). - //! The influence from all bones at one vertex amounts to 1. - float mWeight; - -#ifdef __cplusplus - - //! Default constructor - aiVertexWeight() AI_NO_EXCEPT - : mVertexId(0) - , mWeight(0.0f) { - // empty - } - - //! Initialization from a given index and vertex weight factor - //! \param pID ID - //! \param pWeight Vertex weight factor - aiVertexWeight( unsigned int pID, float pWeight ) - : mVertexId( pID ) - , mWeight( pWeight ) { - // empty - } - - bool operator == ( const aiVertexWeight &rhs ) const { - return ( mVertexId == rhs.mVertexId && mWeight == rhs.mWeight ); - } - - bool operator != ( const aiVertexWeight &rhs ) const { - return ( *this == rhs ); - } - -#endif // __cplusplus -}; - - -// Forward declare aiNode (pointer use only) -struct aiNode; - -// --------------------------------------------------------------------------- -/** @brief A single bone of a mesh. - * - * A bone has a name by which it can be found in the frame hierarchy and by - * which it can be addressed by animations. In addition it has a number of - * influences on vertices, and a matrix relating the mesh position to the - * position of the bone at the time of binding. - */ -struct aiBone { - //! The name of the bone. - C_STRUCT aiString mName; - - //! The number of vertices affected by this bone. - //! The maximum value for this member is #AI_MAX_BONE_WEIGHTS. - unsigned int mNumWeights; - -#ifndef ASSIMP_BUILD_NO_ARMATUREPOPULATE_PROCESS - // The bone armature node - used for skeleton conversion - // you must enable aiProcess_PopulateArmatureData to populate this - C_STRUCT aiNode* mArmature; - - // The bone node in the scene - used for skeleton conversion - // you must enable aiProcess_PopulateArmatureData to populate this - C_STRUCT aiNode* mNode; - -#endif - //! The influence weights of this bone, by vertex index. - C_STRUCT aiVertexWeight* mWeights; - - /** Matrix that transforms from bone space to mesh space in bind pose. - * - * This matrix describes the position of the mesh - * in the local space of this bone when the skeleton was bound. - * Thus it can be used directly to determine a desired vertex position, - * given the world-space transform of the bone when animated, - * and the position of the vertex in mesh space. - * - * It is sometimes called an inverse-bind matrix, - * or inverse bind pose matrix. - */ - C_STRUCT aiMatrix4x4 mOffsetMatrix; - -#ifdef __cplusplus - - //! Default constructor - aiBone() AI_NO_EXCEPT - : mName() - , mNumWeights( 0 ) - , mWeights( nullptr ) - , mOffsetMatrix() { - // empty - } - - //! 
Copy constructor - aiBone(const aiBone& other) - : mName( other.mName ) - , mNumWeights( other.mNumWeights ) - , mWeights(nullptr) - , mOffsetMatrix( other.mOffsetMatrix ) { - if (other.mWeights && other.mNumWeights) { - mWeights = new aiVertexWeight[mNumWeights]; - ::memcpy(mWeights,other.mWeights,mNumWeights * sizeof(aiVertexWeight)); - } - } - - - //! Assignment operator - aiBone &operator=(const aiBone& other) { - if (this == &other) { - return *this; - } - - mName = other.mName; - mNumWeights = other.mNumWeights; - mOffsetMatrix = other.mOffsetMatrix; - - if (other.mWeights && other.mNumWeights) - { - if (mWeights) { - delete[] mWeights; - } - - mWeights = new aiVertexWeight[mNumWeights]; - ::memcpy(mWeights,other.mWeights,mNumWeights * sizeof(aiVertexWeight)); - } - - return *this; - } - - bool operator == ( const aiBone &rhs ) const { - if ( mName != rhs.mName || mNumWeights != rhs.mNumWeights ) { - return false; - } - - for ( size_t i = 0; i < mNumWeights; ++i ) { - if ( mWeights[ i ] != rhs.mWeights[ i ] ) { - return false; - } - } - - return true; - } - //! Destructor - deletes the array of vertex weights - ~aiBone() { - delete [] mWeights; - } -#endif // __cplusplus -}; - - -// --------------------------------------------------------------------------- -/** @brief Enumerates the types of geometric primitives supported by Assimp. - * - * @see aiFace Face data structure - * @see aiProcess_SortByPType Per-primitive sorting of meshes - * @see aiProcess_Triangulate Automatic triangulation - * @see AI_CONFIG_PP_SBP_REMOVE Removal of specific primitive types. - */ -enum aiPrimitiveType -{ - /** A point primitive. - * - * This is just a single vertex in the virtual world, - * #aiFace contains just one index for such a primitive. - */ - aiPrimitiveType_POINT = 0x1, - - /** A line primitive. - * - * This is a line defined through a start and an end position. - * #aiFace contains exactly two indices for such a primitive. - */ - aiPrimitiveType_LINE = 0x2, - - /** A triangular primitive. - * - * A triangle consists of three indices. - */ - aiPrimitiveType_TRIANGLE = 0x4, - - /** A higher-level polygon with more than 3 edges. - * - * A triangle is a polygon, but polygon in this context means - * "all polygons that are not triangles". The "Triangulate"-Step - * is provided for your convenience, it splits all polygons in - * triangles (which are much easier to handle). - */ - aiPrimitiveType_POLYGON = 0x8, - - - /** This value is not used. It is just here to force the - * compiler to map this enum to a 32 Bit integer. - */ -#ifndef SWIG - _aiPrimitiveType_Force32Bit = INT_MAX -#endif -}; //! enum aiPrimitiveType - -// Get the #aiPrimitiveType flag for a specific number of face indices -#define AI_PRIMITIVE_TYPE_FOR_N_INDICES(n) \ - ((n) > 3 ? aiPrimitiveType_POLYGON : (aiPrimitiveType)(1u << ((n)-1))) - - - -// --------------------------------------------------------------------------- -/** @brief An AnimMesh is an attachment to an #aiMesh stores per-vertex - * animations for a particular frame. - * - * You may think of an #aiAnimMesh as a `patch` for the host mesh, which - * replaces only certain vertex data streams at a particular time. - * Each mesh stores n attached attached meshes (#aiMesh::mAnimMeshes). - * The actual relationship between the time line and anim meshes is - * established by #aiMeshAnim, which references singular mesh attachments - * by their ID and binds them to a time offset. 
-*/ -struct aiAnimMesh -{ - /**Anim Mesh name */ - C_STRUCT aiString mName; - - /** Replacement for aiMesh::mVertices. If this array is non-nullptr, - * it *must* contain mNumVertices entries. The corresponding - * array in the host mesh must be non-nullptr as well - animation - * meshes may neither add or nor remove vertex components (if - * a replacement array is nullptr and the corresponding source - * array is not, the source data is taken instead)*/ - C_STRUCT aiVector3D* mVertices; - - /** Replacement for aiMesh::mNormals. */ - C_STRUCT aiVector3D* mNormals; - - /** Replacement for aiMesh::mTangents. */ - C_STRUCT aiVector3D* mTangents; - - /** Replacement for aiMesh::mBitangents. */ - C_STRUCT aiVector3D* mBitangents; - - /** Replacement for aiMesh::mColors */ - C_STRUCT aiColor4D* mColors[AI_MAX_NUMBER_OF_COLOR_SETS]; - - /** Replacement for aiMesh::mTextureCoords */ - C_STRUCT aiVector3D* mTextureCoords[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - - /** The number of vertices in the aiAnimMesh, and thus the length of all - * the member arrays. - * - * This has always the same value as the mNumVertices property in the - * corresponding aiMesh. It is duplicated here merely to make the length - * of the member arrays accessible even if the aiMesh is not known, e.g. - * from language bindings. - */ - unsigned int mNumVertices; - - /** - * Weight of the AnimMesh. - */ - float mWeight; - -#ifdef __cplusplus - - aiAnimMesh() AI_NO_EXCEPT - : mVertices( nullptr ) - , mNormals(nullptr) - , mTangents(nullptr) - , mBitangents(nullptr) - , mColors() - , mTextureCoords() - , mNumVertices( 0 ) - , mWeight( 0.0f ) - { - // fixme consider moving this to the ctor initializer list as well - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; a++){ - mTextureCoords[a] = nullptr; - } - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; a++) { - mColors[a] = nullptr; - } - } - - ~aiAnimMesh() - { - delete [] mVertices; - delete [] mNormals; - delete [] mTangents; - delete [] mBitangents; - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; a++) { - delete [] mTextureCoords[a]; - } - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; a++) { - delete [] mColors[a]; - } - } - - /** Check whether the anim mesh overrides the vertex positions - * of its host mesh*/ - bool HasPositions() const { - return mVertices != nullptr; - } - - /** Check whether the anim mesh overrides the vertex normals - * of its host mesh*/ - bool HasNormals() const { - return mNormals != nullptr; - } - - /** Check whether the anim mesh overrides the vertex tangents - * and bitangents of its host mesh. As for aiMesh, - * tangents and bitangents always go together. */ - bool HasTangentsAndBitangents() const { - return mTangents != nullptr; - } - - /** Check whether the anim mesh overrides a particular - * set of vertex colors on his host mesh. - * @param pIndex 0<index<AI_MAX_NUMBER_OF_COLOR_SETS */ - bool HasVertexColors( unsigned int pIndex) const { - return pIndex >= AI_MAX_NUMBER_OF_COLOR_SETS ? false : mColors[pIndex] != nullptr; - } - - /** Check whether the anim mesh overrides a particular - * set of texture coordinates on his host mesh. - * @param pIndex 0<index<AI_MAX_NUMBER_OF_TEXTURECOORDS */ - bool HasTextureCoords( unsigned int pIndex) const { - return pIndex >= AI_MAX_NUMBER_OF_TEXTURECOORDS ? 
false : mTextureCoords[pIndex] != nullptr; - } - -#endif -}; - -// --------------------------------------------------------------------------- -/** @brief Enumerates the methods of mesh morphing supported by Assimp. - */ -enum aiMorphingMethod -{ - /** Interpolation between morph targets */ - aiMorphingMethod_VERTEX_BLEND = 0x1, - - /** Normalized morphing between morph targets */ - aiMorphingMethod_MORPH_NORMALIZED = 0x2, - - /** Relative morphing between morph targets */ - aiMorphingMethod_MORPH_RELATIVE = 0x3, - - /** This value is not used. It is just here to force the - * compiler to map this enum to a 32 Bit integer. - */ -#ifndef SWIG - _aiMorphingMethod_Force32Bit = INT_MAX -#endif -}; //! enum aiMorphingMethod - -// --------------------------------------------------------------------------- -/** @brief A mesh represents a geometry or model with a single material. -* -* It usually consists of a number of vertices and a series of primitives/faces -* referencing the vertices. In addition there might be a series of bones, each -* of them addressing a number of vertices with a certain weight. Vertex data -* is presented in channels with each channel containing a single per-vertex -* information such as a set of texture coords or a normal vector. -* If a data pointer is non-null, the corresponding data stream is present. -* From C++-programs you can also use the comfort functions Has*() to -* test for the presence of various data streams. -* -* A Mesh uses only a single material which is referenced by a material ID. -* @note The mPositions member is usually not optional. However, vertex positions -* *could* be missing if the #AI_SCENE_FLAGS_INCOMPLETE flag is set in -* @code -* aiScene::mFlags -* @endcode -*/ -struct aiMesh -{ - /** Bitwise combination of the members of the #aiPrimitiveType enum. - * This specifies which types of primitives are present in the mesh. - * The "SortByPrimitiveType"-Step can be used to make sure the - * output meshes consist of one primitive type each. - */ - unsigned int mPrimitiveTypes; - - /** The number of vertices in this mesh. - * This is also the size of all of the per-vertex data arrays. - * The maximum value for this member is #AI_MAX_VERTICES. - */ - unsigned int mNumVertices; - - /** The number of primitives (triangles, polygons, lines) in this mesh. - * This is also the size of the mFaces array. - * The maximum value for this member is #AI_MAX_FACES. - */ - unsigned int mNumFaces; - - /** Vertex positions. - * This array is always present in a mesh. The array is - * mNumVertices in size. - */ - C_STRUCT aiVector3D* mVertices; - - /** Vertex normals. - * The array contains normalized vectors, nullptr if not present. - * The array is mNumVertices in size. Normals are undefined for - * point and line primitives. A mesh consisting of points and - * lines only may not have normal vectors. Meshes with mixed - * primitive types (i.e. lines and triangles) may have normals, - * but the normals for vertices that are only referenced by - * point or line primitives are undefined and set to QNaN (WARN: - * qNaN compares to inequal to *everything*, even to qNaN itself. - * Using code like this to check whether a field is qnan is: - * @code - * #define IS_QNAN(f) (f != f) - * @endcode - * still dangerous because even 1.f == 1.f could evaluate to false! ( - * remember the subtleties of IEEE754 artithmetics). Use stuff like - * @c fpclassify instead. - * @note Normal vectors computed by Assimp are always unit-length. 
- * However, this needn't apply for normals that have been taken - * directly from the model file. - */ - C_STRUCT aiVector3D* mNormals; - - /** Vertex tangents. - * The tangent of a vertex points in the direction of the positive - * X texture axis. The array contains normalized vectors, nullptr if - * not present. The array is mNumVertices in size. A mesh consisting - * of points and lines only may not have normal vectors. Meshes with - * mixed primitive types (i.e. lines and triangles) may have - * normals, but the normals for vertices that are only referenced by - * point or line primitives are undefined and set to qNaN. See - * the #mNormals member for a detailed discussion of qNaNs. - * @note If the mesh contains tangents, it automatically also - * contains bitangents. - */ - C_STRUCT aiVector3D* mTangents; - - /** Vertex bitangents. - * The bitangent of a vertex points in the direction of the positive - * Y texture axis. The array contains normalized vectors, nullptr if not - * present. The array is mNumVertices in size. - * @note If the mesh contains tangents, it automatically also contains - * bitangents. - */ - C_STRUCT aiVector3D* mBitangents; - - /** Vertex color sets. - * A mesh may contain 0 to #AI_MAX_NUMBER_OF_COLOR_SETS vertex - * colors per vertex. nullptr if not present. Each array is - * mNumVertices in size if present. - */ - C_STRUCT aiColor4D* mColors[AI_MAX_NUMBER_OF_COLOR_SETS]; - - /** Vertex texture coords, also known as UV channels. - * A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per - * vertex. nullptr if not present. The array is mNumVertices in size. - */ - C_STRUCT aiVector3D* mTextureCoords[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - - /** Specifies the number of components for a given UV channel. - * Up to three channels are supported (UVW, for accessing volume - * or cube maps). If the value is 2 for a given channel n, the - * component p.z of mTextureCoords[n][p] is set to 0.0f. - * If the value is 1 for a given channel, p.y is set to 0.0f, too. - * @note 4D coords are not supported - */ - unsigned int mNumUVComponents[AI_MAX_NUMBER_OF_TEXTURECOORDS]; - - /** The faces the mesh is constructed from. - * Each face refers to a number of vertices by their indices. - * This array is always present in a mesh, its size is given - * in mNumFaces. If the #AI_SCENE_FLAGS_NON_VERBOSE_FORMAT - * is NOT set each face references an unique set of vertices. - */ - C_STRUCT aiFace* mFaces; - - /** The number of bones this mesh contains. - * Can be 0, in which case the mBones array is nullptr. - */ - unsigned int mNumBones; - - /** The bones of this mesh. - * A bone consists of a name by which it can be found in the - * frame hierarchy and a set of vertex weights. - */ - C_STRUCT aiBone** mBones; - - /** The material used by this mesh. - * A mesh uses only a single material. If an imported model uses - * multiple materials, the import splits up the mesh. Use this value - * as index into the scene's material list. - */ - unsigned int mMaterialIndex; - - /** Name of the mesh. Meshes can be named, but this is not a - * requirement and leaving this field empty is totally fine. - * There are mainly three uses for mesh names: - * - some formats name nodes and meshes independently. - * - importers tend to split meshes up to meet the - * one-material-per-mesh requirement. Assigning - * the same (dummy) name to each of the result meshes - * aids the caller at recovering the original mesh - * partitioning. - * - Vertex animations refer to meshes by their names. 
- **/ - C_STRUCT aiString mName; - - - /** The number of attachment meshes. Note! Currently only works with Collada loader. */ - unsigned int mNumAnimMeshes; - - /** Attachment meshes for this mesh, for vertex-based animation. - * Attachment meshes carry replacement data for some of the - * mesh'es vertex components (usually positions, normals). - * Note! Currently only works with Collada loader.*/ - C_STRUCT aiAnimMesh** mAnimMeshes; - - /** - * Method of morphing when animeshes are specified. - */ - unsigned int mMethod; - - /** - * - */ - C_STRUCT aiAABB mAABB; - -#ifdef __cplusplus - - //! Default constructor. Initializes all members to 0 - aiMesh() AI_NO_EXCEPT - : mPrimitiveTypes( 0 ) - , mNumVertices( 0 ) - , mNumFaces( 0 ) - , mVertices( nullptr ) - , mNormals(nullptr) - , mTangents(nullptr) - , mBitangents(nullptr) - , mColors() - , mTextureCoords() - , mNumUVComponents() - , mFaces(nullptr) - , mNumBones( 0 ) - , mBones(nullptr) - , mMaterialIndex( 0 ) - , mNumAnimMeshes( 0 ) - , mAnimMeshes(nullptr) - , mMethod( 0 ) - , mAABB() { - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a ) { - mNumUVComponents[a] = 0; - mTextureCoords[a] = nullptr; - } - - for (unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; ++a) { - mColors[a] = nullptr; - } - } - - //! Deletes all storage allocated for the mesh - ~aiMesh() { - delete [] mVertices; - delete [] mNormals; - delete [] mTangents; - delete [] mBitangents; - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; a++) { - delete [] mTextureCoords[a]; - } - for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; a++) { - delete [] mColors[a]; - } - - // DO NOT REMOVE THIS ADDITIONAL CHECK - if (mNumBones && mBones) { - for( unsigned int a = 0; a < mNumBones; a++) { - if(mBones[a]) - { - delete mBones[a]; - } - } - delete [] mBones; - } - - if (mNumAnimMeshes && mAnimMeshes) { - for( unsigned int a = 0; a < mNumAnimMeshes; a++) { - delete mAnimMeshes[a]; - } - delete [] mAnimMeshes; - } - - delete [] mFaces; - } - - //! Check whether the mesh contains positions. Provided no special - //! scene flags are set, this will always be true - bool HasPositions() const - { return mVertices != nullptr && mNumVertices > 0; } - - //! Check whether the mesh contains faces. If no special scene flags - //! are set this should always return true - bool HasFaces() const - { return mFaces != nullptr && mNumFaces > 0; } - - //! Check whether the mesh contains normal vectors - bool HasNormals() const - { return mNormals != nullptr && mNumVertices > 0; } - - //! Check whether the mesh contains tangent and bitangent vectors - //! It is not possible that it contains tangents and no bitangents - //! (or the other way round). The existence of one of them - //! implies that the second is there, too. - bool HasTangentsAndBitangents() const - { return mTangents != nullptr && mBitangents != nullptr && mNumVertices > 0; } - - //! Check whether the mesh contains a vertex color set - //! \param pIndex Index of the vertex color set - bool HasVertexColors( unsigned int pIndex) const { - if (pIndex >= AI_MAX_NUMBER_OF_COLOR_SETS) { - return false; - } else { - return mColors[pIndex] != nullptr && mNumVertices > 0; - } - } - - //! Check whether the mesh contains a texture coordinate set - //! \param pIndex Index of the texture coordinates set - bool HasTextureCoords( unsigned int pIndex) const { - if (pIndex >= AI_MAX_NUMBER_OF_TEXTURECOORDS) { - return false; - } else { - return mTextureCoords[pIndex] != nullptr && mNumVertices > 0; - } - } - - //! 
Get the number of UV channels the mesh contains - unsigned int GetNumUVChannels() const { - unsigned int n( 0 ); - while (n < AI_MAX_NUMBER_OF_TEXTURECOORDS && mTextureCoords[n]) { - ++n; - } - - return n; - } - - //! Get the number of vertex color channels the mesh contains - unsigned int GetNumColorChannels() const { - unsigned int n(0); - while (n < AI_MAX_NUMBER_OF_COLOR_SETS && mColors[n]) { - ++n; - } - return n; - } - - //! Check whether the mesh contains bones - bool HasBones() const { - return mBones != nullptr && mNumBones > 0; - } - -#endif // __cplusplus -}; - -#ifdef __cplusplus -} -#endif //! extern "C" -#endif // AI_MESH_H_INC - diff --git a/thirdparty/assimp/include/assimp/metadata.h b/thirdparty/assimp/include/assimp/metadata.h deleted file mode 100644 index 849d90f485..0000000000 --- a/thirdparty/assimp/include/assimp/metadata.h +++ /dev/null @@ -1,384 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file metadata.h - * @brief Defines the data structures for holding node meta information. 
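Callers walked the aiMesh structure removed above roughly as follows. This is a minimal, hypothetical sketch (extractMesh is an illustrative name), assuming the scene was imported with aiProcess_Triangulate so every aiFace carries exactly three indices.

#include <assimp/mesh.h>
#include <cstdio>
#include <vector>

// Flatten one aiMesh into position/index buffers and report a few of its channels.
static void extractMesh(const aiMesh *mesh,
                        std::vector<aiVector3D> &positions,
                        std::vector<unsigned int> &indices) {
    positions.assign(mesh->mVertices, mesh->mVertices + mesh->mNumVertices);
    for (unsigned int f = 0; f < mesh->mNumFaces; ++f) {
        const aiFace &face = mesh->mFaces[f];
        if (face.mNumIndices != 3) {
            continue; // points/lines/polygons are skipped in this sketch
        }
        for (unsigned int i = 0; i < face.mNumIndices; ++i) {
            indices.push_back(face.mIndices[i]);
        }
    }
    printf("%s: %u vertices, normals: %d, %u UV channel(s), %u bone(s)\n",
           mesh->mName.C_Str(), mesh->mNumVertices, mesh->HasNormals() ? 1 : 0,
           mesh->GetNumUVChannels(), mesh->mNumBones);
}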
- */ -#pragma once -#ifndef AI_METADATA_H_INC -#define AI_METADATA_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#if defined(_MSC_VER) && (_MSC_VER <= 1500) -# include "Compiler/pstdint.h" -#else -# include <stdint.h> -#endif - -// ------------------------------------------------------------------------------- -/** - * Enum used to distinguish data types - */ - // ------------------------------------------------------------------------------- -typedef enum aiMetadataType { - AI_BOOL = 0, - AI_INT32 = 1, - AI_UINT64 = 2, - AI_FLOAT = 3, - AI_DOUBLE = 4, - AI_AISTRING = 5, - AI_AIVECTOR3D = 6, - AI_META_MAX = 7, - -#ifndef SWIG - FORCE_32BIT = INT_MAX -#endif -} aiMetadataType; - -// ------------------------------------------------------------------------------- -/** - * Metadata entry - * - * The type field uniquely identifies the underlying type of the data field - */ - // ------------------------------------------------------------------------------- -struct aiMetadataEntry { - aiMetadataType mType; - void* mData; -}; - -#ifdef __cplusplus - -#include <string> - -// ------------------------------------------------------------------------------- -/** - * Helper functions to get the aiType enum entry for a type - */ - // ------------------------------------------------------------------------------- - -inline aiMetadataType GetAiType( bool ) { return AI_BOOL; } -inline aiMetadataType GetAiType( int32_t ) { return AI_INT32; } -inline aiMetadataType GetAiType( uint64_t ) { return AI_UINT64; } -inline aiMetadataType GetAiType( float ) { return AI_FLOAT; } -inline aiMetadataType GetAiType( double ) { return AI_DOUBLE; } -inline aiMetadataType GetAiType( const aiString & ) { return AI_AISTRING; } -inline aiMetadataType GetAiType( const aiVector3D & ) { return AI_AIVECTOR3D; } - -#endif // __cplusplus - -// ------------------------------------------------------------------------------- -/** - * Container for holding metadata. - * - * Metadata is a key-value store using string keys and values. - */ - // ------------------------------------------------------------------------------- -struct aiMetadata { - /** Length of the mKeys and mValues arrays, respectively */ - unsigned int mNumProperties; - - /** Arrays of keys, may not be NULL. Entries in this array may not be NULL as well. */ - C_STRUCT aiString* mKeys; - - /** Arrays of values, may not be NULL. Entries in this array may be NULL if the - * corresponding property key has no assigned value. */ - C_STRUCT aiMetadataEntry* mValues; - -#ifdef __cplusplus - - /** - * @brief The default constructor, set all members to zero by default. 
- */ - aiMetadata() AI_NO_EXCEPT - : mNumProperties(0) - , mKeys(nullptr) - , mValues(nullptr) { - // empty - } - - aiMetadata( const aiMetadata &rhs ) - : mNumProperties( rhs.mNumProperties ) - , mKeys( nullptr ) - , mValues( nullptr ) { - mKeys = new aiString[ mNumProperties ]; - for ( size_t i = 0; i < static_cast<size_t>( mNumProperties ); ++i ) { - mKeys[ i ] = rhs.mKeys[ i ]; - } - mValues = new aiMetadataEntry[ mNumProperties ]; - for ( size_t i = 0; i < static_cast<size_t>(mNumProperties); ++i ) { - mValues[ i ].mType = rhs.mValues[ i ].mType; - switch ( rhs.mValues[ i ].mType ) { - case AI_BOOL: - mValues[ i ].mData = new bool; - ::memcpy( mValues[ i ].mData, rhs.mValues[ i ].mData, sizeof(bool) ); - break; - case AI_INT32: { - int32_t v; - ::memcpy( &v, rhs.mValues[ i ].mData, sizeof( int32_t ) ); - mValues[ i ].mData = new int32_t( v ); - } - break; - case AI_UINT64: { - uint64_t v; - ::memcpy( &v, rhs.mValues[ i ].mData, sizeof( uint64_t ) ); - mValues[ i ].mData = new uint64_t( v ); - } - break; - case AI_FLOAT: { - float v; - ::memcpy( &v, rhs.mValues[ i ].mData, sizeof( float ) ); - mValues[ i ].mData = new float( v ); - } - break; - case AI_DOUBLE: { - double v; - ::memcpy( &v, rhs.mValues[ i ].mData, sizeof( double ) ); - mValues[ i ].mData = new double( v ); - } - break; - case AI_AISTRING: { - aiString v; - rhs.Get<aiString>( mKeys[ i ], v ); - mValues[ i ].mData = new aiString( v ); - } - break; - case AI_AIVECTOR3D: { - aiVector3D v; - rhs.Get<aiVector3D>( mKeys[ i ], v ); - mValues[ i ].mData = new aiVector3D( v ); - } - break; -#ifndef SWIG - case FORCE_32BIT: -#endif - default: - break; - } - - } - } - - /** - * @brief The destructor. - */ - ~aiMetadata() { - delete [] mKeys; - mKeys = nullptr; - if (mValues) { - // Delete each metadata entry - for (unsigned i=0; i<mNumProperties; ++i) { - void* data = mValues[i].mData; - switch (mValues[i].mType) { - case AI_BOOL: - delete static_cast< bool* >( data ); - break; - case AI_INT32: - delete static_cast< int32_t* >( data ); - break; - case AI_UINT64: - delete static_cast< uint64_t* >( data ); - break; - case AI_FLOAT: - delete static_cast< float* >( data ); - break; - case AI_DOUBLE: - delete static_cast< double* >( data ); - break; - case AI_AISTRING: - delete static_cast< aiString* >( data ); - break; - case AI_AIVECTOR3D: - delete static_cast< aiVector3D* >( data ); - break; -#ifndef SWIG - case FORCE_32BIT: -#endif - default: - break; - } - } - - // Delete the metadata array - delete [] mValues; - mValues = nullptr; - } - } - - /** - * @brief Allocates property fields + keys. - * @param numProperties Number of requested properties. - */ - static inline - aiMetadata *Alloc( unsigned int numProperties ) { - if ( 0 == numProperties ) { - return nullptr; - } - - aiMetadata *data = new aiMetadata; - data->mNumProperties = numProperties; - data->mKeys = new aiString[ data->mNumProperties ](); - data->mValues = new aiMetadataEntry[ data->mNumProperties ](); - - return data; - } - - /** - * @brief Deallocates property fields + keys. 
- */ - static inline - void Dealloc( aiMetadata *metadata ) { - delete metadata; - } - - template<typename T> - inline - void Add(const std::string& key, const T& value) { - aiString* new_keys = new aiString[mNumProperties + 1]; - aiMetadataEntry* new_values = new aiMetadataEntry[mNumProperties + 1]; - - for(unsigned int i = 0; i < mNumProperties; ++i) - { - new_keys[i] = mKeys[i]; - new_values[i] = mValues[i]; - } - - delete mKeys; - delete mValues; - - mKeys = new_keys; - mValues = new_values; - - mNumProperties++; - - Set(mNumProperties - 1, key, value); - } - - template<typename T> - inline - bool Set( unsigned index, const std::string& key, const T& value ) { - // In range assertion - if ( index >= mNumProperties ) { - return false; - } - - // Ensure that we have a valid key. - if ( key.empty() ) { - return false; - } - - // Set metadata key - mKeys[index] = key; - - // Set metadata type - mValues[index].mType = GetAiType(value); - // Copy the given value to the dynamic storage - mValues[index].mData = new T(value); - - return true; - } - - template<typename T> - inline - bool Get( unsigned index, T& value ) const { - // In range assertion - if ( index >= mNumProperties ) { - return false; - } - - // Return false if the output data type does - // not match the found value's data type - if ( GetAiType( value ) != mValues[ index ].mType ) { - return false; - } - - // Otherwise, output the found value and - // return true - value = *static_cast<T*>(mValues[index].mData); - - return true; - } - - template<typename T> - inline - bool Get( const aiString& key, T& value ) const { - // Search for the given key - for ( unsigned int i = 0; i < mNumProperties; ++i ) { - if ( mKeys[ i ] == key ) { - return Get( i, value ); - } - } - return false; - } - - template<typename T> - inline - bool Get( const std::string& key, T& value ) const { - return Get(aiString(key), value); - } - - /// Return metadata entry for analyzing it by user. - /// \param [in] pIndex - index of the entry. - /// \param [out] pKey - pointer to the key value. - /// \param [out] pEntry - pointer to the entry: type and value. - /// \return false - if pIndex is out of range, else - true. - inline - bool Get(size_t index, const aiString*& key, const aiMetadataEntry*& entry) const { - if ( index >= mNumProperties ) { - return false; - } - - key = &mKeys[index]; - entry = &mValues[index]; - - return true; - } - -#endif // __cplusplus - -}; - -#endif // AI_METADATA_H_INC diff --git a/thirdparty/assimp/include/assimp/pbrmaterial.h b/thirdparty/assimp/include/assimp/pbrmaterial.h deleted file mode 100644 index 892a6347f7..0000000000 --- a/thirdparty/assimp/include/assimp/pbrmaterial.h +++ /dev/null @@ -1,80 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. 
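The aiMetadata container deleted above is a plain index-based key/value store. A minimal, hypothetical round trip through its Alloc/Set/Get/Dealloc helpers (all shown in the hunk above; metadataExample is an illustrative name, and aiString comes from types.h, outside this hunk):

#include <assimp/types.h>    // aiString
#include <assimp/metadata.h>
#include <cstdio>

static void metadataExample() {
    aiMetadata *meta = aiMetadata::Alloc(2);

    aiString tool;
    tool.Set("hypothetical_exporter");
    meta->Set(0, "source_tool", tool); // stored as AI_AISTRING
    meta->Set(1, "frame_rate", 30.0);  // stored as AI_DOUBLE

    double fps = 0.0;
    if (meta->Get("frame_rate", fps)) {
        printf("frame_rate = %f\n", fps);
    }
    aiMetadata::Dealloc(meta);
}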
- -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file pbrmaterial.h - * @brief Defines the material system of the library - */ -#pragma once -#ifndef AI_PBRMATERIAL_H_INC -#define AI_PBRMATERIAL_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#define AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_BASE_COLOR_FACTOR "$mat.gltf.pbrMetallicRoughness.baseColorFactor", 0, 0 -#define AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLIC_FACTOR "$mat.gltf.pbrMetallicRoughness.metallicFactor", 0, 0 -#define AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_ROUGHNESS_FACTOR "$mat.gltf.pbrMetallicRoughness.roughnessFactor", 0, 0 -#define AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_BASE_COLOR_TEXTURE aiTextureType_DIFFUSE, 1 -#define AI_MATKEY_GLTF_PBRMETALLICROUGHNESS_METALLICROUGHNESS_TEXTURE aiTextureType_UNKNOWN, 0 -#define AI_MATKEY_GLTF_ALPHAMODE "$mat.gltf.alphaMode", 0, 0 -#define AI_MATKEY_GLTF_ALPHACUTOFF "$mat.gltf.alphaCutoff", 0, 0 -#define AI_MATKEY_GLTF_PBRSPECULARGLOSSINESS "$mat.gltf.pbrSpecularGlossiness", 0, 0 -#define AI_MATKEY_GLTF_PBRSPECULARGLOSSINESS_GLOSSINESS_FACTOR "$mat.gltf.pbrMetallicRoughness.glossinessFactor", 0, 0 -#define AI_MATKEY_GLTF_UNLIT "$mat.gltf.unlit", 0, 0 - -#define _AI_MATKEY_GLTF_TEXTURE_TEXCOORD_BASE "$tex.file.texCoord" -#define _AI_MATKEY_GLTF_MAPPINGNAME_BASE "$tex.mappingname" -#define _AI_MATKEY_GLTF_MAPPINGID_BASE "$tex.mappingid" -#define _AI_MATKEY_GLTF_MAPPINGFILTER_MAG_BASE "$tex.mappingfiltermag" -#define _AI_MATKEY_GLTF_MAPPINGFILTER_MIN_BASE "$tex.mappingfiltermin" -#define _AI_MATKEY_GLTF_SCALE_BASE "$tex.scale" -#define _AI_MATKEY_GLTF_STRENGTH_BASE "$tex.strength" - -#define AI_MATKEY_GLTF_TEXTURE_TEXCOORD(type, N) _AI_MATKEY_GLTF_TEXTURE_TEXCOORD_BASE, type, N -#define AI_MATKEY_GLTF_MAPPINGNAME(type, N) _AI_MATKEY_GLTF_MAPPINGNAME_BASE, type, N -#define AI_MATKEY_GLTF_MAPPINGID(type, N) _AI_MATKEY_GLTF_MAPPINGID_BASE, type, N -#define AI_MATKEY_GLTF_MAPPINGFILTER_MAG(type, N) _AI_MATKEY_GLTF_MAPPINGFILTER_MAG_BASE, type, N -#define AI_MATKEY_GLTF_MAPPINGFILTER_MIN(type, N) _AI_MATKEY_GLTF_MAPPINGFILTER_MIN_BASE, type, N -#define AI_MATKEY_GLTF_TEXTURE_SCALE(type, N) _AI_MATKEY_GLTF_SCALE_BASE, type, N -#define AI_MATKEY_GLTF_TEXTURE_STRENGTH(type, N) _AI_MATKEY_GLTF_STRENGTH_BASE, type, N - -#endif //!!AI_PBRMATERIAL_H_INC diff --git a/thirdparty/assimp/include/assimp/port/AndroidJNI/AndroidJNIIOSystem.h b/thirdparty/assimp/include/assimp/port/AndroidJNI/AndroidJNIIOSystem.h deleted file mode 100644 index 41d8004877..0000000000 --- 
a/thirdparty/assimp/include/assimp/port/AndroidJNI/AndroidJNIIOSystem.h +++ /dev/null @@ -1,92 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2016, assimp team -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file Android implementation of IOSystem using the standard C file functions. - * Aimed to ease the access to android assets */ - -#if __ANDROID__ and __ANDROID_API__ > 9 and defined(AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT) -#ifndef AI_ANDROIDJNIIOSYSTEM_H_INC -#define AI_ANDROIDJNIIOSYSTEM_H_INC - -#include <assimp/DefaultIOSystem.h> -#include <android/asset_manager.h> -#include <android/asset_manager_jni.h> -#include <android/native_activity.h> - -namespace Assimp { - -// --------------------------------------------------------------------------- -/** Android extension to DefaultIOSystem using the standard C file functions */ -class ASSIMP_API AndroidJNIIOSystem : public DefaultIOSystem -{ -public: - - /** Initialize android activity data */ - std::string mApkWorkspacePath; - AAssetManager* mApkAssetManager; - - /** Constructor. */ - AndroidJNIIOSystem(ANativeActivity* activity); - - /** Destructor. */ - ~AndroidJNIIOSystem(); - - // ------------------------------------------------------------------- - /** Tests for the existence of a file at the given path. 
*/ - bool Exists( const char* pFile) const; - - // ------------------------------------------------------------------- - /** Opens a file at the given path, with given mode */ - IOStream* Open( const char* strFile, const char* strMode); - - // ------------------------------------------------------------------------------------------------ - // Inits Android extractor - void AndroidActivityInit(ANativeActivity* activity); - - // ------------------------------------------------------------------------------------------------ - // Extracts android asset - bool AndroidExtractAsset(std::string name); - -}; - -} //!ns Assimp - -#endif //AI_ANDROIDJNIIOSYSTEM_H_INC -#endif //__ANDROID__ and __ANDROID_API__ > 9 and defined(AI_CONFIG_ANDROID_JNI_ASSIMP_MANAGER_SUPPORT) diff --git a/thirdparty/assimp/include/assimp/postprocess.h b/thirdparty/assimp/include/assimp/postprocess.h deleted file mode 100644 index 4b6732e80a..0000000000 --- a/thirdparty/assimp/include/assimp/postprocess.h +++ /dev/null @@ -1,703 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- -*/ - -/** @file postprocess.h - * @brief Definitions for import post processing steps - */ -#pragma once -#ifndef AI_POSTPROCESS_H_INC -#define AI_POSTPROCESS_H_INC - -#include <assimp/types.h> - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -// ----------------------------------------------------------------------------------- -/** @enum aiPostProcessSteps - * @brief Defines the flags for all possible post processing steps. 
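The AndroidJNIIOSystem class removed just above routes file access through the APK's AAssetManager. A minimal, hypothetical sketch of how it was typically attached to an importer, only meaningful under the __ANDROID__ guards shown in that header; Assimp::Importer with SetIOHandler() and ReadFile() are declared in Importer.hpp, outside this hunk, and loadFromApk is an illustrative name.

#include <assimp/port/AndroidJNI/AndroidJNIIOSystem.h>
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>

// Route importer I/O through the APK, then read a model bundled as an asset.
static const aiScene *loadFromApk(ANativeActivity *activity, const char *assetPath,
                                  Assimp::Importer &importer) {
    importer.SetIOHandler(new Assimp::AndroidJNIIOSystem(activity)); // importer manages the IOSystem from here
    return importer.ReadFile(assetPath,
                             aiProcess_Triangulate | aiProcess_JoinIdenticalVertices);
}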
- * - * @note Some steps are influenced by properties set on the Assimp::Importer itself - * - * @see Assimp::Importer::ReadFile() - * @see Assimp::Importer::SetPropertyInteger() - * @see aiImportFile - * @see aiImportFileEx - */ -// ----------------------------------------------------------------------------------- -enum aiPostProcessSteps -{ - - // ------------------------------------------------------------------------- - /** <hr>Calculates the tangents and bitangents for the imported meshes. - * - * Does nothing if a mesh does not have normals. You might want this post - * processing step to be executed if you plan to use tangent space calculations - * such as normal mapping applied to the meshes. There's an importer property, - * <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE</tt>, which allows you to specify - * a maximum smoothing angle for the algorithm. However, usually you'll - * want to leave it at the default value. - */ - aiProcess_CalcTangentSpace = 0x1, - - // ------------------------------------------------------------------------- - /** <hr>Identifies and joins identical vertex data sets within all - * imported meshes. - * - * After this step is run, each mesh contains unique vertices, - * so a vertex may be used by multiple faces. You usually want - * to use this post processing step. If your application deals with - * indexed geometry, this step is compulsory or you'll just waste rendering - * time. <b>If this flag is not specified</b>, no vertices are referenced by - * more than one face and <b>no index buffer is required</b> for rendering. - */ - aiProcess_JoinIdenticalVertices = 0x2, - - // ------------------------------------------------------------------------- - /** <hr>Converts all the imported data to a left-handed coordinate space. - * - * By default the data is returned in a right-handed coordinate space (which - * OpenGL prefers). In this space, +X points to the right, - * +Z points towards the viewer, and +Y points upwards. In the DirectX - * coordinate space +X points to the right, +Y points upwards, and +Z points - * away from the viewer. - * - * You'll probably want to consider this flag if you use Direct3D for - * rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this - * setting and bundles all conversions typically required for D3D-based - * applications. - */ - aiProcess_MakeLeftHanded = 0x4, - - // ------------------------------------------------------------------------- - /** <hr>Triangulates all faces of all meshes. - * - * By default the imported mesh data might contain faces with more than 3 - * indices. For rendering you'll usually want all faces to be triangles. - * This post processing step splits up faces with more than 3 indices into - * triangles. Line and point primitives are *not* modified! If you want - * 'triangles only' with no other kinds of primitives, try the following - * solution: - * <ul> - * <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType </li> - * <li>Ignore all point and line meshes when you process assimp's output</li> - * </ul> - */ - aiProcess_Triangulate = 0x8, - - // ------------------------------------------------------------------------- - /** <hr>Removes some parts of the data structure (animations, materials, - * light sources, cameras, textures, vertex components). - * - * The components to be removed are specified in a separate - * importer property, <tt>#AI_CONFIG_PP_RVC_FLAGS</tt>. This is quite useful - * if you don't need all parts of the output structure. 
Vertex colors - * are rarely used today for example... Calling this step to remove unneeded - * data from the pipeline as early as possible results in increased - * performance and a more optimized output data structure. - * This step is also useful if you want to force Assimp to recompute - * normals or tangents. The corresponding steps don't recompute them if - * they're already there (loaded from the source asset). By using this - * step you can make sure they are NOT there. - * - * This flag is a poor one, mainly because its purpose is usually - * misunderstood. Consider the following case: a 3D model has been exported - * from a CAD app, and it has per-face vertex colors. Vertex positions can't be - * shared, thus the #aiProcess_JoinIdenticalVertices step fails to - * optimize the data because of these nasty little vertex colors. - * Most apps don't even process them, so it's all for nothing. By using - * this step, unneeded components are excluded as early as possible - * thus opening more room for internal optimizations. - */ - aiProcess_RemoveComponent = 0x10, - - // ------------------------------------------------------------------------- - /** <hr>Generates normals for all faces of all meshes. - * - * This is ignored if normals are already there at the time this flag - * is evaluated. Model importers try to load them from the source file, so - * they're usually already there. Face normals are shared between all points - * of a single face, so a single point can have multiple normals, which - * forces the library to duplicate vertices in some cases. - * #aiProcess_JoinIdenticalVertices is *senseless* then. - * - * This flag may not be specified together with #aiProcess_GenSmoothNormals. - */ - aiProcess_GenNormals = 0x20, - - // ------------------------------------------------------------------------- - /** <hr>Generates smooth normals for all vertices in the mesh. - * - * This is ignored if normals are already there at the time this flag - * is evaluated. Model importers try to load them from the source file, so - * they're usually already there. - * - * This flag may not be specified together with - * #aiProcess_GenNormals. There's a importer property, - * <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE</tt> which allows you to specify - * an angle maximum for the normal smoothing algorithm. Normals exceeding - * this limit are not smoothed, resulting in a 'hard' seam between two faces. - * Using a decent angle here (e.g. 80 degrees) results in very good visual - * appearance. - */ - aiProcess_GenSmoothNormals = 0x40, - - // ------------------------------------------------------------------------- - /** <hr>Splits large meshes into smaller sub-meshes. - * - * This is quite useful for real-time rendering, where the number of triangles - * which can be maximally processed in a single draw-call is limited - * by the video driver/hardware. The maximum vertex buffer is usually limited - * too. Both requirements can be met with this step: you may specify both a - * triangle and vertex limit for a single mesh. - * - * The split limits can (and should!) be set through the - * <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT</tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT</tt> - * importer properties. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES</tt> and - * <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES</tt>. - * - * Note that splitting is generally a time-consuming task, but only if there's - * something to split. The use of this step is recommended for most users. 
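The split limits mentioned in the aiProcess_SplitLargeMeshes notes above are ordinary importer properties. A short, hypothetical sketch; SetPropertyInteger() and ReadFile() come from Importer.hpp and the AI_CONFIG_PP_SLM_* keys from config.h, both outside this hunk, and the numeric limits are illustrative values rather than the library defaults.

#include <assimp/Importer.hpp>
#include <assimp/config.h>
#include <assimp/postprocess.h>

// Cap per-mesh vertex/triangle counts before splitting large meshes.
static const aiScene *importWithSplitLimits(Assimp::Importer &importer, const char *path) {
    importer.SetPropertyInteger(AI_CONFIG_PP_SLM_VERTEX_LIMIT, 65000);
    importer.SetPropertyInteger(AI_CONFIG_PP_SLM_TRIANGLE_LIMIT, 100000);
    return importer.ReadFile(path, aiProcess_Triangulate | aiProcess_SplitLargeMeshes);
}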
- */ - aiProcess_SplitLargeMeshes = 0x80, - - // ------------------------------------------------------------------------- - /** <hr>Removes the node graph and pre-transforms all vertices with - * the local transformation matrices of their nodes. - * - * The output scene still contains nodes, however there is only a - * root node with children, each one referencing only one mesh, - * and each mesh referencing one material. For rendering, you can - * simply render all meshes in order - you don't need to pay - * attention to local transformations and the node hierarchy. - * Animations are removed during this step. - * This step is intended for applications without a scenegraph. - * The step CAN cause some problems: if e.g. a mesh of the asset - * contains normals and another, using the same material index, does not, - * they will be brought together, but the first meshes's part of - * the normal list is zeroed. However, these artifacts are rare. - * @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE</tt> configuration property - * can be set to normalize the scene's spatial dimension to the -1...1 - * range. - */ - aiProcess_PreTransformVertices = 0x100, - - // ------------------------------------------------------------------------- - /** <hr>Limits the number of bones simultaneously affecting a single vertex - * to a maximum value. - * - * If any vertex is affected by more than the maximum number of bones, the least - * important vertex weights are removed and the remaining vertex weights are - * renormalized so that the weights still sum up to 1. - * The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS</tt> in - * config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS</tt> importer - * property to supply your own limit to the post processing step. - * - * If you intend to perform the skinning in hardware, this post processing - * step might be of interest to you. - */ - aiProcess_LimitBoneWeights = 0x200, - - // ------------------------------------------------------------------------- - /** <hr>Validates the imported scene data structure. - * This makes sure that all indices are valid, all animations and - * bones are linked correctly, all material references are correct .. etc. - * - * It is recommended that you capture Assimp's log output if you use this flag, - * so you can easily find out what's wrong if a file fails the - * validation. The validator is quite strict and will find *all* - * inconsistencies in the data structure... It is recommended that plugin - * developers use it to debug their loaders. There are two types of - * validation failures: - * <ul> - * <li>Error: There's something wrong with the imported data. Further - * postprocessing is not possible and the data is not usable at all. - * The import fails. #Importer::GetErrorString() or #aiGetErrorString() - * carry the error message around.</li> - * <li>Warning: There are some minor issues (e.g. 1000000 animation - * keyframes with the same time), but further postprocessing and use - * of the data structure is still safe. Warning details are written - * to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING</tt> is set - * in #aiScene::mFlags</li> - * </ul> - * - * This post-processing step is not time-consuming. Its use is not - * compulsory, but recommended. - */ - aiProcess_ValidateDataStructure = 0x400, - - // ------------------------------------------------------------------------- - /** <hr>Reorders triangles for better vertex cache locality. 
- * - * The step tries to improve the ACMR (average post-transform vertex cache - * miss ratio) for all meshes. The implementation runs in O(n) and is - * roughly based on the 'tipsify' algorithm (see <a href=" - * http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this - * paper</a>). - * - * If you intend to render huge models in hardware, this step might - * be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE</tt> - * importer property can be used to fine-tune the cache optimization. - */ - aiProcess_ImproveCacheLocality = 0x800, - - // ------------------------------------------------------------------------- - /** <hr>Searches for redundant/unreferenced materials and removes them. - * - * This is especially useful in combination with the - * #aiProcess_PreTransformVertices and #aiProcess_OptimizeMeshes flags. - * Both join small meshes with equal characteristics, but they can't do - * their work if two meshes have different materials. Because several - * material settings are lost during Assimp's import filters, - * (and because many exporters don't check for redundant materials), huge - * models often have materials which are are defined several times with - * exactly the same settings. - * - * Several material settings not contributing to the final appearance of - * a surface are ignored in all comparisons (e.g. the material name). - * So, if you're passing additional information through the - * content pipeline (probably using *magic* material names), don't - * specify this flag. Alternatively take a look at the - * <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST</tt> importer property. - */ - aiProcess_RemoveRedundantMaterials = 0x1000, - - // ------------------------------------------------------------------------- - /** <hr>This step tries to determine which meshes have normal vectors - * that are facing inwards and inverts them. - * - * The algorithm is simple but effective: - * the bounding box of all vertices + their normals is compared against - * the volume of the bounding box of all vertices without their normals. - * This works well for most objects, problems might occur with planar - * surfaces. However, the step tries to filter such cases. - * The step inverts all in-facing normals. Generally it is recommended - * to enable this step, although the result is not always correct. - */ - aiProcess_FixInfacingNormals = 0x2000, - - - - // ------------------------------------------------------------------------- - /** - * This step generically populates aiBone->mArmature and aiBone->mNode generically - * The point of these is it saves you later having to calculate these elements - * This is useful when handling rest information or skin information - * If you have multiple armatures on your models we strongly recommend enabling this - * Instead of writing your own multi-root, multi-armature lookups we have done the - * hard work for you :) - */ - aiProcess_PopulateArmatureData = 0x4000, - - // ------------------------------------------------------------------------- - /** <hr>This step splits meshes with more than one primitive type in - * homogeneous sub-meshes. - * - * The step is executed after the triangulation step. After the step - * returns, just one bit is set in aiMesh::mPrimitiveTypes. This is - * especially useful for real-time rendering where point and line - * primitives are often ignored or rendered separately. - * You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE</tt> importer property to - * specify which primitive types you need. 
This can be used to easily - * exclude lines and points, which are rarely used, from the import. - */ - aiProcess_SortByPType = 0x8000, - - // ------------------------------------------------------------------------- - /** <hr>This step searches all meshes for degenerate primitives and - * converts them to proper lines or points. - * - * A face is 'degenerate' if one or more of its points are identical. - * To have the degenerate stuff not only detected and collapsed but - * removed, try one of the following procedures: - * <br><b>1.</b> (if you support lines and points for rendering but don't - * want the degenerates)<br> - * <ul> - * <li>Specify the #aiProcess_FindDegenerates flag. - * </li> - * <li>Set the <tt>#AI_CONFIG_PP_FD_REMOVE</tt> importer property to - * 1. This will cause the step to remove degenerate triangles from the - * import as soon as they're detected. They won't pass any further - * pipeline steps. - * </li> - * </ul> - * <br><b>2.</b>(if you don't support lines and points at all)<br> - * <ul> - * <li>Specify the #aiProcess_FindDegenerates flag. - * </li> - * <li>Specify the #aiProcess_SortByPType flag. This moves line and - * point primitives to separate meshes. - * </li> - * <li>Set the <tt>#AI_CONFIG_PP_SBP_REMOVE</tt> importer property to - * @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES - * @endcode to cause SortByPType to reject point - * and line meshes from the scene. - * </li> - * </ul> - * - * This step also removes very small triangles with a surface area smaller - * than 10^-6. If you rely on having these small triangles, or notice holes - * in your model, set the property <tt>#AI_CONFIG_PP_FD_CHECKAREA</tt> to - * false. - * @note Degenerate polygons are not necessarily evil and that's why - * they're not removed by default. There are several file formats which - * don't support lines or points, and some exporters bypass the - * format specification and write them as degenerate triangles instead. - */ - aiProcess_FindDegenerates = 0x10000, - - // ------------------------------------------------------------------------- - /** <hr>This step searches all meshes for invalid data, such as zeroed - * normal vectors or invalid UV coords and removes/fixes them. This is - * intended to get rid of some common exporter errors. - * - * This is especially useful for normals. If they are invalid, and - * the step recognizes this, they will be removed and can later - * be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br> - * The step will also remove meshes that are infinitely small and reduce - * animation tracks consisting of hundreds if redundant keys to a single - * key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY</tt> config property decides - * the accuracy of the check for duplicate animation tracks. - */ - aiProcess_FindInvalidData = 0x20000, - - // ------------------------------------------------------------------------- - /** <hr>This step converts non-UV mappings (such as spherical or - * cylindrical mapping) to proper texture coordinate channels. - * - * Most applications will support UV mapping only, so you will - * probably want to specify this step in every case. Note that Assimp is not - * always able to match the original mapping implementation of the - * 3D app which produced a model perfectly. It's always better to let the - * modelling app compute the UV channels - 3ds max, Maya, Blender, - * LightWave, and Modo do this for example. 
- * - * @note If this step is not requested, you'll need to process the - * <tt>#AI_MATKEY_MAPPING</tt> material property in order to display all assets - * properly. - */ - aiProcess_GenUVCoords = 0x40000, - - // ------------------------------------------------------------------------- - /** <hr>This step applies per-texture UV transformations and bakes - * them into stand-alone vtexture coordinate channels. - * - * UV transformations are specified per-texture - see the - * <tt>#AI_MATKEY_UVTRANSFORM</tt> material key for more information. - * This step processes all textures with - * transformed input UV coordinates and generates a new (pre-transformed) UV channel - * which replaces the old channel. Most applications won't support UV - * transformations, so you will probably want to specify this step. - * - * @note UV transformations are usually implemented in real-time apps by - * transforming texture coordinates at vertex shader stage with a 3x3 - * (homogenous) transformation matrix. - */ - aiProcess_TransformUVCoords = 0x80000, - - // ------------------------------------------------------------------------- - /** <hr>This step searches for duplicate meshes and replaces them - * with references to the first mesh. - * - * This step takes a while, so don't use it if speed is a concern. - * Its main purpose is to workaround the fact that many export - * file formats don't support instanced meshes, so exporters need to - * duplicate meshes. This step removes the duplicates again. Please - * note that Assimp does not currently support per-node material - * assignment to meshes, which means that identical meshes with - * different materials are currently *not* joined, although this is - * planned for future versions. - */ - aiProcess_FindInstances = 0x100000, - - // ------------------------------------------------------------------------- - /** <hr>A post-processing step to reduce the number of meshes. - * - * This will, in fact, reduce the number of draw calls. - * - * This is a very effective optimization and is recommended to be used - * together with #aiProcess_OptimizeGraph, if possible. The flag is fully - * compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType. - */ - aiProcess_OptimizeMeshes = 0x200000, - - - // ------------------------------------------------------------------------- - /** <hr>A post-processing step to optimize the scene hierarchy. - * - * Nodes without animations, bones, lights or cameras assigned are - * collapsed and joined. - * - * Node names can be lost during this step. If you use special 'tag nodes' - * to pass additional information through your content pipeline, use the - * <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST</tt> importer property to specify a - * list of node names you want to be kept. Nodes matching one of the names - * in this list won't be touched or modified. - * - * Use this flag with caution. Most simple files will be collapsed to a - * single node, so complex hierarchies are usually completely lost. This is not - * useful for editor environments, but probably a very effective - * optimization if you just want to get the model data, convert it to your - * own format, and render it as fast as possible. - * - * This flag is designed to be used with #aiProcess_OptimizeMeshes for best - * results. - * - * @note 'Crappy' scenes with thousands of extremely small meshes packed - * in deeply nested nodes exist for almost all file formats. 
- * #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph - * usually fixes them all and makes them renderable. - */ - aiProcess_OptimizeGraph = 0x400000, - - // ------------------------------------------------------------------------- - /** <hr>This step flips all UV coordinates along the y-axis and adjusts - * material settings and bitangents accordingly. - * - * <b>Output UV coordinate system:</b> - * @code - * 0y|0y ---------- 1x|0y - * | | - * | | - * | | - * 0x|1y ---------- 1x|1y - * @endcode - * - * You'll probably want to consider this flag if you use Direct3D for - * rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this - * setting and bundles all conversions typically required for D3D-based - * applications. - */ - aiProcess_FlipUVs = 0x800000, - - // ------------------------------------------------------------------------- - /** <hr>This step adjusts the output face winding order to be CW. - * - * The default face winding order is counter clockwise (CCW). - * - * <b>Output face order:</b> - * @code - * x2 - * - * x0 - * x1 - * @endcode - */ - aiProcess_FlipWindingOrder = 0x1000000, - - // ------------------------------------------------------------------------- - /** <hr>This step splits meshes with many bones into sub-meshes so that each - * sub-mesh has fewer or as many bones as a given limit. - */ - aiProcess_SplitByBoneCount = 0x2000000, - - // ------------------------------------------------------------------------- - /** <hr>This step removes bones losslessly or according to some threshold. - * - * In some cases (i.e. formats that require it) exporters are forced to - * assign dummy bone weights to otherwise static meshes assigned to - * animated meshes. Full, weight-based skinning is expensive while - * animating nodes is extremely cheap, so this step is offered to clean up - * the data in that regard. - * - * Use <tt>#AI_CONFIG_PP_DB_THRESHOLD</tt> to control this. - * Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE</tt> if you want bones removed if and - * only if all bones within the scene qualify for removal. - */ - aiProcess_Debone = 0x4000000, - - - - // ------------------------------------------------------------------------- - /** <hr>This step will perform a global scale of the model. - * - * Some importers are providing a mechanism to define a scaling unit for the - * model. This post processing step can be used to do so. You need to get the - * global scaling from your importer settings like in FBX. Use the flag - * AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY from the global property table to configure this. - * - * Use <tt>#AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY</tt> to setup the global scaling factor. - */ - aiProcess_GlobalScale = 0x8000000, - - // ------------------------------------------------------------------------- - /** <hr>A postprocessing step to embed of textures. - * - * This will remove external data dependencies for textures. - * If a texture's file does not exist at the specified path - * (due, for instance, to an absolute path generated on another system), - * it will check if a file with the same name exists at the root folder - * of the imported model. And if so, it uses that. - */ - aiProcess_EmbedTextures = 0x10000000, - - // aiProcess_GenEntityMeshes = 0x100000, - // aiProcess_OptimizeAnimations = 0x200000 - // aiProcess_FixTexturePaths = 0x200000 - - - aiProcess_ForceGenNormals = 0x20000000, - - // ------------------------------------------------------------------------- - /** <hr>Drops normals for all faces of all meshes. 
- * - * This is ignored if no normals are present. - * Face normals are shared between all points of a single face, - * so a single point can have multiple normals, which - * forces the library to duplicate vertices in some cases. - * #aiProcess_JoinIdenticalVertices is *senseless* then. - * This process gives sense back to aiProcess_JoinIdenticalVertices - */ - aiProcess_DropNormals = 0x40000000, - - // ------------------------------------------------------------------------- - /** - */ - aiProcess_GenBoundingBoxes = 0x80000000 -}; - - -// --------------------------------------------------------------------------------------- -/** @def aiProcess_ConvertToLeftHanded - * @brief Shortcut flag for Direct3D-based applications. - * - * Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and - * #aiProcess_FlipWindingOrder flags. - * The output data matches Direct3D's conventions: left-handed geometry, upper-left - * origin for UV coordinates and finally clockwise face order, suitable for CCW culling. - * - * @deprecated - */ -#define aiProcess_ConvertToLeftHanded ( \ - aiProcess_MakeLeftHanded | \ - aiProcess_FlipUVs | \ - aiProcess_FlipWindingOrder | \ - 0 ) - - -// --------------------------------------------------------------------------------------- -/** @def aiProcessPreset_TargetRealtime_Fast - * @brief Default postprocess configuration optimizing the data for real-time rendering. - * - * Applications would want to use this preset to load models on end-user PCs, - * maybe for direct use in game. - * - * If you're using DirectX, don't forget to combine this value with - * the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations - * in your application apply the #aiProcess_TransformUVCoords step, too. - * @note Please take the time to read the docs for the steps enabled by this preset. - * Some of them offer further configurable properties, while some of them might not be of - * use for you so it might be better to not specify them. - */ -#define aiProcessPreset_TargetRealtime_Fast ( \ - aiProcess_CalcTangentSpace | \ - aiProcess_GenNormals | \ - aiProcess_JoinIdenticalVertices | \ - aiProcess_Triangulate | \ - aiProcess_GenUVCoords | \ - aiProcess_SortByPType | \ - 0 ) - - // --------------------------------------------------------------------------------------- - /** @def aiProcessPreset_TargetRealtime_Quality - * @brief Default postprocess configuration optimizing the data for real-time rendering. - * - * Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration - * performs some extra optimizations to improve rendering speed and - * to minimize memory usage. It could be a good choice for a level editor - * environment where import speed is not so important. - * - * If you're using DirectX, don't forget to combine this value with - * the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations - * in your application apply the #aiProcess_TransformUVCoords step, too. - * @note Please take the time to read the docs for the steps enabled by this preset. - * Some of them offer further configurable properties, while some of them might not be - * of use for you so it might be better to not specify them. 
- */ -#define aiProcessPreset_TargetRealtime_Quality ( \ - aiProcess_CalcTangentSpace | \ - aiProcess_GenSmoothNormals | \ - aiProcess_JoinIdenticalVertices | \ - aiProcess_ImproveCacheLocality | \ - aiProcess_LimitBoneWeights | \ - aiProcess_RemoveRedundantMaterials | \ - aiProcess_SplitLargeMeshes | \ - aiProcess_Triangulate | \ - aiProcess_GenUVCoords | \ - aiProcess_SortByPType | \ - aiProcess_FindDegenerates | \ - aiProcess_FindInvalidData | \ - 0 ) - - // --------------------------------------------------------------------------------------- - /** @def aiProcessPreset_TargetRealtime_MaxQuality - * @brief Default postprocess configuration optimizing the data for real-time rendering. - * - * This preset enables almost every optimization step to achieve perfectly - * optimized data. It's your choice for level editor environments where import speed - * is not important. - * - * If you're using DirectX, don't forget to combine this value with - * the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations - * in your application, apply the #aiProcess_TransformUVCoords step, too. - * @note Please take the time to read the docs for the steps enabled by this preset. - * Some of them offer further configurable properties, while some of them might not be - * of use for you so it might be better to not specify them. - */ -#define aiProcessPreset_TargetRealtime_MaxQuality ( \ - aiProcessPreset_TargetRealtime_Quality | \ - aiProcess_FindInstances | \ - aiProcess_ValidateDataStructure | \ - aiProcess_OptimizeMeshes | \ - 0 ) - - -#ifdef __cplusplus -} // end of extern "C" -#endif - -#endif // AI_POSTPROCESS_H_INC diff --git a/thirdparty/assimp/include/assimp/qnan.h b/thirdparty/assimp/include/assimp/qnan.h deleted file mode 100644 index 06780da5b8..0000000000 --- a/thirdparty/assimp/include/assimp/qnan.h +++ /dev/null @@ -1,162 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file qnan.h - * @brief Some utilities for our dealings with qnans. - * - * @note Some loaders use qnans to mark invalid values tempoarily, also - * Assimp explicitly enforces undefined normals to be set to qnan. - * qnan utilities are available in standard libraries (C99 for example) - * but last time I checked compiler coverage was so bad that I decided - * to reinvent the wheel. - */ -#pragma once -#ifndef AI_QNAN_H_INCLUDED -#define AI_QNAN_H_INCLUDED - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -#include <limits> -#include <stdint.h> - -// --------------------------------------------------------------------------- -/** Data structure to represent the bit pattern of a 32 Bit - * IEEE 754 floating-point number. */ -union _IEEESingle { - float Float; - struct - { - uint32_t Frac : 23; - uint32_t Exp : 8; - uint32_t Sign : 1; - } IEEE; -}; - -// --------------------------------------------------------------------------- -/** Data structure to represent the bit pattern of a 64 Bit - * IEEE 754 floating-point number. */ -union _IEEEDouble { - double Double; - struct - { - uint64_t Frac : 52; - uint64_t Exp : 11; - uint64_t Sign : 1; - } IEEE; -}; - -// --------------------------------------------------------------------------- -/** Check whether a given float is qNaN. - * @param in Input value */ -AI_FORCE_INLINE bool is_qnan(float in) { - // the straightforward solution does not work: - // return (in != in); - // compiler generates code like this - // load <in> to <register-with-different-width> - // compare <register-with-different-width> against <in> - - // FIXME: Use <float> stuff instead? I think fpclassify needs C99 - _IEEESingle temp; - memcpy(&temp, &in, sizeof(float)); - return (temp.IEEE.Exp == (1u << 8)-1 && - temp.IEEE.Frac); -} - -// --------------------------------------------------------------------------- -/** Check whether a given double is qNaN. - * @param in Input value */ -AI_FORCE_INLINE bool is_qnan(double in) { - // the straightforward solution does not work: - // return (in != in); - // compiler generates code like this - // load <in> to <register-with-different-width> - // compare <register-with-different-width> against <in> - - // FIXME: Use <float> stuff instead? I think fpclassify needs C99 - _IEEEDouble temp; - memcpy(&temp, &in, sizeof(in)); - return (temp.IEEE.Exp == (1u << 11)-1 && - temp.IEEE.Frac); -} - -// --------------------------------------------------------------------------- -/** @brief check whether a float is either NaN or (+/-) INF. - * - * Denorms return false, they're treated like normal values. 
- * @param in Input value */ -AI_FORCE_INLINE bool is_special_float(float in) { - _IEEESingle temp; - memcpy(&temp, &in, sizeof(float)); - return (temp.IEEE.Exp == (1u << 8)-1); -} - -// --------------------------------------------------------------------------- -/** @brief check whether a double is either NaN or (+/-) INF. - * - * Denorms return false, they're treated like normal values. - * @param in Input value */ -AI_FORCE_INLINE bool is_special_float(double in) { - _IEEESingle temp; - memcpy(&temp, &in, sizeof(float)); - return (temp.IEEE.Exp == (1u << 11)-1); -} - -// --------------------------------------------------------------------------- -/** Check whether a float is NOT qNaN. - * @param in Input value */ -template<class TReal> -AI_FORCE_INLINE bool is_not_qnan(TReal in) { - return !is_qnan(in); -} - -// --------------------------------------------------------------------------- -/** @brief Get a fresh qnan. */ -AI_FORCE_INLINE ai_real get_qnan() { - return std::numeric_limits<ai_real>::quiet_NaN(); -} - -#endif // !! AI_QNAN_H_INCLUDED diff --git a/thirdparty/assimp/include/assimp/quaternion.h b/thirdparty/assimp/include/assimp/quaternion.h deleted file mode 100644 index ae45959b41..0000000000 --- a/thirdparty/assimp/include/assimp/quaternion.h +++ /dev/null @@ -1,134 +0,0 @@ -/* -Open Asset Import Library (assimp) ----------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the -following conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ----------------------------------------------------------------------- -*/ - -/** @file quaternion.h - * @brief Quaternion structure, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_QUATERNION_H_INC -#define AI_QUATERNION_H_INC - -#ifdef __cplusplus - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/defs.h> - -template <typename TReal> class aiVector3t; -template <typename TReal> class aiMatrix3x3t; - -// --------------------------------------------------------------------------- -/** Represents a quaternion in a 4D vector. */ -template <typename TReal> -class aiQuaterniont -{ -public: - aiQuaterniont() AI_NO_EXCEPT : w(1.0), x(), y(), z() {} - aiQuaterniont(TReal pw, TReal px, TReal py, TReal pz) - : w(pw), x(px), y(py), z(pz) {} - - /** Construct from rotation matrix. Result is undefined if the matrix is not orthonormal. */ - explicit aiQuaterniont( const aiMatrix3x3t<TReal>& pRotMatrix); - - /** Construct from euler angles */ - aiQuaterniont( TReal rotx, TReal roty, TReal rotz); - - /** Construct from an axis-angle pair */ - aiQuaterniont( aiVector3t<TReal> axis, TReal angle); - - /** Construct from a normalized quaternion stored in a vec3 */ - explicit aiQuaterniont( aiVector3t<TReal> normalized); - - /** Returns a matrix representation of the quaternion */ - aiMatrix3x3t<TReal> GetMatrix() const; - -public: - - bool operator== (const aiQuaterniont& o) const; - bool operator!= (const aiQuaterniont& o) const; - - bool Equal(const aiQuaterniont& o, TReal epsilon = 1e-6) const; - -public: - - /** Normalize the quaternion */ - aiQuaterniont& Normalize(); - - /** Compute quaternion conjugate */ - aiQuaterniont& Conjugate (); - - /** Rotate a point by this quaternion */ - aiVector3t<TReal> Rotate (const aiVector3t<TReal>& in); - - /** Multiply two quaternions */ - aiQuaterniont operator* (const aiQuaterniont& two) const; - -public: - - /** Performs a spherical interpolation between two quaternions and writes the result into the third. - * @param pOut Target object to received the interpolated rotation. - * @param pStart Start rotation of the interpolation at factor == 0. - * @param pEnd End rotation, factor == 1. - * @param pFactor Interpolation factor between 0 and 1. Values outside of this range yield undefined results. - */ - static void Interpolate( aiQuaterniont& pOut, const aiQuaterniont& pStart, - const aiQuaterniont& pEnd, TReal pFactor); - -public: - - //! w,x,y,z components of the quaternion - TReal w, x, y, z; -} ; - -typedef aiQuaterniont<ai_real> aiQuaternion; - -#else - -struct aiQuaternion { - ai_real w, x, y, z; -}; - -#endif - -#endif // AI_QUATERNION_H_INC diff --git a/thirdparty/assimp/include/assimp/quaternion.inl b/thirdparty/assimp/include/assimp/quaternion.inl deleted file mode 100644 index 3ce514d1bb..0000000000 --- a/thirdparty/assimp/include/assimp/quaternion.inl +++ /dev/null @@ -1,290 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file quaternion.inl - * @brief Inline implementation of aiQuaterniont<TReal> operators - */ -#pragma once -#ifndef AI_QUATERNION_INL_INC -#define AI_QUATERNION_INL_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -#include <assimp/quaternion.h> - -#include <cmath> - -// --------------------------------------------------------------------------- -template<typename TReal> -bool aiQuaterniont<TReal>::operator== (const aiQuaterniont& o) const -{ - return x == o.x && y == o.y && z == o.z && w == o.w; -} - -// --------------------------------------------------------------------------- -template<typename TReal> -bool aiQuaterniont<TReal>::operator!= (const aiQuaterniont& o) const -{ - return !(*this == o); -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline bool aiQuaterniont<TReal>::Equal(const aiQuaterniont& o, TReal epsilon) const { - return - std::abs(x - o.x) <= epsilon && - std::abs(y - o.y) <= epsilon && - std::abs(z - o.z) <= epsilon && - std::abs(w - o.w) <= epsilon; -} - -// --------------------------------------------------------------------------- -// Constructs a quaternion from a rotation matrix -template<typename TReal> -inline aiQuaterniont<TReal>::aiQuaterniont( const aiMatrix3x3t<TReal> &pRotMatrix) -{ - TReal t = pRotMatrix.a1 + pRotMatrix.b2 + pRotMatrix.c3; - - // large enough - if( t > static_cast<TReal>(0)) - { - TReal s = std::sqrt(1 + t) * static_cast<TReal>(2.0); - x = (pRotMatrix.c2 - pRotMatrix.b3) / s; - y = (pRotMatrix.a3 - pRotMatrix.c1) / s; - z = (pRotMatrix.b1 - pRotMatrix.a2) / s; - w = static_cast<TReal>(0.25) * s; - } // else we have to check several cases - else if( pRotMatrix.a1 > pRotMatrix.b2 && pRotMatrix.a1 > pRotMatrix.c3 ) - { - // Column 0: - TReal s = std::sqrt( static_cast<TReal>(1.0) + pRotMatrix.a1 - pRotMatrix.b2 - pRotMatrix.c3) * static_cast<TReal>(2.0); - x = static_cast<TReal>(0.25) * s; - y = (pRotMatrix.b1 + pRotMatrix.a2) / s; - z = (pRotMatrix.a3 + pRotMatrix.c1) / s; - w = (pRotMatrix.c2 - pRotMatrix.b3) / s; - } - else if( pRotMatrix.b2 > pRotMatrix.c3) - { - // Column 1: - TReal s = std::sqrt( static_cast<TReal>(1.0) + pRotMatrix.b2 - pRotMatrix.a1 - pRotMatrix.c3) * 
static_cast<TReal>(2.0); - x = (pRotMatrix.b1 + pRotMatrix.a2) / s; - y = static_cast<TReal>(0.25) * s; - z = (pRotMatrix.c2 + pRotMatrix.b3) / s; - w = (pRotMatrix.a3 - pRotMatrix.c1) / s; - } else - { - // Column 2: - TReal s = std::sqrt( static_cast<TReal>(1.0) + pRotMatrix.c3 - pRotMatrix.a1 - pRotMatrix.b2) * static_cast<TReal>(2.0); - x = (pRotMatrix.a3 + pRotMatrix.c1) / s; - y = (pRotMatrix.c2 + pRotMatrix.b3) / s; - z = static_cast<TReal>(0.25) * s; - w = (pRotMatrix.b1 - pRotMatrix.a2) / s; - } -} - -// --------------------------------------------------------------------------- -// Construction from euler angles -template<typename TReal> -inline aiQuaterniont<TReal>::aiQuaterniont( TReal fPitch, TReal fYaw, TReal fRoll ) -{ - const TReal fSinPitch(std::sin(fPitch*static_cast<TReal>(0.5))); - const TReal fCosPitch(std::cos(fPitch*static_cast<TReal>(0.5))); - const TReal fSinYaw(std::sin(fYaw*static_cast<TReal>(0.5))); - const TReal fCosYaw(std::cos(fYaw*static_cast<TReal>(0.5))); - const TReal fSinRoll(std::sin(fRoll*static_cast<TReal>(0.5))); - const TReal fCosRoll(std::cos(fRoll*static_cast<TReal>(0.5))); - const TReal fCosPitchCosYaw(fCosPitch*fCosYaw); - const TReal fSinPitchSinYaw(fSinPitch*fSinYaw); - x = fSinRoll * fCosPitchCosYaw - fCosRoll * fSinPitchSinYaw; - y = fCosRoll * fSinPitch * fCosYaw + fSinRoll * fCosPitch * fSinYaw; - z = fCosRoll * fCosPitch * fSinYaw - fSinRoll * fSinPitch * fCosYaw; - w = fCosRoll * fCosPitchCosYaw + fSinRoll * fSinPitchSinYaw; -} - -// --------------------------------------------------------------------------- -// Returns a matrix representation of the quaternion -template<typename TReal> -inline aiMatrix3x3t<TReal> aiQuaterniont<TReal>::GetMatrix() const -{ - aiMatrix3x3t<TReal> resMatrix; - resMatrix.a1 = static_cast<TReal>(1.0) - static_cast<TReal>(2.0) * (y * y + z * z); - resMatrix.a2 = static_cast<TReal>(2.0) * (x * y - z * w); - resMatrix.a3 = static_cast<TReal>(2.0) * (x * z + y * w); - resMatrix.b1 = static_cast<TReal>(2.0) * (x * y + z * w); - resMatrix.b2 = static_cast<TReal>(1.0) - static_cast<TReal>(2.0) * (x * x + z * z); - resMatrix.b3 = static_cast<TReal>(2.0) * (y * z - x * w); - resMatrix.c1 = static_cast<TReal>(2.0) * (x * z - y * w); - resMatrix.c2 = static_cast<TReal>(2.0) * (y * z + x * w); - resMatrix.c3 = static_cast<TReal>(1.0) - static_cast<TReal>(2.0) * (x * x + y * y); - - return resMatrix; -} - -// --------------------------------------------------------------------------- -// Construction from an axis-angle pair -template<typename TReal> -inline aiQuaterniont<TReal>::aiQuaterniont( aiVector3t<TReal> axis, TReal angle) -{ - axis.Normalize(); - - const TReal sin_a = std::sin( angle / 2 ); - const TReal cos_a = std::cos( angle / 2 ); - x = axis.x * sin_a; - y = axis.y * sin_a; - z = axis.z * sin_a; - w = cos_a; -} -// --------------------------------------------------------------------------- -// Construction from am existing, normalized quaternion -template<typename TReal> -inline aiQuaterniont<TReal>::aiQuaterniont( aiVector3t<TReal> normalized) -{ - x = normalized.x; - y = normalized.y; - z = normalized.z; - - const TReal t = static_cast<TReal>(1.0) - (x*x) - (y*y) - (z*z); - - if (t < static_cast<TReal>(0.0)) { - w = static_cast<TReal>(0.0); - } - else w = std::sqrt (t); -} - -// --------------------------------------------------------------------------- -// Performs a spherical interpolation between two quaternions -// Implementation adopted from the gmtl project. 
All others I found on the net fail in some cases. -// Congrats, gmtl! -template<typename TReal> -inline void aiQuaterniont<TReal>::Interpolate( aiQuaterniont& pOut, const aiQuaterniont& pStart, const aiQuaterniont& pEnd, TReal pFactor) -{ - // calc cosine theta - TReal cosom = pStart.x * pEnd.x + pStart.y * pEnd.y + pStart.z * pEnd.z + pStart.w * pEnd.w; - - // adjust signs (if necessary) - aiQuaterniont end = pEnd; - if( cosom < static_cast<TReal>(0.0)) - { - cosom = -cosom; - end.x = -end.x; // Reverse all signs - end.y = -end.y; - end.z = -end.z; - end.w = -end.w; - } - - // Calculate coefficients - TReal sclp, sclq; - if( (static_cast<TReal>(1.0) - cosom) > static_cast<TReal>(0.0001)) // 0.0001 -> some epsillon - { - // Standard case (slerp) - TReal omega, sinom; - omega = std::acos( cosom); // extract theta from dot product's cos theta - sinom = std::sin( omega); - sclp = std::sin( (static_cast<TReal>(1.0) - pFactor) * omega) / sinom; - sclq = std::sin( pFactor * omega) / sinom; - } else - { - // Very close, do linear interp (because it's faster) - sclp = static_cast<TReal>(1.0) - pFactor; - sclq = pFactor; - } - - pOut.x = sclp * pStart.x + sclq * end.x; - pOut.y = sclp * pStart.y + sclq * end.y; - pOut.z = sclp * pStart.z + sclq * end.z; - pOut.w = sclp * pStart.w + sclq * end.w; -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline aiQuaterniont<TReal>& aiQuaterniont<TReal>::Normalize() -{ - // compute the magnitude and divide through it - const TReal mag = std::sqrt(x*x + y*y + z*z + w*w); - if (mag) - { - const TReal invMag = static_cast<TReal>(1.0)/mag; - x *= invMag; - y *= invMag; - z *= invMag; - w *= invMag; - } - return *this; -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline aiQuaterniont<TReal> aiQuaterniont<TReal>::operator* (const aiQuaterniont& t) const -{ - return aiQuaterniont(w*t.w - x*t.x - y*t.y - z*t.z, - w*t.x + x*t.w + y*t.z - z*t.y, - w*t.y + y*t.w + z*t.x - x*t.z, - w*t.z + z*t.w + x*t.y - y*t.x); -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline aiQuaterniont<TReal>& aiQuaterniont<TReal>::Conjugate () -{ - x = -x; - y = -y; - z = -z; - return *this; -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline aiVector3t<TReal> aiQuaterniont<TReal>::Rotate (const aiVector3t<TReal>& v) -{ - aiQuaterniont q2(0.f,v.x,v.y,v.z), q = *this, qinv = q; - qinv.Conjugate(); - - q = q*q2*qinv; - return aiVector3t<TReal>(q.x,q.y,q.z); -} - -#endif -#endif // AI_QUATERNION_INL_INC diff --git a/thirdparty/assimp/include/assimp/scene.h b/thirdparty/assimp/include/assimp/scene.h deleted file mode 100644 index b76709eb15..0000000000 --- a/thirdparty/assimp/include/assimp/scene.h +++ /dev/null @@ -1,429 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. 
- -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file scene.h - * @brief Defines the data structures in which the imported scene is returned. - */ -#pragma once -#ifndef AI_SCENE_H_INC -#define AI_SCENE_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> -#include <assimp/texture.h> -#include <assimp/mesh.h> -#include <assimp/light.h> -#include <assimp/camera.h> -#include <assimp/material.h> -#include <assimp/anim.h> -#include <assimp/metadata.h> - -#ifdef __cplusplus -# include <cstdlib> -extern "C" { -#endif - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wattributes" -#endif - -// ------------------------------------------------------------------------------- -/** - * A node in the imported hierarchy. - * - * Each node has name, a parent node (except for the root node), - * a transformation relative to its parent and possibly several child nodes. - * Simple file formats don't support hierarchical structures - for these formats - * the imported scene does consist of only a single root node without children. - */ -// ------------------------------------------------------------------------------- -struct ASSIMP_API aiNode -{ - /** The name of the node. - * - * The name might be empty (length of zero) but all nodes which - * need to be referenced by either bones or animations are named. - * Multiple nodes may have the same name, except for nodes which are referenced - * by bones (see #aiBone and #aiMesh::mBones). Their names *must* be unique. - * - * Cameras and lights reference a specific node by name - if there - * are multiple nodes with this name, they are assigned to each of them. - * <br> - * There are no limitations with regard to the characters contained in - * the name string as it is usually taken directly from the source file. - * - * Implementations should be able to handle tokens such as whitespace, tabs, - * line feeds, quotation marks, ampersands etc. - * - * Sometimes assimp introduces new nodes not present in the source file - * into the hierarchy (usually out of necessity because sometimes the - * source hierarchy format is simply not compatible). Their names are - * surrounded by @verbatim <> @endverbatim e.g. - * @verbatim<DummyRootNode> @endverbatim. 
- */ - C_STRUCT aiString mName; - - /** The transformation relative to the node's parent. */ - C_STRUCT aiMatrix4x4 mTransformation; - - /** Parent node. nullptr if this node is the root node. */ - C_STRUCT aiNode* mParent; - - /** The number of child nodes of this node. */ - unsigned int mNumChildren; - - /** The child nodes of this node. nullptr if mNumChildren is 0. */ - C_STRUCT aiNode** mChildren; - - /** The number of meshes of this node. */ - unsigned int mNumMeshes; - - /** The meshes of this node. Each entry is an index into the - * mesh list of the #aiScene. - */ - unsigned int* mMeshes; - - /** Metadata associated with this node or nullptr if there is no metadata. - * Whether any metadata is generated depends on the source file format. See the - * @link importer_notes @endlink page for more information on every source file - * format. Importers that don't document any metadata don't write any. - */ - C_STRUCT aiMetadata* mMetaData; - -#ifdef __cplusplus - /** Constructor */ - aiNode(); - - /** Construction from a specific name */ - explicit aiNode(const std::string& name); - - /** Destructor */ - ~aiNode(); - - /** Searches for a node with a specific name, beginning at this - * nodes. Normally you will call this method on the root node - * of the scene. - * - * @param name Name to search for - * @return nullptr or a valid Node if the search was successful. - */ - inline - const aiNode* FindNode(const aiString& name) const { - return FindNode(name.data); - } - - inline - aiNode* FindNode(const aiString& name) { - return FindNode(name.data); - } - - const aiNode* FindNode(const char* name) const; - - aiNode* FindNode(const char* name); - - /** - * @brief Will add new children. - * @param numChildren Number of children to add. - * @param children The array with pointers showing to the children. - */ - void addChildren(unsigned int numChildren, aiNode **children); -#endif // __cplusplus -}; - -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -// ------------------------------------------------------------------------------- -/** - * Specifies that the scene data structure that was imported is not complete. - * This flag bypasses some internal validations and allows the import - * of animation skeletons, material libraries or camera animation paths - * using Assimp. Most applications won't support such data. - */ -#define AI_SCENE_FLAGS_INCOMPLETE 0x1 - -/** - * This flag is set by the validation postprocess-step (aiPostProcess_ValidateDS) - * if the validation is successful. In a validated scene you can be sure that - * any cross references in the data structure (e.g. vertex indices) are valid. - */ -#define AI_SCENE_FLAGS_VALIDATED 0x2 - -/** - * This flag is set by the validation postprocess-step (aiPostProcess_ValidateDS) - * if the validation is successful but some issues have been found. - * This can for example mean that a texture that does not exist is referenced - * by a material or that the bone weights for a vertex don't sum to 1.0 ... . - * In most cases you should still be able to use the import. This flag could - * be useful for applications which don't capture Assimp's log output. - */ -#define AI_SCENE_FLAGS_VALIDATION_WARNING 0x4 - -/** - * This flag is currently only set by the aiProcess_JoinIdenticalVertices step. - * It indicates that the vertices of the output meshes aren't in the internal - * verbose format anymore. In the verbose format all vertices are unique, - * no vertex is ever referenced by more than one face. 
- */ -#define AI_SCENE_FLAGS_NON_VERBOSE_FORMAT 0x8 - - /** - * Denotes pure height-map terrain data. Pure terrains usually consist of quads, - * sometimes triangles, in a regular grid. The x,y coordinates of all vertex - * positions refer to the x,y coordinates on the terrain height map, the z-axis - * stores the elevation at a specific point. - * - * TER (Terragen) and HMP (3D Game Studio) are height map formats. - * @note Assimp is probably not the best choice for loading *huge* terrains - - * fully triangulated data takes extremely much free store and should be avoided - * as long as possible (typically you'll do the triangulation when you actually - * need to render it). - */ -#define AI_SCENE_FLAGS_TERRAIN 0x10 - - /** - * Specifies that the scene data can be shared between structures. For example: - * one vertex in few faces. \ref AI_SCENE_FLAGS_NON_VERBOSE_FORMAT can not be - * used for this because \ref AI_SCENE_FLAGS_NON_VERBOSE_FORMAT has internal - * meaning about postprocessing steps. - */ -#define AI_SCENE_FLAGS_ALLOW_SHARED 0x20 - -// ------------------------------------------------------------------------------- -/** The root structure of the imported data. - * - * Everything that was imported from the given file can be accessed from here. - * Objects of this class are generally maintained and owned by Assimp, not - * by the caller. You shouldn't want to instance it, nor should you ever try to - * delete a given scene on your own. - */ -// ------------------------------------------------------------------------------- -struct aiScene -{ - /** Any combination of the AI_SCENE_FLAGS_XXX flags. By default - * this value is 0, no flags are set. Most applications will - * want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE - * bit set. - */ - unsigned int mFlags; - - /** The root node of the hierarchy. - * - * There will always be at least the root node if the import - * was successful (and no special flags have been set). - * Presence of further nodes depends on the format and content - * of the imported file. - */ - C_STRUCT aiNode* mRootNode; - - /** The number of meshes in the scene. */ - unsigned int mNumMeshes; - - /** The array of meshes. - * - * Use the indices given in the aiNode structure to access - * this array. The array is mNumMeshes in size. If the - * AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always - * be at least ONE material. - */ - C_STRUCT aiMesh** mMeshes; - - /** The number of materials in the scene. */ - unsigned int mNumMaterials; - - /** The array of materials. - * - * Use the index given in each aiMesh structure to access this - * array. The array is mNumMaterials in size. If the - * AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always - * be at least ONE material. - */ - C_STRUCT aiMaterial** mMaterials; - - /** The number of animations in the scene. */ - unsigned int mNumAnimations; - - /** The array of animations. - * - * All animations imported from the given file are listed here. - * The array is mNumAnimations in size. - */ - C_STRUCT aiAnimation** mAnimations; - - /** The number of textures embedded into the file */ - unsigned int mNumTextures; - - /** The array of embedded textures. - * - * Not many file formats embed their textures into the file. - * An example is Quake's MDL format (which is also used by - * some GameStudio versions) - */ - C_STRUCT aiTexture** mTextures; - - /** The number of light sources in the scene. 
Light sources - * are fully optional, in most cases this attribute will be 0 - */ - unsigned int mNumLights; - - /** The array of light sources. - * - * All light sources imported from the given file are - * listed here. The array is mNumLights in size. - */ - C_STRUCT aiLight** mLights; - - /** The number of cameras in the scene. Cameras - * are fully optional, in most cases this attribute will be 0 - */ - unsigned int mNumCameras; - - /** The array of cameras. - * - * All cameras imported from the given file are listed here. - * The array is mNumCameras in size. The first camera in the - * array (if existing) is the default camera view into - * the scene. - */ - C_STRUCT aiCamera** mCameras; - - /** - * @brief The global metadata assigned to the scene itself. - * - * This data contains global metadata which belongs to the scene like - * unit-conversions, versions, vendors or other model-specific data. This - * can be used to store format-specific metadata as well. - */ - C_STRUCT aiMetadata* mMetaData; - - -#ifdef __cplusplus - - //! Default constructor - set everything to 0/nullptr - ASSIMP_API aiScene(); - - //! Destructor - ASSIMP_API ~aiScene(); - - //! Check whether the scene contains meshes - //! Unless no special scene flags are set this will always be true. - inline bool HasMeshes() const { - return mMeshes != nullptr && mNumMeshes > 0; - } - - //! Check whether the scene contains materials - //! Unless no special scene flags are set this will always be true. - inline bool HasMaterials() const { - return mMaterials != nullptr && mNumMaterials > 0; - } - - //! Check whether the scene contains lights - inline bool HasLights() const { - return mLights != nullptr && mNumLights > 0; - } - - //! Check whether the scene contains textures - inline bool HasTextures() const { - return mTextures != nullptr && mNumTextures > 0; - } - - //! Check whether the scene contains cameras - inline bool HasCameras() const { - return mCameras != nullptr && mNumCameras > 0; - } - - //! Check whether the scene contains animations - inline bool HasAnimations() const { - return mAnimations != nullptr && mNumAnimations > 0; - } - - //! Returns a short filename from a full path - static const char* GetShortFilename(const char* filename) { - const char* lastSlash = strrchr(filename, '/'); - if (lastSlash == nullptr) { - lastSlash = strrchr(filename, '\\'); - } - const char* shortFilename = lastSlash != nullptr ? lastSlash + 1 : filename; - return shortFilename; - } - - //! Returns an embedded texture - const aiTexture* GetEmbeddedTexture(const char* filename) const { - // lookup using texture ID (if referenced like: "*1", "*2", etc.) - if ('*' == *filename) { - int index = std::atoi(filename + 1); - if (0 > index || mNumTextures <= static_cast<unsigned>(index)) - return nullptr; - return mTextures[index]; - } - // lookup using filename - const char* shortFilename = GetShortFilename(filename); - for (unsigned int i = 0; i < mNumTextures; i++) { - const char* shortTextureFilename = GetShortFilename(mTextures[i]->mFilename.C_Str()); - if (strcmp(shortTextureFilename, shortFilename) == 0) { - return mTextures[i]; - } - } - return nullptr; - } -#endif // __cplusplus - - /** Internal data, do not touch */ -#ifdef __cplusplus - void* mPrivate; -#else - char* mPrivate; -#endif - -}; - -#ifdef __cplusplus -} //! 
namespace Assimp -#endif - -#endif // AI_SCENE_H_INC diff --git a/thirdparty/assimp/include/assimp/texture.h b/thirdparty/assimp/include/assimp/texture.h deleted file mode 100644 index 0867659f46..0000000000 --- a/thirdparty/assimp/include/assimp/texture.h +++ /dev/null @@ -1,226 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file texture.h - * @brief Defines texture helper structures for the library - * - * Used for file formats which embed their textures into the model file. - * Supported are both normal textures, which are stored as uncompressed - * pixels, and "compressed" textures, which are stored in a file format - * such as PNG or TGA. - */ -#pragma once -#ifndef AI_TEXTURE_H_INC -#define AI_TEXTURE_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#include <assimp/types.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// -------------------------------------------------------------------------------- - -/** \def AI_EMBEDDED_TEXNAME_PREFIX - * \ref AI_MAKE_EMBEDDED_TEXNAME - */ -#ifndef AI_EMBEDDED_TEXNAME_PREFIX -# define AI_EMBEDDED_TEXNAME_PREFIX "*" -#endif - -/** @def AI_MAKE_EMBEDDED_TEXNAME - * Used to build the reserved path name used by the material system to - * reference textures that are embedded into their corresponding - * model files. The parameter specifies the index of the texture - * (zero-based, in the aiScene::mTextures array) - */ -#if (!defined AI_MAKE_EMBEDDED_TEXNAME) -# define AI_MAKE_EMBEDDED_TEXNAME(_n_) AI_EMBEDDED_TEXNAME_PREFIX # _n_ -#endif - -#include "./Compiler/pushpack1.h" - -// -------------------------------------------------------------------------------- -/** @brief Helper structure to represent a texel in a ARGB8888 format -* -* Used by aiTexture. 
-*/ -struct aiTexel { - unsigned char b,g,r,a; - -#ifdef __cplusplus - //! Comparison operator - bool operator== (const aiTexel& other) const - { - return b == other.b && r == other.r && - g == other.g && a == other.a; - } - - //! Inverse comparison operator - bool operator!= (const aiTexel& other) const - { - return b != other.b || r != other.r || - g != other.g || a != other.a; - } - - //! Conversion to a floating-point 4d color - operator aiColor4D() const - { - return aiColor4D(r/255.f,g/255.f,b/255.f,a/255.f); - } -#endif // __cplusplus - -} PACK_STRUCT; - -#include "./Compiler/poppack1.h" - -#define HINTMAXTEXTURELEN 9 - -// -------------------------------------------------------------------------------- -/** Helper structure to describe an embedded texture - * - * Normally textures are contained in external files but some file formats embed - * them directly in the model file. There are two types of embedded textures: - * 1. Uncompressed textures. The color data is given in an uncompressed format. - * 2. Compressed textures stored in a file format like png or jpg. The raw file - * bytes are given so the application must utilize an image decoder (e.g. DevIL) to - * get access to the actual color data. - * - * Embedded textures are referenced from materials using strings like "*0", "*1", etc. - * as the texture paths (a single asterisk character followed by the - * zero-based index of the texture in the aiScene::mTextures array). - */ -struct aiTexture { - /** Width of the texture, in pixels - * - * If mHeight is zero the texture is compressed in a format - * like JPEG. In this case mWidth specifies the size of the - * memory area pcData is pointing to, in bytes. - */ - unsigned int mWidth; - - /** Height of the texture, in pixels - * - * If this value is zero, pcData points to an compressed texture - * in any format (e.g. JPEG). - */ - unsigned int mHeight; - - /** A hint from the loader to make it easier for applications - * to determine the type of embedded textures. - * - * If mHeight != 0 this member is show how data is packed. Hint will consist of - * two parts: channel order and channel bitness (count of the bits for every - * color channel). For simple parsing by the viewer it's better to not omit - * absent color channel and just use 0 for bitness. For example: - * 1. Image contain RGBA and 8 bit per channel, achFormatHint == "rgba8888"; - * 2. Image contain ARGB and 8 bit per channel, achFormatHint == "argb8888"; - * 3. Image contain RGB and 5 bit for R and B channels and 6 bit for G channel, achFormatHint == "rgba5650"; - * 4. One color image with B channel and 1 bit for it, achFormatHint == "rgba0010"; - * If mHeight == 0 then achFormatHint is set set to '\\0\\0\\0\\0' if the loader has no additional - * information about the texture file format used OR the - * file extension of the format without a trailing dot. If there - * are multiple file extensions for a format, the shortest - * extension is chosen (JPEG maps to 'jpg', not to 'jpeg'). - * E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case. - * The fourth character will always be '\\0'. - */ - char achFormatHint[ HINTMAXTEXTURELEN ];// 8 for string + 1 for terminator. - - /** Data of the texture. - * - * Points to an array of mWidth * mHeight aiTexel's. - * The format of the texture data is always ARGB8888 to - * make the implementation for user of the library as easy - * as possible. If mHeight = 0 this is a pointer to a memory - * buffer of size mWidth containing the compressed texture - * data. 
Good luck, have fun! - */ - C_STRUCT aiTexel* pcData; - - /** Texture original filename - * - * Used to get the texture reference - */ - C_STRUCT aiString mFilename; - -#ifdef __cplusplus - - //! For compressed textures (mHeight == 0): compare the - //! format hint against a given string. - //! @param s Input string. 3 characters are maximally processed. - //! Example values: "jpg", "png" - //! @return true if the given string matches the format hint - bool CheckFormat(const char* s) const { - if (nullptr == s) { - return false; - } - - return (0 == ::strncmp(achFormatHint, s, sizeof(achFormatHint))); - } - - // Construction - aiTexture() AI_NO_EXCEPT - : mWidth(0) - , mHeight(0) - , pcData(nullptr) - , mFilename() { - achFormatHint[0] = achFormatHint[1] = 0; - achFormatHint[2] = achFormatHint[3] = 0; - } - - // Destruction - ~aiTexture () { - delete[] pcData; - } -#endif -}; - - -#ifdef __cplusplus -} -#endif - -#endif // AI_TEXTURE_H_INC diff --git a/thirdparty/assimp/include/assimp/types.h b/thirdparty/assimp/include/assimp/types.h deleted file mode 100644 index e32cae331c..0000000000 --- a/thirdparty/assimp/include/assimp/types.h +++ /dev/null @@ -1,544 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file types.h - * Basic data types and primitives, such as vectors or colors. 
- */ -#pragma once -#ifndef AI_TYPES_H_INC -#define AI_TYPES_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -// Some runtime headers -#include <sys/types.h> -#include <stddef.h> -#include <string.h> -#include <limits.h> -#include <stdint.h> - -// Our compile configuration -#include <assimp/defs.h> - -// Some types moved to separate header due to size of operators -#include <assimp/vector3.h> -#include <assimp/vector2.h> -#include <assimp/color4.h> -#include <assimp/matrix3x3.h> -#include <assimp/matrix4x4.h> -#include <assimp/quaternion.h> - -typedef int32_t ai_int32; -typedef uint32_t ai_uint32 ; - -#ifdef __cplusplus -#include <cstring> -#include <new> // for std::nothrow_t -#include <string> // for aiString::Set(const std::string&) - -namespace Assimp { -//! @cond never -namespace Intern { - // -------------------------------------------------------------------- - /** @brief Internal helper class to utilize our internal new/delete - * routines for allocating object of this and derived classes. - * - * By doing this you can safely share class objects between Assimp - * and the application - it works even over DLL boundaries. A good - * example is the #IOSystem where the application allocates its custom - * #IOSystem, then calls #Importer::SetIOSystem(). When the Importer - * destructs, Assimp calls operator delete on the stored #IOSystem. - * If it lies on a different heap than Assimp is working with, - * the application is determined to crash. - */ - // -------------------------------------------------------------------- -#ifndef SWIG - struct ASSIMP_API AllocateFromAssimpHeap { - // http://www.gotw.ca/publications/mill15.htm - - // new/delete overload - void *operator new ( size_t num_bytes) /* throw( std::bad_alloc ) */; - void *operator new ( size_t num_bytes, const std::nothrow_t& ) throw(); - void operator delete ( void* data); - - // array new/delete overload - void *operator new[] ( size_t num_bytes) /* throw( std::bad_alloc ) */; - void *operator new[] ( size_t num_bytes, const std::nothrow_t& ) throw(); - void operator delete[] ( void* data); - - }; // struct AllocateFromAssimpHeap -#endif -} // namespace Intern - //! @endcond -} // namespace Assimp - -extern "C" { -#endif - -/** Maximum dimension for strings, ASSIMP strings are zero terminated. */ -#ifdef __cplusplus - static const size_t MAXLEN = 1024; -#else -# define MAXLEN 1024 -#endif - -// ---------------------------------------------------------------------------------- -/** Represents a plane in a three-dimensional, euclidean space -*/ -struct aiPlane { -#ifdef __cplusplus - aiPlane () AI_NO_EXCEPT : a(0.f), b(0.f), c(0.f), d(0.f) {} - aiPlane (ai_real _a, ai_real _b, ai_real _c, ai_real _d) - : a(_a), b(_b), c(_c), d(_d) {} - - aiPlane (const aiPlane& o) : a(o.a), b(o.b), c(o.c), d(o.d) {} - -#endif // !__cplusplus - - //! Plane equation - ai_real a,b,c,d; -}; // !struct aiPlane - -// ---------------------------------------------------------------------------------- -/** Represents a ray -*/ -struct aiRay { -#ifdef __cplusplus - aiRay () AI_NO_EXCEPT {} - aiRay (const aiVector3D& _pos, const aiVector3D& _dir) - : pos(_pos), dir(_dir) {} - - aiRay (const aiRay& o) : pos (o.pos), dir (o.dir) {} - -#endif // !__cplusplus - - //! Position and direction of the ray - C_STRUCT aiVector3D pos, dir; -}; // !struct aiRay - -// ---------------------------------------------------------------------------------- -/** Represents a color in Red-Green-Blue space. 
-*/ -struct aiColor3D -{ -#ifdef __cplusplus - aiColor3D () AI_NO_EXCEPT : r(0.0f), g(0.0f), b(0.0f) {} - aiColor3D (ai_real _r, ai_real _g, ai_real _b) : r(_r), g(_g), b(_b) {} - explicit aiColor3D (ai_real _r) : r(_r), g(_r), b(_r) {} - aiColor3D (const aiColor3D& o) : r(o.r), g(o.g), b(o.b) {} - - aiColor3D &operator=(const aiColor3D &o) { - r = o.r; - g = o.g; - b = o.b; - return *this; - } - - /** Component-wise comparison */ - // TODO: add epsilon? - bool operator == (const aiColor3D& other) const - {return r == other.r && g == other.g && b == other.b;} - - /** Component-wise inverse comparison */ - // TODO: add epsilon? - bool operator != (const aiColor3D& other) const - {return r != other.r || g != other.g || b != other.b;} - - /** Component-wise comparison */ - // TODO: add epsilon? - bool operator < (const aiColor3D& other) const { - return r < other.r || ( r == other.r && (g < other.g || (g == other.g && b < other.b ) ) ); - } - - /** Component-wise addition */ - aiColor3D operator+(const aiColor3D& c) const { - return aiColor3D(r+c.r,g+c.g,b+c.b); - } - - /** Component-wise subtraction */ - aiColor3D operator-(const aiColor3D& c) const { - return aiColor3D(r-c.r,g-c.g,b-c.b); - } - - /** Component-wise multiplication */ - aiColor3D operator*(const aiColor3D& c) const { - return aiColor3D(r*c.r,g*c.g,b*c.b); - } - - /** Multiply with a scalar */ - aiColor3D operator*(ai_real f) const { - return aiColor3D(r*f,g*f,b*f); - } - - /** Access a specific color component */ - ai_real operator[](unsigned int i) const { - return *(&r + i); - } - - /** Access a specific color component */ - ai_real& operator[](unsigned int i) { - if ( 0 == i ) { - return r; - } else if ( 1 == i ) { - return g; - } else if ( 2 == i ) { - return b; - } - return r; - } - - /** Check whether a color is black */ - bool IsBlack() const { - static const ai_real epsilon = ai_real(10e-3); - return std::fabs( r ) < epsilon && std::fabs( g ) < epsilon && std::fabs( b ) < epsilon; - } - -#endif // !__cplusplus - - //! Red, green and blue color values - ai_real r, g, b; -}; // !struct aiColor3D - -// ---------------------------------------------------------------------------------- -/** Represents an UTF-8 string, zero byte terminated. - * - * The character set of an aiString is explicitly defined to be UTF-8. This Unicode - * transformation was chosen in the belief that most strings in 3d files are limited - * to ASCII, thus the character set needed to be strictly ASCII compatible. - * - * Most text file loaders provide proper Unicode input file handling, special unicode - * characters are correctly transcoded to UTF8 and are kept throughout the libraries' - * import pipeline. - * - * For most applications, it will be absolutely sufficient to interpret the - * aiString as ASCII data and work with it as one would work with a plain char*. - * Windows users in need of proper support for i.e asian characters can use the - * MultiByteToWideChar(), WideCharToMultiByte() WinAPI functionality to convert the - * UTF-8 strings to their working character set (i.e. MBCS, WideChar). - * - * We use this representation instead of std::string to be C-compatible. The - * (binary) length of such a string is limited to MAXLEN characters (including the - * the terminating zero). 
-*/ -struct aiString -{ -#ifdef __cplusplus - /** Default constructor, the string is set to have zero length */ - aiString() AI_NO_EXCEPT - : length( 0 ) { - data[0] = '\0'; - -#ifdef ASSIMP_BUILD_DEBUG - // Debug build: overwrite the string on its full length with ESC (27) - memset(data+1,27,MAXLEN-1); -#endif - } - - /** Copy constructor */ - aiString(const aiString& rOther) - : length(rOther.length) - { - // Crop the string to the maximum length - length = length>=MAXLEN?MAXLEN-1:length; - memcpy( data, rOther.data, length); - data[length] = '\0'; - } - - /** Constructor from std::string */ - explicit aiString(const std::string& pString) : - length( (ai_uint32) pString.length()) - { - length = length>=MAXLEN?MAXLEN-1:length; - memcpy( data, pString.c_str(), length); - data[length] = '\0'; - } - - /** Copy a std::string to the aiString */ - void Set( const std::string& pString) { - if( pString.length() > MAXLEN - 1) { - return; - } - length = (ai_uint32)pString.length(); - memcpy( data, pString.c_str(), length); - data[length] = 0; - } - - /** Copy a const char* to the aiString */ - void Set( const char* sz) { - const ai_int32 len = (ai_uint32) ::strlen(sz); - if( len > (ai_int32)MAXLEN - 1) { - return; - } - length = len; - memcpy( data, sz, len); - data[len] = 0; - } - - - /** Assignment operator */ - aiString& operator = (const aiString &rOther) { - if (this == &rOther) { - return *this; - } - - length = rOther.length;; - memcpy( data, rOther.data, length); - data[length] = '\0'; - return *this; - } - - - /** Assign a const char* to the string */ - aiString& operator = (const char* sz) { - Set(sz); - return *this; - } - - /** Assign a cstd::string to the string */ - aiString& operator = ( const std::string& pString) { - Set(pString); - return *this; - } - - /** Comparison operator */ - bool operator==(const aiString& other) const { - return (length == other.length && 0 == memcmp(data,other.data,length)); - } - - /** Inverse comparison operator */ - bool operator!=(const aiString& other) const { - return (length != other.length || 0 != memcmp(data,other.data,length)); - } - - /** Append a string to the string */ - void Append (const char* app) { - const ai_uint32 len = (ai_uint32) ::strlen(app); - if (!len) { - return; - } - if (length + len >= MAXLEN) { - return; - } - - memcpy(&data[length],app,len+1); - length += len; - } - - /** Clear the string - reset its length to zero */ - void Clear () { - length = 0; - data[0] = '\0'; - -#ifdef ASSIMP_BUILD_DEBUG - // Debug build: overwrite the string on its full length with ESC (27) - memset(data+1,27,MAXLEN-1); -#endif - } - - /** Returns a pointer to the underlying zero-terminated array of characters */ - const char* C_Str() const { - return data; - } - -#endif // !__cplusplus - - /** Binary length of the string excluding the terminal 0. This is NOT the - * logical length of strings containing UTF-8 multi-byte sequences! It's - * the number of bytes from the beginning of the string to its end.*/ - ai_uint32 length; - - /** String buffer. Size limit is MAXLEN */ - char data[MAXLEN]; -} ; // !struct aiString - - -// ---------------------------------------------------------------------------------- -/** Standard return type for some library functions. - * Rarely used, and if, mostly in the C API. 
- */ -typedef enum aiReturn -{ - /** Indicates that a function was successful */ - aiReturn_SUCCESS = 0x0, - - /** Indicates that a function failed */ - aiReturn_FAILURE = -0x1, - - /** Indicates that not enough memory was available - * to perform the requested operation - */ - aiReturn_OUTOFMEMORY = -0x3, - - /** @cond never - * Force 32-bit size enum - */ - _AI_ENFORCE_ENUM_SIZE = 0x7fffffff - - /// @endcond -} aiReturn; // !enum aiReturn - -// just for backwards compatibility, don't use these constants anymore -#define AI_SUCCESS aiReturn_SUCCESS -#define AI_FAILURE aiReturn_FAILURE -#define AI_OUTOFMEMORY aiReturn_OUTOFMEMORY - -// ---------------------------------------------------------------------------------- -/** Seek origins (for the virtual file system API). - * Much cooler than using SEEK_SET, SEEK_CUR or SEEK_END. - */ -enum aiOrigin -{ - /** Beginning of the file */ - aiOrigin_SET = 0x0, - - /** Current position of the file pointer */ - aiOrigin_CUR = 0x1, - - /** End of the file, offsets must be negative */ - aiOrigin_END = 0x2, - - /** @cond never - * Force 32-bit size enum - */ - _AI_ORIGIN_ENFORCE_ENUM_SIZE = 0x7fffffff - - /// @endcond -}; // !enum aiOrigin - -// ---------------------------------------------------------------------------------- -/** @brief Enumerates predefined log streaming destinations. - * Logging to these streams can be enabled with a single call to - * #LogStream::createDefaultStream. - */ -enum aiDefaultLogStream -{ - /** Stream the log to a file */ - aiDefaultLogStream_FILE = 0x1, - - /** Stream the log to std::cout */ - aiDefaultLogStream_STDOUT = 0x2, - - /** Stream the log to std::cerr */ - aiDefaultLogStream_STDERR = 0x4, - - /** MSVC only: Stream the log the the debugger - * (this relies on OutputDebugString from the Win32 SDK) - */ - aiDefaultLogStream_DEBUGGER = 0x8, - - /** @cond never - * Force 32-bit size enum - */ - _AI_DLS_ENFORCE_ENUM_SIZE = 0x7fffffff - /// @endcond -}; // !enum aiDefaultLogStream - -// just for backwards compatibility, don't use these constants anymore -#define DLS_FILE aiDefaultLogStream_FILE -#define DLS_STDOUT aiDefaultLogStream_STDOUT -#define DLS_STDERR aiDefaultLogStream_STDERR -#define DLS_DEBUGGER aiDefaultLogStream_DEBUGGER - -// ---------------------------------------------------------------------------------- -/** Stores the memory requirements for different components (e.g. meshes, materials, - * animations) of an import. All sizes are in bytes. - * @see Importer::GetMemoryRequirements() -*/ -struct aiMemoryInfo -{ -#ifdef __cplusplus - - /** Default constructor */ - aiMemoryInfo() AI_NO_EXCEPT - : textures (0) - , materials (0) - , meshes (0) - , nodes (0) - , animations (0) - , cameras (0) - , lights (0) - , total (0) - {} - -#endif - - /** Storage allocated for texture data */ - unsigned int textures; - - /** Storage allocated for material data */ - unsigned int materials; - - /** Storage allocated for mesh data */ - unsigned int meshes; - - /** Storage allocated for node data */ - unsigned int nodes; - - /** Storage allocated for animation data */ - unsigned int animations; - - /** Storage allocated for camera data */ - unsigned int cameras; - - /** Storage allocated for light data */ - unsigned int lights; - - /** Total storage allocated for the full import. */ - unsigned int total; -}; // !struct aiMemoryInfo - -#ifdef __cplusplus -} -#endif //! 
__cplusplus - -// Include implementation files -#include "vector2.inl" -#include "vector3.inl" -#include "color4.inl" -#include "quaternion.inl" -#include "matrix3x3.inl" -#include "matrix4x4.inl" - -#endif // AI_TYPES_H_INC diff --git a/thirdparty/assimp/include/assimp/vector2.h b/thirdparty/assimp/include/assimp/vector2.h deleted file mode 100644 index c8b1ebbbc2..0000000000 --- a/thirdparty/assimp/include/assimp/vector2.h +++ /dev/null @@ -1,111 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file vector2.h - * @brief 2D vector structure, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_VECTOR2D_H_INC -#define AI_VECTOR2D_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -# include <cmath> -#else -# include <math.h> -#endif - -#include "defs.h" - -// ---------------------------------------------------------------------------------- -/** Represents a two-dimensional vector. 
- */ - -#ifdef __cplusplus -template <typename TReal> -class aiVector2t { -public: - aiVector2t () : x(), y() {} - aiVector2t (TReal _x, TReal _y) : x(_x), y(_y) {} - explicit aiVector2t (TReal _xyz) : x(_xyz), y(_xyz) {} - aiVector2t (const aiVector2t& o) = default; - - void Set( TReal pX, TReal pY); - TReal SquareLength() const ; - TReal Length() const ; - aiVector2t& Normalize(); - - const aiVector2t& operator += (const aiVector2t& o); - const aiVector2t& operator -= (const aiVector2t& o); - const aiVector2t& operator *= (TReal f); - const aiVector2t& operator /= (TReal f); - - TReal operator[](unsigned int i) const; - - bool operator== (const aiVector2t& other) const; - bool operator!= (const aiVector2t& other) const; - - bool Equal(const aiVector2t& other, TReal epsilon = 1e-6) const; - - aiVector2t& operator= (TReal f); - const aiVector2t SymMul(const aiVector2t& o); - - template <typename TOther> - operator aiVector2t<TOther> () const; - - TReal x, y; -}; - -typedef aiVector2t<ai_real> aiVector2D; - -#else - -struct aiVector2D { - ai_real x, y; -}; - -#endif // __cplusplus - -#endif // AI_VECTOR2D_H_INC diff --git a/thirdparty/assimp/include/assimp/vector2.inl b/thirdparty/assimp/include/assimp/vector2.inl deleted file mode 100644 index 4bbf432ff8..0000000000 --- a/thirdparty/assimp/include/assimp/vector2.inl +++ /dev/null @@ -1,248 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------- -*/ - -/** @file vector2.inl - * @brief Inline implementation of aiVector2t<TReal> operators - */ -#pragma once -#ifndef AI_VECTOR2D_INL_INC -#define AI_VECTOR2D_INL_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -#include <assimp/vector2.h> - -#include <cmath> - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -template <typename TOther> -aiVector2t<TReal>::operator aiVector2t<TOther> () const { - return aiVector2t<TOther>(static_cast<TOther>(x),static_cast<TOther>(y)); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -void aiVector2t<TReal>::Set( TReal pX, TReal pY) { - x = pX; y = pY; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -TReal aiVector2t<TReal>::SquareLength() const { - return x*x + y*y; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -TReal aiVector2t<TReal>::Length() const { - return std::sqrt( SquareLength()); -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -aiVector2t<TReal>& aiVector2t<TReal>::Normalize() { - *this /= Length(); - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -const aiVector2t<TReal>& aiVector2t<TReal>::operator += (const aiVector2t& o) { - x += o.x; y += o.y; - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -const aiVector2t<TReal>& aiVector2t<TReal>::operator -= (const aiVector2t& o) { - x -= o.x; y -= o.y; - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -const aiVector2t<TReal>& aiVector2t<TReal>::operator *= (TReal f) { - x *= f; y *= f; - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -const aiVector2t<TReal>& aiVector2t<TReal>::operator /= (TReal f) { - x /= f; y /= f; - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -TReal aiVector2t<TReal>::operator[](unsigned int i) const { - switch (i) { - case 0: - return x; - case 1: - return y; - default: - break; - - } - return x; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -bool aiVector2t<TReal>::operator== (const aiVector2t& other) const { - return x == other.x && y == other.y; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -bool aiVector2t<TReal>::operator!= (const aiVector2t& other) const { - return x != other.x || y != other.y; -} - -// --------------------------------------------------------------------------- -template<typename TReal> -inline -bool aiVector2t<TReal>::Equal(const aiVector2t& other, TReal epsilon) const { - return - 
std::abs(x - other.x) <= epsilon && - std::abs(y - other.y) <= epsilon; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -aiVector2t<TReal>& aiVector2t<TReal>::operator= (TReal f) { - x = y = f; - return *this; -} - -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -inline -const aiVector2t<TReal> aiVector2t<TReal>::SymMul(const aiVector2t& o) { - return aiVector2t(x*o.x,y*o.y); -} - - -// ------------------------------------------------------------------------------------------------ -// symmetric addition -template <typename TReal> -inline -aiVector2t<TReal> operator + (const aiVector2t<TReal>& v1, const aiVector2t<TReal>& v2) { - return aiVector2t<TReal>( v1.x + v2.x, v1.y + v2.y); -} - -// ------------------------------------------------------------------------------------------------ -// symmetric subtraction -template <typename TReal> -inline -aiVector2t<TReal> operator - (const aiVector2t<TReal>& v1, const aiVector2t<TReal>& v2) { - return aiVector2t<TReal>( v1.x - v2.x, v1.y - v2.y); -} - -// ------------------------------------------------------------------------------------------------ -// scalar product -template <typename TReal> -inline -TReal operator * (const aiVector2t<TReal>& v1, const aiVector2t<TReal>& v2) { - return v1.x*v2.x + v1.y*v2.y; -} - -// ------------------------------------------------------------------------------------------------ -// scalar multiplication -template <typename TReal> -inline -aiVector2t<TReal> operator * ( TReal f, const aiVector2t<TReal>& v) { - return aiVector2t<TReal>( f*v.x, f*v.y); -} - -// ------------------------------------------------------------------------------------------------ -// and the other way around -template <typename TReal> -inline -aiVector2t<TReal> operator * ( const aiVector2t<TReal>& v, TReal f) { - return aiVector2t<TReal>( f*v.x, f*v.y); -} - -// ------------------------------------------------------------------------------------------------ -// scalar division -template <typename TReal> -inline -aiVector2t<TReal> operator / ( const aiVector2t<TReal>& v, TReal f) { - return v * (1/f); -} - -// ------------------------------------------------------------------------------------------------ -// vector division -template <typename TReal> -inline -aiVector2t<TReal> operator / ( const aiVector2t<TReal>& v, const aiVector2t<TReal>& v2) { - return aiVector2t<TReal>(v.x / v2.x,v.y / v2.y); -} - -// ------------------------------------------------------------------------------------------------ -// vector negation -template <typename TReal> -inline -aiVector2t<TReal> operator - ( const aiVector2t<TReal>& v) { - return aiVector2t<TReal>( -v.x, -v.y); -} - -#endif - -#endif // AI_VECTOR2D_INL_INC diff --git a/thirdparty/assimp/include/assimp/vector3.h b/thirdparty/assimp/include/assimp/vector3.h deleted file mode 100644 index fffeb12ad7..0000000000 --- a/thirdparty/assimp/include/assimp/vector3.h +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. 
- -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ -/** @file vector3.h - * @brief 3D vector structure, including operators when compiling in C++ - */ -#pragma once -#ifndef AI_VECTOR3D_H_INC -#define AI_VECTOR3D_H_INC - -#ifdef __GNUC__ -# pragma GCC system_header -#endif - -#ifdef __cplusplus -# include <cmath> -#else -# include <math.h> -#endif - -#include <assimp/defs.h> - -#ifdef __cplusplus - -template<typename TReal> class aiMatrix3x3t; -template<typename TReal> class aiMatrix4x4t; - -// --------------------------------------------------------------------------- -/** Represents a three-dimensional vector. 
*/ -template <typename TReal> -class aiVector3t { -public: - aiVector3t() AI_NO_EXCEPT : x(), y(), z() {} - aiVector3t(TReal _x, TReal _y, TReal _z) : x(_x), y(_y), z(_z) {} - explicit aiVector3t (TReal _xyz ) : x(_xyz), y(_xyz), z(_xyz) {} - aiVector3t( const aiVector3t& o ) = default; - - // combined operators - const aiVector3t& operator += (const aiVector3t& o); - const aiVector3t& operator -= (const aiVector3t& o); - const aiVector3t& operator *= (TReal f); - const aiVector3t& operator /= (TReal f); - - // transform vector by matrix - aiVector3t& operator *= (const aiMatrix3x3t<TReal>& mat); - aiVector3t& operator *= (const aiMatrix4x4t<TReal>& mat); - - // access a single element - TReal operator[](unsigned int i) const; - TReal& operator[](unsigned int i); - - // comparison - bool operator== (const aiVector3t& other) const; - bool operator!= (const aiVector3t& other) const; - bool operator < (const aiVector3t& other) const; - - bool Equal(const aiVector3t& other, TReal epsilon = 1e-6) const; - - template <typename TOther> - operator aiVector3t<TOther> () const; - - /** @brief Set the components of a vector - * @param pX X component - * @param pY Y component - * @param pZ Z component */ - void Set( TReal pX, TReal pY, TReal pZ); - - /** @brief Get the squared length of the vector - * @return Square length */ - TReal SquareLength() const; - - /** @brief Get the length of the vector - * @return length */ - TReal Length() const; - - - /** @brief Normalize the vector */ - aiVector3t& Normalize(); - - /** @brief Normalize the vector with extra check for zero vectors */ - aiVector3t& NormalizeSafe(); - - /** @brief Componentwise multiplication of two vectors - * - * Note that vec*vec yields the dot product. - * @param o Second factor */ - const aiVector3t SymMul(const aiVector3t& o); - - TReal x, y, z; -}; - - -typedef aiVector3t<ai_real> aiVector3D; - -#else - -struct aiVector3D { - ai_real x, y, z; -}; - -#endif // __cplusplus - -#ifdef __cplusplus - -#endif // __cplusplus - -#endif // AI_VECTOR3D_H_INC diff --git a/thirdparty/assimp/include/assimp/vector3.inl b/thirdparty/assimp/include/assimp/vector3.inl deleted file mode 100644 index 6682d3b32c..0000000000 --- a/thirdparty/assimp/include/assimp/vector3.inl +++ /dev/null @@ -1,309 +0,0 @@ -/* ---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file vector3.inl - * @brief Inline implementation of aiVector3t<TReal> operators - */ -#pragma once -#ifndef AI_VECTOR3D_INL_INC -#define AI_VECTOR3D_INL_INC - -#ifdef __cplusplus -#include <assimp/vector3.h> - -#include <cmath> - -// ------------------------------------------------------------------------------------------------ -/** Transformation of a vector by a 3x3 matrix */ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator * (const aiMatrix3x3t<TReal>& pMatrix, const aiVector3t<TReal>& pVector) { - aiVector3t<TReal> res; - res.x = pMatrix.a1 * pVector.x + pMatrix.a2 * pVector.y + pMatrix.a3 * pVector.z; - res.y = pMatrix.b1 * pVector.x + pMatrix.b2 * pVector.y + pMatrix.b3 * pVector.z; - res.z = pMatrix.c1 * pVector.x + pMatrix.c2 * pVector.y + pMatrix.c3 * pVector.z; - return res; -} - -// ------------------------------------------------------------------------------------------------ -/** Transformation of a vector by a 4x4 matrix */ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator * (const aiMatrix4x4t<TReal>& pMatrix, const aiVector3t<TReal>& pVector) { - aiVector3t<TReal> res; - res.x = pMatrix.a1 * pVector.x + pMatrix.a2 * pVector.y + pMatrix.a3 * pVector.z + pMatrix.a4; - res.y = pMatrix.b1 * pVector.x + pMatrix.b2 * pVector.y + pMatrix.b3 * pVector.z + pMatrix.b4; - res.z = pMatrix.c1 * pVector.x + pMatrix.c2 * pVector.y + pMatrix.c3 * pVector.z + pMatrix.c4; - return res; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -template <typename TOther> -aiVector3t<TReal>::operator aiVector3t<TOther> () const { - return aiVector3t<TOther>(static_cast<TOther>(x),static_cast<TOther>(y),static_cast<TOther>(z)); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -void aiVector3t<TReal>::Set( TReal pX, TReal pY, TReal pZ) { - x = pX; - y = pY; - z = pZ; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal aiVector3t<TReal>::SquareLength() const { - return x*x + y*y + z*z; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal aiVector3t<TReal>::Length() const { - return std::sqrt( SquareLength()); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal>& aiVector3t<TReal>::Normalize() { - *this /= Length(); - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal>& aiVector3t<TReal>::NormalizeSafe() { - TReal len = Length(); - if ( len > 
static_cast< TReal >( 0 ) ) { - *this /= len; - } - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiVector3t<TReal>& aiVector3t<TReal>::operator += (const aiVector3t<TReal>& o) { - x += o.x; - y += o.y; - z += o.z; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiVector3t<TReal>& aiVector3t<TReal>::operator -= (const aiVector3t<TReal>& o) { - x -= o.x; - y -= o.y; - z -= o.z; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiVector3t<TReal>& aiVector3t<TReal>::operator *= (TReal f) { - x *= f; - y *= f; - z *= f; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiVector3t<TReal>& aiVector3t<TReal>::operator /= (TReal f) { - const TReal invF = (TReal) 1.0 / f; - x *= invF; - y *= invF; - z *= invF; - - return *this; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal>& aiVector3t<TReal>::operator *= (const aiMatrix3x3t<TReal>& mat){ - return (*this = mat * (*this)); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal>& aiVector3t<TReal>::operator *= (const aiMatrix4x4t<TReal>& mat){ - return (*this = mat * (*this)); -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal aiVector3t<TReal>::operator[](unsigned int i) const { - switch (i) { - case 0: - return x; - case 1: - return y; - case 2: - return z; - default: - break; - } - return x; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -TReal& aiVector3t<TReal>::operator[](unsigned int i) { -// return *(&x + i); - switch (i) { - case 0: - return x; - case 1: - return y; - case 2: - return z; - default: - break; - } - return x; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiVector3t<TReal>::operator== (const aiVector3t<TReal>& other) const { - return x == other.x && y == other.y && z == other.z; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiVector3t<TReal>::operator!= (const aiVector3t<TReal>& other) const { - return x != other.x || y != other.y || z != other.z; -} -// --------------------------------------------------------------------------- -template<typename TReal> -AI_FORCE_INLINE -bool aiVector3t<TReal>::Equal(const aiVector3t<TReal>& other, TReal epsilon) const { - return - std::abs(x - other.x) <= epsilon && - std::abs(y - other.y) <= epsilon && - std::abs(z - other.z) <= epsilon; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -bool aiVector3t<TReal>::operator < (const aiVector3t<TReal>& other) const { - return x 
!= other.x ? x < other.x : y != other.y ? y < other.y : z < other.z; -} -// ------------------------------------------------------------------------------------------------ -template <typename TReal> -AI_FORCE_INLINE -const aiVector3t<TReal> aiVector3t<TReal>::SymMul(const aiVector3t<TReal>& o) { - return aiVector3t<TReal>(x*o.x,y*o.y,z*o.z); -} -// ------------------------------------------------------------------------------------------------ -// symmetric addition -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator + (const aiVector3t<TReal>& v1, const aiVector3t<TReal>& v2) { - return aiVector3t<TReal>( v1.x + v2.x, v1.y + v2.y, v1.z + v2.z); -} -// ------------------------------------------------------------------------------------------------ -// symmetric subtraction -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator - (const aiVector3t<TReal>& v1, const aiVector3t<TReal>& v2) { - return aiVector3t<TReal>( v1.x - v2.x, v1.y - v2.y, v1.z - v2.z); -} -// ------------------------------------------------------------------------------------------------ -// scalar product -template <typename TReal> -AI_FORCE_INLINE -TReal operator * (const aiVector3t<TReal>& v1, const aiVector3t<TReal>& v2) { - return v1.x*v2.x + v1.y*v2.y + v1.z*v2.z; -} -// ------------------------------------------------------------------------------------------------ -// scalar multiplication -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator * ( TReal f, const aiVector3t<TReal>& v) { - return aiVector3t<TReal>( f*v.x, f*v.y, f*v.z); -} -// ------------------------------------------------------------------------------------------------ -// and the other way around -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator * ( const aiVector3t<TReal>& v, TReal f) { - return aiVector3t<TReal>( f*v.x, f*v.y, f*v.z); -} -// ------------------------------------------------------------------------------------------------ -// scalar division -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator / ( const aiVector3t<TReal>& v, TReal f) { - return v * (1/f); -} -// ------------------------------------------------------------------------------------------------ -// vector division -template <typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator / ( const aiVector3t<TReal>& v, const aiVector3t<TReal>& v2) { - return aiVector3t<TReal>(v.x / v2.x,v.y / v2.y,v.z / v2.z); -} -// ------------------------------------------------------------------------------------------------ -// cross product -template<typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator ^ ( const aiVector3t<TReal>& v1, const aiVector3t<TReal>& v2) { - return aiVector3t<TReal>( v1.y*v2.z - v1.z*v2.y, v1.z*v2.x - v1.x*v2.z, v1.x*v2.y - v1.y*v2.x); -} -// ------------------------------------------------------------------------------------------------ -// vector negation -template<typename TReal> -AI_FORCE_INLINE -aiVector3t<TReal> operator - ( const aiVector3t<TReal>& v) { - return aiVector3t<TReal>( -v.x, -v.y, -v.z); -} - -// ------------------------------------------------------------------------------------------------ - -#endif // __cplusplus -#endif // AI_VECTOR3D_INL_INC diff --git a/thirdparty/assimp/include/assimp/version.h b/thirdparty/assimp/include/assimp/version.h deleted file mode 100644 index 2fdd37a43c..0000000000 --- a/thirdparty/assimp/include/assimp/version.h +++ /dev/null @@ -1,115 +0,0 @@ -/* 
---------------------------------------------------------------------------- -Open Asset Import Library (assimp) ---------------------------------------------------------------------------- - -Copyright (c) 2006-2019, assimp team - - - -All rights reserved. - -Redistribution and use of this software in source and binary forms, -with or without modification, are permitted provided that the following -conditions are met: - -* Redistributions of source code must retain the above - copyright notice, this list of conditions and the - following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - -* Neither the name of the assimp team, nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior - written permission of the assimp team. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------- -*/ - -/** @file version.h - * @brief Functions to query the version of the Assimp runtime, check - * compile flags, ... - */ -#pragma once -#ifndef AI_VERSION_H_INC -#define AI_VERSION_H_INC - -#include <assimp/defs.h> - -#ifdef __cplusplus -extern "C" { -#endif - -// --------------------------------------------------------------------------- -/** @brief Returns a string with legal copyright and licensing information - * about Assimp. The string may include multiple lines. - * @return Pointer to static string. - */ -ASSIMP_API const char* aiGetLegalString (void); - -// --------------------------------------------------------------------------- -/** @brief Returns the current minor version number of Assimp. - * @return Minor version of the Assimp runtime the application was - * linked/built against - */ -ASSIMP_API unsigned int aiGetVersionMinor (void); - -// --------------------------------------------------------------------------- -/** @brief Returns the current major version number of Assimp. - * @return Major version of the Assimp runtime the application was - * linked/built against - */ -ASSIMP_API unsigned int aiGetVersionMajor (void); - -// --------------------------------------------------------------------------- -/** @brief Returns the repository revision of the Assimp runtime. - * @return SVN Repository revision number of the Assimp runtime the - * application was linked/built against. - */ -ASSIMP_API unsigned int aiGetVersionRevision (void); - -// --------------------------------------------------------------------------- -/** @brief Returns the branchname of the Assimp runtime. - * @return The current branch name. - */ -ASSIMP_API const char *aiGetBranchName(); - -//! 
Assimp was compiled as a shared object (Windows: DLL) -#define ASSIMP_CFLAGS_SHARED 0x1 -//! Assimp was compiled against STLport -#define ASSIMP_CFLAGS_STLPORT 0x2 -//! Assimp was compiled as a debug build -#define ASSIMP_CFLAGS_DEBUG 0x4 - -//! Assimp was compiled with ASSIMP_BUILD_BOOST_WORKAROUND defined -#define ASSIMP_CFLAGS_NOBOOST 0x8 -//! Assimp was compiled with ASSIMP_BUILD_SINGLETHREADED defined -#define ASSIMP_CFLAGS_SINGLETHREADED 0x10 - -// --------------------------------------------------------------------------- -/** @brief Returns assimp's compile flags - * @return Any bitwise combination of the ASSIMP_CFLAGS_xxx constants. - */ -ASSIMP_API unsigned int aiGetCompileFlags (void); - -#ifdef __cplusplus -} // end extern "C" -#endif - -#endif // !! #ifndef AI_VERSION_H_INC - diff --git a/thirdparty/assimp/revision.h b/thirdparty/assimp/revision.h deleted file mode 100644 index 66eb875303..0000000000 --- a/thirdparty/assimp/revision.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef ASSIMP_REVISION_H_INC -#define ASSIMP_REVISION_H_INC - -#define GitVersion 0x308db73d -#define GitBranch "master" - -#define VER_MAJOR 5 -#define VER_MINOR 0 -#define VER_PATCH 0 -#define VER_BUILD 0 - -#define STR_HELP(x) #x -#define STR(x) STR_HELP(x) - -#define VER_FILEVERSION VER_MAJOR,VER_MINOR,VER_PATCH,VER_BUILD -#if (GitVersion == 0) -#define VER_FILEVERSION_STR STR(VER_MAJOR) "." STR(VER_MINOR) "." STR(VER_PATCH) "." STR(VER_BUILD) -#else -#define VER_FILEVERSION_STR STR(VER_MAJOR) "." STR(VER_MINOR) "." STR(VER_PATCH) "." STR(VER_BUILD) " (Commit 308db73d)" -#endif - -#ifdef NDEBUG -#define VER_ORIGINAL_FILENAME_STR "assimp.dll" -#else -#define VER_ORIGINAL_FILENAME_STR "assimp.dll" -#endif // NDEBUG - -#endif // ASSIMP_REVISION_H_INC diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h b/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h index 6eb0eeeada..20b9e54014 100644 --- a/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h +++ b/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h @@ -36,5 +36,6 @@ static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fu static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density"; static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation"; static const char* const E_SPV_EXT_shader_atomic_float_add = "SPV_EXT_shader_atomic_float_add"; +static const char* const E_SPV_EXT_shader_image_int64 = "SPV_EXT_shader_image_int64"; #endif // #ifndef GLSLextEXT_H diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h b/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h index d783a8f299..9610c6eeee 100644 --- a/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h +++ b/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h @@ -48,4 +48,7 @@ static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shade static const char* const E_SPV_KHR_non_semantic_info = "SPV_KHR_non_semantic_info"; static const char* const E_SPV_KHR_ray_tracing = "SPV_KHR_ray_tracing"; static const char* const E_SPV_KHR_ray_query = "SPV_KHR_ray_query"; +static const char* const E_SPV_KHR_fragment_shading_rate = "SPV_KHR_fragment_shading_rate"; +static const char* const E_SPV_KHR_terminate_invocation = "SPV_KHR_terminate_invocation"; + #endif // #ifndef GLSLextKHR_H diff --git a/thirdparty/glslang/SPIRV/GlslangToSpv.cpp b/thirdparty/glslang/SPIRV/GlslangToSpv.cpp index c85541603f..1adebef878 100644 --- a/thirdparty/glslang/SPIRV/GlslangToSpv.cpp +++ b/thirdparty/glslang/SPIRV/GlslangToSpv.cpp @@ -149,6 +149,7 @@ protected: spv::Decoration 
TranslateInterpolationDecoration(const glslang::TQualifier& qualifier); spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier); spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier); + spv::Decoration TranslateNonUniformDecoration(const spv::Builder::AccessChain::CoherentFlags& coherentFlags); spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type); spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); @@ -189,6 +190,7 @@ protected: bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam); void makeFunctions(const glslang::TIntermSequence&); void makeGlobalInitializers(const glslang::TIntermSequence&); + void collectRayTracingLinkerObjects(); void visitFunctions(const glslang::TIntermSequence&); void handleFunctionEntry(const glslang::TIntermAggregate* node); void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments, @@ -272,6 +274,9 @@ protected: // requiring local translation to and from SPIR-V type on every access. // Maps <builtin-variable-id -> AST-required-type-id> std::unordered_map<spv::Id, spv::Id> forceType; + + // Used later for generating OpTraceKHR/OpExecuteCallableKHR + std::unordered_map<unsigned int, glslang::TIntermSymbol *> locationToSymbol[2]; }; // @@ -539,6 +544,20 @@ spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glsl return spv::DecorationMax; } +// If lvalue flags contains nonUniform, return SPIR-V NonUniform decoration. +spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration( + const spv::Builder::AccessChain::CoherentFlags& coherentFlags) +{ +#ifndef GLSLANG_WEB + if (coherentFlags.isNonUniform()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderNonUniformEXT); + return spv::DecorationNonUniformEXT; + } else +#endif + return spv::DecorationMax; +} + spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess( const spv::Builder::AccessChain::CoherentFlags &coherentFlags) { @@ -614,6 +633,7 @@ spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCohere flags.volatil; flags.isImage = type.getBasicType() == glslang::EbtSampler; #endif + flags.nonUniform = type.getQualifier().nonUniform; return flags; } @@ -709,13 +729,20 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI return spv::BuiltInCullDistance; case glslang::EbvViewportIndex: - builder.addCapability(spv::CapabilityMultiViewport); + if (glslangIntermediate->getStage() == EShLangGeometry || + glslangIntermediate->getStage() == EShLangFragment) { + builder.addCapability(spv::CapabilityMultiViewport); + } if (glslangIntermediate->getStage() == EShLangVertex || glslangIntermediate->getStage() == EShLangTessControl || glslangIntermediate->getStage() == EShLangTessEvaluation) { - builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); - builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + if (builder.getSpvVersion() < spv::Spv_1_5) { + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } + else + 
builder.addCapability(spv::CapabilityShaderViewportIndex); } return spv::BuiltInViewportIndex; @@ -734,13 +761,19 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI if (glslangIntermediate->getStage() == EShLangMeshNV) { return spv::BuiltInLayer; } - builder.addCapability(spv::CapabilityGeometry); + if (glslangIntermediate->getStage() == EShLangGeometry || + glslangIntermediate->getStage() == EShLangFragment) { + builder.addCapability(spv::CapabilityGeometry); + } if (glslangIntermediate->getStage() == EShLangVertex || glslangIntermediate->getStage() == EShLangTessControl || glslangIntermediate->getStage() == EShLangTessEvaluation) { - builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); - builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + if (builder.getSpvVersion() < spv::Spv_1_5) { + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } else + builder.addCapability(spv::CapabilityShaderLayer); } return spv::BuiltInLayer; @@ -769,6 +802,16 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI builder.addCapability(spv::CapabilityStencilExportEXT); return spv::BuiltInFragStencilRefEXT; + case glslang::EbvShadingRateKHR: + builder.addExtension(spv::E_SPV_KHR_fragment_shading_rate); + builder.addCapability(spv::CapabilityFragmentShadingRateKHR); + return spv::BuiltInShadingRateKHR; + + case glslang::EbvPrimitiveShadingRateKHR: + builder.addExtension(spv::E_SPV_KHR_fragment_shading_rate); + builder.addCapability(spv::CapabilityFragmentShadingRateKHR); + return spv::BuiltInPrimitiveShadingRateKHR; + case glslang::EbvInvocationId: return spv::BuiltInInvocationId; case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner; case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter; @@ -963,7 +1006,17 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI case glslang::EbvInstanceCustomIndex: return spv::BuiltInInstanceCustomIndexKHR; case glslang::EbvHitT: - return spv::BuiltInHitTKHR; + { + // this is a GLSL alias of RayTmax + // in SPV_NV_ray_tracing it has a dedicated builtin + // but in SPV_KHR_ray_tracing it gets mapped to RayTmax + auto& extensions = glslangIntermediate->getRequestedExtensions(); + if (extensions.find("GL_NV_ray_tracing") != extensions.end()) { + return spv::BuiltInHitTNV; + } else { + return spv::BuiltInRayTmaxKHR; + } + } case glslang::EbvHitKind: return spv::BuiltInHitKindKHR; case glslang::EbvObjectToWorld: @@ -1071,6 +1124,10 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy builder.addCapability(spv::CapabilityStorageImageExtendedFormats); break; + case glslang::ElfR64ui: + case glslang::ElfR64i: + builder.addExtension(spv::E_SPV_EXT_shader_image_int64); + builder.addCapability(spv::CapabilityInt64ImageEXT); default: break; } @@ -1117,6 +1174,8 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy case glslang::ElfRg8ui: return spv::ImageFormatRg8ui; case glslang::ElfR16ui: return spv::ImageFormatR16ui; case glslang::ElfR8ui: return spv::ImageFormatR8ui; + case glslang::ElfR64ui: return spv::ImageFormatR64ui; + case glslang::ElfR64i: return spv::ImageFormatR64i; default: return spv::ImageFormatMax; } } @@ -1187,7 +1246,7 @@ spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang: spv::StorageClass 
TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type) { if (type.getBasicType() == glslang::EbtRayQuery) - return spv::StorageClassFunction; + return spv::StorageClassPrivate; if (type.getQualifier().isPipeInput()) return spv::StorageClassInput; if (type.getQualifier().isPipeOutput()) @@ -1353,6 +1412,8 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa if (parent.writeonly) child.writeonly = true; #endif + if (parent.nonUniform) + child.nonUniform = true; } bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier) @@ -1454,7 +1515,7 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, } if (glslangIntermediate->getLayoutPrimitiveCulling()) { - builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingProvisionalKHR); + builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingKHR); } unsigned int mode; @@ -1621,7 +1682,7 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, { auto& extensions = glslangIntermediate->getRequestedExtensions(); if (extensions.find("GL_NV_ray_tracing") == extensions.end()) { - builder.addCapability(spv::CapabilityRayTracingProvisionalKHR); + builder.addCapability(spv::CapabilityRayTracingKHR); builder.addExtension("SPV_KHR_ray_tracing"); } else { @@ -1710,6 +1771,12 @@ void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol) if (symbol->getType().getQualifier().isSpecConstant()) spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); +#ifdef ENABLE_HLSL + // Skip symbol handling if it is string-typed + if (symbol->getBasicType() == glslang::EbtString) + return; +#endif + // getSymbolId() will set up all the IO decorations on the first call. // Formal function parameters were mapped during makeFunctions(). spv::Id id = getSymbolId(symbol); @@ -1852,9 +1919,11 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T spv::Id leftRValue = accessChainLoad(node->getLeft()->getType()); // do the operation + spv::Builder::AccessChain::CoherentFlags coherentFlags = TranslateCoherent(node->getLeft()->getType()); + coherentFlags |= TranslateCoherent(node->getRight()->getType()); OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), TranslateNoContractionDecoration(node->getType().getQualifier()), - TranslateNonUniformDecoration(node->getType().getQualifier()) }; + TranslateNonUniformDecoration(coherentFlags) }; rValue = createBinaryOperation(node->getOp(), decorations, convertGlslangToSpvType(node->getType()), leftRValue, rValue, node->getType().getBasicType()); @@ -1885,13 +1954,16 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector() && node->getOp() == glslang::EOpIndexDirect) { + // Swizzle is uniform so propagate uniform into access chain + spv::Builder::AccessChain::CoherentFlags coherentFlags = TranslateCoherent(node->getLeft()->getType()); + coherentFlags.nonUniform = 0; // This is essentially a hard-coded vector swizzle of size 1, // so short circuit the access-chain stuff with a swizzle. 
std::vector<unsigned> swizzle; swizzle.push_back(glslangIndex); int dummySize; builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()), - TranslateCoherent(node->getLeft()->getType()), + coherentFlags, glslangIntermediate->getBaseAlignmentScalar( node->getLeft()->getType(), dummySize)); } else { @@ -1922,9 +1994,14 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T } } + // Struct reference propagates uniform lvalue + spv::Builder::AccessChain::CoherentFlags coherentFlags = + TranslateCoherent(node->getLeft()->getType()); + coherentFlags.nonUniform = 0; + // normal case for indexing array or structure or block builder.accessChainPush(builder.makeIntConstant(spvIndex), - TranslateCoherent(node->getLeft()->getType()), + coherentFlags, node->getLeft()->getType().getBufferReferenceAlignment()); // Add capabilities here for accessing PointSize and clip/cull distance. @@ -1958,15 +2035,20 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T // restore the saved access chain builder.setAccessChain(partial); + // Only if index is nonUniform should we propagate nonUniform into access chain + spv::Builder::AccessChain::CoherentFlags index_flags = TranslateCoherent(node->getRight()->getType()); + spv::Builder::AccessChain::CoherentFlags coherent_flags = TranslateCoherent(node->getLeft()->getType()); + coherent_flags.nonUniform = index_flags.nonUniform; + if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) { int dummySize; - builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()), - TranslateCoherent(node->getLeft()->getType()), + builder.accessChainPushComponent( + index, convertGlslangToSpvType(node->getLeft()->getType()), coherent_flags, glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize)); } else - builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()), - node->getLeft()->getType().getBufferReferenceAlignment()); + builder.accessChainPush(index, coherent_flags, + node->getLeft()->getType().getBufferReferenceAlignment()); } return false; case glslang::EOpVectorSwizzle: @@ -2050,8 +2132,9 @@ std::pair<spv::Id, spv::Id> TGlslangToSpvTraverser::getForcedType(glslang::TBuil // these require changing a 64-bit scaler -> a vector of 32-bit components if (glslangType.isVector()) break; - std::pair<spv::Id, spv::Id> ret(builder.makeVectorType(builder.makeUintType(32), 4), - builder.makeUintType(64)); + spv::Id ivec4_type = builder.makeVectorType(builder.makeUintType(32), 4); + spv::Id uint64_type = builder.makeUintType(64); + std::pair<spv::Id, spv::Id> ret(ivec4_type, uint64_type); return ret; } // There are no SPIR-V builtins defined for these and map onto original non-transposed @@ -2090,7 +2173,7 @@ spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object) // handle 32-bit v.xy* -> 64-bit builder.clearAccessChain(); builder.setAccessChainLValue(object); - object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId); + object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, spv::DecorationMax, objectTypeId); std::vector<spv::Id> components; components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 0)); components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 1)); @@ -2106,7 +2189,7 @@ spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object) 
// and we insert a transpose after loading the original non-transposed builtins builder.clearAccessChain(); builder.setAccessChainLValue(object); - object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId); + object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, spv::DecorationMax, objectTypeId); return builder.createUnaryOp(spv::OpTranspose, desiredTypeId, object); } else { @@ -2292,7 +2375,8 @@ bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TI // The result of operation is always stored, but conditionally the // consumed result. The consumed result is always an r-value. - builder.accessChainStore(result); + builder.accessChainStore(result, + TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags)); builder.clearAccessChain(); if (node->getOp() == glslang::EOpPreIncrement || node->getOp() == glslang::EOpPreDecrement) @@ -2421,6 +2505,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt // anything else gets there, so visit out of order, doing them all now. makeGlobalInitializers(node->getAsAggregate()->getSequence()); + //Pre process linker objects for ray tracing stages + if (glslangIntermediate->isRayTracingStage()) + collectRayTracingLinkerObjects(); + // Initializers are done, don't want to visit again, but functions and link objects need to be processed, // so do them manually. visitFunctions(node->getAsAggregate()->getSequence()); @@ -2611,6 +2699,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt else constructed = builder.createConstructor(precision, arguments, resultType()); + if (node->getType().getQualifier().isNonUniform()) { + builder.addDecoration(constructed, spv::DecorationNonUniformEXT); + } + builder.clearAccessChain(); builder.setAccessChainRValue(constructed); @@ -2726,10 +2818,12 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt binOp = node->getOp(); break; - case glslang::EOpIgnoreIntersection: - case glslang::EOpTerminateRay: - case glslang::EOpTrace: - case glslang::EOpExecuteCallable: + case glslang::EOpIgnoreIntersectionNV: + case glslang::EOpTerminateRayNV: + case glslang::EOpTraceNV: + case glslang::EOpTraceKHR: + case glslang::EOpExecuteCallableNV: + case glslang::EOpExecuteCallableKHR: case glslang::EOpWritePackedPrimitiveIndices4x8NV: noReturnValue = true; break; @@ -2738,7 +2832,7 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt case glslang::EOpRayQueryGenerateIntersection: case glslang::EOpRayQueryConfirmIntersection: builder.addExtension("SPV_KHR_ray_query"); - builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + builder.addCapability(spv::CapabilityRayQueryKHR); noReturnValue = true; break; case glslang::EOpRayQueryProceed: @@ -2761,7 +2855,7 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt case glslang::EOpRayQueryGetIntersectionObjectToWorld: case glslang::EOpRayQueryGetIntersectionWorldToObject: builder.addExtension("SPV_KHR_ray_query"); - builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + builder.addCapability(spv::CapabilityRayQueryKHR); break; case glslang::EOpCooperativeMatrixLoad: case glslang::EOpCooperativeMatrixStore: @@ -3014,11 +3108,18 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt )) { bool cond = glslangOperands[arg]->getAsConstantUnion()->getConstArray()[0].getBConst(); operands.push_back(builder.makeIntConstant(cond 
? 1 : 0)); - } - else { + } else if ((arg == 10 && glslangOp == glslang::EOpTraceKHR) || + (arg == 1 && glslangOp == glslang::EOpExecuteCallableKHR)) { + const int opdNum = glslangOp == glslang::EOpTraceKHR ? 10 : 1; + const int set = glslangOp == glslang::EOpTraceKHR ? 0 : 1; + const int location = glslangOperands[opdNum]->getAsConstantUnion()->getConstArray()[0].getUConst(); + auto itNode = locationToSymbol[set].find(location); + visitSymbol(itNode->second); + spv::Id symId = getSymbolId(itNode->second); + operands.push_back(symId); + } else { operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType())); - } - + } } } @@ -3089,7 +3190,8 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt for (unsigned int i = 0; i < temporaryLvalues.size(); ++i) { builder.setAccessChain(complexLvalues[i]); - builder.accessChainStore(builder.createLoad(temporaryLvalues[i], spv::NoPrecision)); + builder.accessChainStore(builder.createLoad(temporaryLvalues[i], spv::NoPrecision), + TranslateNonUniformDecoration(complexLvalues[i].coherentFlags)); } } @@ -3420,7 +3522,11 @@ bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::T switch (node->getFlowOp()) { case glslang::EOpKill: - builder.makeDiscard(); + builder.makeStatementTerminator(spv::OpKill, "post-discard"); + break; + case glslang::EOpTerminateInvocation: + builder.addExtension(spv::E_SPV_KHR_terminate_invocation); + builder.makeStatementTerminator(spv::OpTerminateInvocation, "post-terminate-invocation"); break; case glslang::EOpBreak: if (breakForLoop.top()) @@ -3457,6 +3563,12 @@ bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::T builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation); builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT); break; + case glslang::EOpTerminateRayKHR: + builder.makeStatementTerminator(spv::OpTerminateRayKHR, "post-terminateRayKHR"); + break; + case glslang::EOpIgnoreIntersectionKHR: + builder.makeStatementTerminator(spv::OpIgnoreIntersectionKHR, "post-ignoreIntersectionKHR"); + break; #endif default: @@ -3564,6 +3676,12 @@ spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler) builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch); builder.addCapability(spv::CapabilityFloat16ImageAMD); return builder.makeFloatType(16); + case glslang::EbtInt64: return builder.makeIntType(64); + builder.addExtension(spv::E_SPV_EXT_shader_image_int64); + builder.addCapability(spv::CapabilityFloat16ImageAMD); + case glslang::EbtUint64: return builder.makeUintType(64); + builder.addExtension(spv::E_SPV_EXT_shader_image_int64); + builder.addCapability(spv::CapabilityFloat16ImageAMD); #endif default: assert(0); @@ -3670,10 +3788,36 @@ spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& ty spvType = builder.makeUintType(32); break; case glslang::EbtAccStruct: + switch (glslangIntermediate->getStage()) { + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + // these all should have the RayTracingNV/KHR capability already + break; + default: + { + auto& extensions = glslangIntermediate->getRequestedExtensions(); + if (extensions.find("GL_EXT_ray_query") != extensions.end()) { + builder.addExtension(spv::E_SPV_KHR_ray_query); + builder.addCapability(spv::CapabilityRayQueryKHR); + } + } + break; + } spvType = builder.makeAccelerationStructureType(); break; case 
glslang::EbtRayQuery: - spvType = builder.makeRayQueryType(); + { + auto& extensions = glslangIntermediate->getRequestedExtensions(); + if (extensions.find("GL_EXT_ray_query") != extensions.end()) { + builder.addExtension(spv::E_SPV_KHR_ray_query); + builder.addCapability(spv::CapabilityRayQueryKHR); + } + spvType = builder.makeRayQueryType(); + } break; case glslang::EbtReference: { @@ -3929,6 +4073,8 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type, // Name and decorate the non-hidden members int offset = -1; int locationOffset = 0; // for use within the members of this struct + bool memberLocationInvalid = type.isArrayOfArrays() || + (type.isArray() && (type.getQualifier().isArrayedIo(glslangIntermediate->getStage()) == false)); for (int i = 0; i < (int)glslangMembers->size(); i++) { glslang::TType& glslangMember = *(*glslangMembers)[i].type; int member = i; @@ -3981,7 +4127,7 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type, // just track whether a member needs to be decorated. // Ignore member locations if the container is an array, as that's // ill-specified and decisions have been made to not allow this. - if (! type.isArray() && memberQualifier.hasLocation()) + if (!memberLocationInvalid && memberQualifier.hasLocation()) builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation); if (qualifier.hasLocation()) // track for upcoming inheritance @@ -4087,6 +4233,7 @@ spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type) alignment |= type.getBufferReferenceAlignment(); spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type), + TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags), TranslateNonUniformDecoration(type.getQualifier()), nominalTypeId, spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask), @@ -4154,7 +4301,7 @@ void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::I unsigned int alignment = builder.getAccessChain().alignment; alignment |= type.getBufferReferenceAlignment(); - builder.accessChainStore(rvalue, + builder.accessChainStore(rvalue, TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags), spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask), TranslateMemoryScope(coherentFlags), alignment); @@ -4542,7 +4689,39 @@ void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequen } } } +// Walk over all linker objects to create a map for payload and callable data linker objects +// and their location to be used during codegen for OpTraceKHR and OpExecuteCallableKHR +// This is done here since it is possible that these linker objects are not be referenced in the AST +void TGlslangToSpvTraverser::collectRayTracingLinkerObjects() +{ + glslang::TIntermAggregate* linkerObjects = glslangIntermediate->findLinkerObjects(); + for (auto& objSeq : linkerObjects->getSequence()) { + auto objNode = objSeq->getAsSymbolNode(); + if (objNode != nullptr) { + if (objNode->getQualifier().hasLocation()) { + unsigned int location = objNode->getQualifier().layoutLocation; + auto st = objNode->getQualifier().storage; + int set; + switch (st) + { + case glslang::EvqPayload: + case glslang::EvqPayloadIn: + set = 0; + break; + case glslang::EvqCallableData: + case glslang::EvqCallableDataIn: + set = 1; + break; + default: + set = -1; + } + if (set != -1) + 
locationToSymbol[set].insert(std::make_pair(location, objNode)); + } + } + } +} // Process all the functions, while skipping initializers. void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions) { @@ -4686,8 +4865,10 @@ void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& } if (lvalue) { - arguments.push_back(builder.accessChainGetLValue()); + spv::Id lvalue_id = builder.accessChainGetLValue(); + arguments.push_back(lvalue_id); lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + builder.addDecoration(lvalue_id, TranslateNonUniformDecoration(lvalueCoherentFlags)); lvalueCoherentFlags |= TranslateCoherent(glslangArguments[i]->getAsTyped()->getType()); } else #endif @@ -4750,12 +4931,15 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint; + if (builder.isSampledImage(params.sampler) && + ((cracked.query && node->getOp() != glslang::EOpTextureQueryLod) || cracked.fragMask || cracked.fetch)) { + params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); + if (imageType.getQualifier().isNonUniform()) { + builder.addDecoration(params.sampler, spv::DecorationNonUniformEXT); + } + } // Check for queries if (cracked.query) { - // OpImageQueryLod works on a sampled image, for other queries the image has to be extracted first - if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler)) - params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); - switch (node->getOp()) { case glslang::EOpImageQuerySize: case glslang::EOpTextureQuerySize: @@ -5009,10 +5193,6 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO auto opIt = arguments.begin(); std::vector<spv::Id> operands; - // Extract the image if necessary - if (builder.isSampledImage(params.sampler)) - params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); - operands.push_back(params.sampler); ++opIt; @@ -5073,13 +5253,6 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO bias = true; } - // See if the sampler param should really be just the SPV image part - if (cracked.fetch) { - // a fetch needs to have the image extracted first - if (builder.isSampledImage(params.sampler)) - params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); - } - #ifndef GLSLANG_WEB if (cracked.gather) { const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions(); @@ -5239,7 +5412,7 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO builder.accessChainPush(builder.makeIntConstant(i), flags, 0); builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1), - i+1)); + i+1), TranslateNonUniformDecoration(imageType.getQualifier())); } return builder.createCompositeExtract(res, resultType(), 0); } @@ -5375,6 +5548,7 @@ spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAgg // 3. Make the call. spv::Id result = builder.createFunctionCall(function, spvArgs); builder.setPrecision(result, TranslatePrecisionDecoration(node->getType())); + builder.addDecoration(result, TranslateNonUniformDecoration(node->getType().getQualifier())); // 4. Copy back out an "out" arguments. 
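
// collectRayTracingLinkerObjects() above exists because traceRayEXT() and
// executeCallableEXT() name their payload/callable-data block by *location*
// (the last trace argument, the second executeCallable argument), while
// OpTraceRayKHR/OpExecuteCallableKHR need the variable itself. A reduced
// standalone model of that lookup -- Symbol stands in for TIntermSymbol; this
// is a sketch, not the real glslang types:
#include <cstdio>
#include <unordered_map>

struct Symbol { const char *name; };

// [0]: rayPayloadEXT / rayPayloadInEXT, [1]: callableDataEXT / callableDataInEXT
std::unordered_map<unsigned, Symbol *> locationToSymbol[2];

Symbol *payloadForTrace(unsigned location)    { return locationToSymbol[0][location]; }
Symbol *callableForExecute(unsigned location) { return locationToSymbol[1][location]; }

int main() {
    Symbol payload{"layout(location = 0) rayPayloadEXT"};
    Symbol callable{"layout(location = 1) callableDataEXT"};
    locationToSymbol[0][0] = &payload;    // filled while walking linker objects
    locationToSymbol[1][1] = &callable;

    std::printf("traceRayEXT(..., /*payload*/ 0)     -> %s\n", payloadForTrace(0)->name);
    std::printf("executeCallableEXT(sbt, /*data*/ 1) -> %s\n", callableForExecute(1)->name);
    return 0;
}
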
lValueCount = 0; @@ -5384,6 +5558,7 @@ spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAgg else if (writableParam(qualifiers[a])) { if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) { spv::Id copy = builder.createLoad(spvArgs[a], spv::NoPrecision); + builder.addDecoration(copy, TranslateNonUniformDecoration(argTypes[a]->getQualifier())); builder.setAccessChain(lValues[lValueCount]); multiTypeStore(*argTypes[a], copy); } @@ -6171,6 +6346,11 @@ spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDe case glslang::EOpConstructReference: unaryOp = spv::OpBitcast; break; + + case glslang::EOpConvUint64ToAccStruct: + case glslang::EOpConvUvec2ToAccStruct: + unaryOp = spv::OpConvertUToAccelerationStructureKHR; + break; #endif case glslang::EOpCopyObject: @@ -7757,10 +7937,16 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv:: typeId = builder.makeBoolType(); opCode = spv::OpReportIntersectionKHR; break; - case glslang::EOpTrace: + case glslang::EOpTraceNV: + builder.createNoResultOp(spv::OpTraceNV, operands); + return 0; + case glslang::EOpTraceKHR: builder.createNoResultOp(spv::OpTraceRayKHR, operands); return 0; - case glslang::EOpExecuteCallable: + case glslang::EOpExecuteCallableNV: + builder.createNoResultOp(spv::OpExecuteCallableNV, operands); + return 0; + case glslang::EOpExecuteCallableKHR: builder.createNoResultOp(spv::OpExecuteCallableKHR, operands); return 0; @@ -8048,11 +8234,11 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv: spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args); return builder.setPrecision(id, precision); } - case glslang::EOpIgnoreIntersection: - builder.createNoResultOp(spv::OpIgnoreIntersectionKHR); + case glslang::EOpIgnoreIntersectionNV: + builder.createNoResultOp(spv::OpIgnoreIntersectionNV); return 0; - case glslang::EOpTerminateRay: - builder.createNoResultOp(spv::OpTerminateRayKHR); + case glslang::EOpTerminateRayNV: + builder.createNoResultOp(spv::OpTerminateRayNV); return 0; case glslang::EOpRayQueryInitialize: builder.createNoResultOp(spv::OpRayQueryInitializeKHR); @@ -8180,7 +8366,8 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol } #ifndef GLSLANG_WEB - if (symbol->getType().isImage()) { + // Subgroup builtins which have input storage class are volatile for ray tracing stages. 
+ if (symbol->getType().isImage() || symbol->getQualifier().isPipeInput()) { std::vector<spv::Decoration> memory; TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel()); @@ -8188,9 +8375,6 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol builder.addDecoration(id, memory[i]); } - // nonuniform - builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier())); - if (builtIn == spv::BuiltInSampleMask) { spv::Decoration decoration; // GL_NV_sample_mask_override_coverage extension diff --git a/thirdparty/glslang/SPIRV/SPVRemapper.cpp b/thirdparty/glslang/SPIRV/SPVRemapper.cpp index 1adc6a3042..56d7f431a7 100644 --- a/thirdparty/glslang/SPIRV/SPVRemapper.cpp +++ b/thirdparty/glslang/SPIRV/SPVRemapper.cpp @@ -830,7 +830,15 @@ namespace spv { [&](spv::Id& id) { if (thisOpCode != spv::OpNop) { ++idCounter; - const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117; + const std::uint32_t hashval = + // Explicitly cast operands to unsigned int to avoid integer + // promotion to signed int followed by integer overflow, + // which would result in undefined behavior. + static_cast<unsigned int>(opCounter[thisOpCode]) + * thisOpCode + * 50047 + + idCounter + + static_cast<unsigned int>(fnId) * 117; if (isOldIdUnmapped(id)) localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID)); diff --git a/thirdparty/glslang/SPIRV/SpvBuilder.cpp b/thirdparty/glslang/SPIRV/SpvBuilder.cpp index 9680331469..c8fbcc450e 100644 --- a/thirdparty/glslang/SPIRV/SpvBuilder.cpp +++ b/thirdparty/glslang/SPIRV/SpvBuilder.cpp @@ -621,13 +621,13 @@ Id Builder::makeAccelerationStructureType() Id Builder::makeRayQueryType() { Instruction *type; - if (groupedTypes[OpTypeRayQueryProvisionalKHR].size() == 0) { - type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryProvisionalKHR); - groupedTypes[OpTypeRayQueryProvisionalKHR].push_back(type); + if (groupedTypes[OpTypeRayQueryKHR].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryKHR); + groupedTypes[OpTypeRayQueryKHR].push_back(type); constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type)); module.mapInstruction(type); } else { - type = groupedTypes[OpTypeRayQueryProvisionalKHR].back(); + type = groupedTypes[OpTypeRayQueryKHR].back(); } return type->getResultId(); @@ -1447,10 +1447,10 @@ void Builder::leaveFunction() } // Comments in header -void Builder::makeDiscard() +void Builder::makeStatementTerminator(spv::Op opcode, const char *name) { - buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(OpKill))); - createAndSetNoPredecessorBlock("post-discard"); + buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(opcode))); + createAndSetNoPredecessorBlock(name); } // Comments in header @@ -2761,12 +2761,14 @@ void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizz } // Comments in header -void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +void Builder::accessChainStore(Id rvalue, Decoration nonUniform, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) { assert(accessChain.isRValue == false); transferAccessChainSwizzle(true); Id base = collapseAccessChain(); + addDecoration(base, nonUniform); + Id source = rvalue; // dynamic component should be gone @@ -2789,8 +2791,9 @@ void Builder::accessChainStore(Id rvalue, 
spv::MemoryAccessMask memoryAccess, sp } // Comments in header -Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, - spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +Id Builder::accessChainLoad(Decoration precision, Decoration l_nonUniform, + Decoration r_nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, + spv::Scope scope, unsigned int alignment) { Id id; @@ -2854,9 +2857,9 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu // Buffer accesses need the access chain decorated, and this is where // loaded image types get decorated. TODO: This should maybe move to // createImageTextureFunctionCall. - addDecoration(id, nonUniform); + addDecoration(id, l_nonUniform); id = createLoad(id, precision, memoryAccess, scope, alignment); - addDecoration(id, nonUniform); + addDecoration(id, r_nonUniform); } // Done, unless there are swizzles to do @@ -2877,7 +2880,7 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu if (accessChain.component != NoResult) id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision); - addDecoration(id, nonUniform); + addDecoration(id, r_nonUniform); return id; } diff --git a/thirdparty/glslang/SPIRV/SpvBuilder.h b/thirdparty/glslang/SPIRV/SpvBuilder.h index 077945e77b..911ea58b12 100644 --- a/thirdparty/glslang/SPIRV/SpvBuilder.h +++ b/thirdparty/glslang/SPIRV/SpvBuilder.h @@ -357,8 +357,9 @@ public: // Generate all the code needed to finish up a function. void leaveFunction(); - // Create a discard. - void makeDiscard(); + // Create block terminator instruction for certain statements like + // discard, terminate-invocation, terminateRayEXT, or ignoreIntersectionEXT + void makeStatementTerminator(spv::Op opcode, const char *name); // Create a global or function local or IO variable. 
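
// The nonUniform handling threaded through these hunks follows one rule: an
// access chain is decorated NonUniform only when a nonuniform index reaches it
// (the new CoherentFlags::nonUniform bit); constant swizzles and struct member
// selection clear the bit, and loads decorate the chain pointer (l_nonUniform)
// separately from the loaded value (r_nonUniform). A reduced standalone model
// of the flag itself -- a sketch, not the real SpvBuilder types:
#include <cstdio>

enum Decoration { DecorationNonUniformEXT, DecorationMax };

struct CoherentFlags {                       // only the bit this patch adds
    unsigned nonUniform : 1;
    bool isNonUniform() const { return nonUniform; }
    CoherentFlags &operator|=(const CoherentFlags &o) {
        nonUniform |= o.nonUniform;
        return *this;
    }
};

// Mirrors TranslateNonUniformDecoration(const ...CoherentFlags&) added above.
Decoration translateNonUniform(const CoherentFlags &f) {
    return f.isNonUniform() ? DecorationNonUniformEXT : DecorationMax;
}

int main() {
    CoherentFlags chain{0};                  // chain over a resource array
    CoherentFlags index{1};                  // index wrapped in nonuniformEXT()
    chain |= index;                          // indirect index propagates the bit
    std::printf("indirect index:  %s\n",
        translateNonUniform(chain) == DecorationNonUniformEXT ? "NonUniformEXT" : "no decoration");

    chain.nonUniform = 0;                    // swizzle/member access clears it
    std::printf("constant member: %s\n",
        translateNonUniform(chain) == DecorationNonUniformEXT ? "NonUniformEXT" : "no decoration");
    return 0;
}
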
Id createVariable(Decoration precision, StorageClass, Id type, const char* name = nullptr, @@ -624,6 +625,7 @@ public: CoherentFlags operator |=(const CoherentFlags &other) { return *this; } #else bool isVolatile() const { return volatil; } + bool isNonUniform() const { return nonUniform; } bool anyCoherent() const { return coherent || devicecoherent || queuefamilycoherent || workgroupcoherent || subgroupcoherent || shadercallcoherent; @@ -638,6 +640,7 @@ public: unsigned nonprivate : 1; unsigned volatil : 1; unsigned isImage : 1; + unsigned nonUniform : 1; void clear() { coherent = 0; @@ -649,6 +652,7 @@ public: nonprivate = 0; volatil = 0; isImage = 0; + nonUniform = 0; } CoherentFlags operator |=(const CoherentFlags &other) { @@ -661,6 +665,7 @@ public: nonprivate |= other.nonprivate; volatil |= other.volatil; isImage |= other.isImage; + nonUniform |= other.nonUniform; return *this; } #endif @@ -721,11 +726,12 @@ public: } // use accessChain and swizzle to store value - void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, + void accessChainStore(Id rvalue, Decoration nonUniform, + spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); // use accessChain and swizzle to load an r-value - Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType, + Id accessChainLoad(Decoration precision, Decoration l_nonUniform, Decoration r_nonUniform, Id ResultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); diff --git a/thirdparty/glslang/SPIRV/SpvTools.cpp b/thirdparty/glslang/SPIRV/SpvTools.cpp index 112ac33c5c..16d051a9b2 100644 --- a/thirdparty/glslang/SPIRV/SpvTools.cpp +++ b/thirdparty/glslang/SPIRV/SpvTools.cpp @@ -44,7 +44,6 @@ #include "SpvTools.h" #include "spirv-tools/optimizer.hpp" -#include "spirv-tools/libspirv.h" namespace glslang { @@ -114,11 +113,18 @@ void OptimizerMesssageConsumer(spv_message_level_t level, const char *source, out << std::endl; } -// Use the SPIRV-Tools disassembler to print SPIR-V. +// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment. void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv) { + SpirvToolsDisassemble(out, spirv, spv_target_env::SPV_ENV_UNIVERSAL_1_3); +} + +// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment. +void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv, + spv_target_env requested_context) +{ // disassemble - spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3); + spv_context context = spvContextCreate(requested_context); spv_text text; spv_diagnostic diagnostic = nullptr; spvBinaryToText(context, spirv.data(), spirv.size(), @@ -174,10 +180,7 @@ void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector // line information into all SPIR-V instructions. This avoids loss of // information when instructions are deleted or moved. Later, remove // redundant information to minimize final SPRIR-V size. 
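
// A minimal usage sketch of the disassembler overload added above, assuming an
// ENABLE_OPT build with the SPIRV-Tools headers on the include path (the
// include paths and the SPV_ENV_UNIVERSAL_1_5 choice are illustrative):
#include <iostream>
#include <vector>
#include "SPIRV/SpvTools.h"            // glslang::SpirvToolsDisassemble
#include "spirv-tools/libspirv.h"      // spv_target_env

void dumpSpirv(const std::vector<unsigned int> &words) {
    // Original entry point: fixed SPV_ENV_UNIVERSAL_1_3 environment.
    glslang::SpirvToolsDisassemble(std::cout, words);
    // New overload: callers can pass an environment that matches the module,
    // e.g. for SPIR-V 1.5 modules using the finalized ray-tracing opcodes.
    glslang::SpirvToolsDisassemble(std::cout, words, SPV_ENV_UNIVERSAL_1_5);
}
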
- if (options->generateDebugInfo) { - optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass()); - } - else if (options->stripDebugInfo) { + if (options->stripDebugInfo) { optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass()); } optimizer.RegisterPass(spvtools::CreateWrapOpKillPass()); @@ -207,9 +210,6 @@ void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector } optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); optimizer.RegisterPass(spvtools::CreateCFGCleanupPass()); - if (options->generateDebugInfo) { - optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass()); - } spvtools::OptimizerOptions spvOptOptions; optimizer.SetTargetEnv(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); diff --git a/thirdparty/glslang/SPIRV/SpvTools.h b/thirdparty/glslang/SPIRV/SpvTools.h index 7779dfa779..3fb3cbacd3 100644 --- a/thirdparty/glslang/SPIRV/SpvTools.h +++ b/thirdparty/glslang/SPIRV/SpvTools.h @@ -41,9 +41,10 @@ #ifndef GLSLANG_SPV_TOOLS_H #define GLSLANG_SPV_TOOLS_H -#ifdef ENABLE_OPT +#if ENABLE_OPT #include <vector> #include <ostream> +#include "spirv-tools/libspirv.h" #endif #include "glslang/MachineIndependent/localintermediate.h" @@ -62,11 +63,15 @@ struct SpvOptions { bool validate; }; -#ifdef ENABLE_OPT +#if ENABLE_OPT -// Use the SPIRV-Tools disassembler to print SPIR-V. +// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment. void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv); +// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment. +void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv, + spv_target_env requested_context); + // Apply the SPIRV-Tools validator to generated SPIR-V. 
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, spv::SpvBuildLogger*, bool prelegalization); diff --git a/thirdparty/glslang/SPIRV/disassemble.cpp b/thirdparty/glslang/SPIRV/disassemble.cpp index a95ded49d2..73c988c5b3 100644 --- a/thirdparty/glslang/SPIRV/disassemble.cpp +++ b/thirdparty/glslang/SPIRV/disassemble.cpp @@ -46,7 +46,6 @@ #include "disassemble.h" #include "doc.h" -#include "SpvTools.h" namespace spv { extern "C" { diff --git a/thirdparty/glslang/SPIRV/doc.cpp b/thirdparty/glslang/SPIRV/doc.cpp index 1d052f8c29..5327f2212d 100644 --- a/thirdparty/glslang/SPIRV/doc.cpp +++ b/thirdparty/glslang/SPIRV/doc.cpp @@ -372,6 +372,8 @@ const char* BuiltInString(int builtIn) case 4424: return "BaseVertex"; case 4425: return "BaseInstance"; case 4426: return "DrawIndex"; + case 4432: return "PrimitiveShadingRateKHR"; + case 4444: return "ShadingRateKHR"; case 5014: return "FragStencilRefEXT"; case 4992: return "BaryCoordNoPerspAMD"; @@ -393,7 +395,7 @@ const char* BuiltInString(int builtIn) case BuiltInRayGeometryIndexKHR: return "RayGeometryIndexKHR"; case BuiltInObjectToWorldKHR: return "ObjectToWorldKHR"; case BuiltInWorldToObjectKHR: return "WorldToObjectKHR"; - case BuiltInHitTKHR: return "HitTKHR"; + case BuiltInHitTNV: return "HitTNV"; case BuiltInHitKindKHR: return "HitKindKHR"; case BuiltInIncomingRayFlagsKHR: return "IncomingRayFlagsKHR"; case BuiltInViewportMaskNV: return "ViewportMaskNV"; @@ -521,6 +523,8 @@ const char* ImageFormatString(int format) case 37: return "Rg8ui"; case 38: return "R16ui"; case 39: return "R8ui"; + case 40: return "R64ui"; + case 41: return "R64i"; default: return "Bad"; @@ -911,9 +915,9 @@ const char* CapabilityString(int info) case CapabilityPerViewAttributesNV: return "PerViewAttributesNV"; case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV"; case CapabilityRayTracingNV: return "RayTracingNV"; - case CapabilityRayTracingProvisionalKHR: return "RayTracingProvisionalKHR"; - case CapabilityRayQueryProvisionalKHR: return "RayQueryProvisionalKHR"; - case CapabilityRayTraversalPrimitiveCullingProvisionalKHR: return "RayTraversalPrimitiveCullingProvisionalKHR"; + case CapabilityRayTracingKHR: return "RayTracingKHR"; + case CapabilityRayQueryKHR: return "RayQueryKHR"; + case CapabilityRayTraversalPrimitiveCullingKHR: return "RayTraversalPrimitiveCullingKHR"; case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV"; case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV"; case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV"; @@ -952,8 +956,11 @@ const char* CapabilityString(int info) case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT"; case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT"; + case CapabilityFragmentShadingRateKHR: return "FragmentShadingRateKHR"; + case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT"; case CapabilityShaderClockKHR: return "ShaderClockKHR"; + case CapabilityInt64ImageEXT: return "Int64ImageEXT"; case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL"; @@ -1329,6 +1336,8 @@ const char* OpcodeString(int op) case 365: return "OpGroupNonUniformQuadBroadcast"; case 366: return "OpGroupNonUniformQuadSwap"; + case OpTerminateInvocation: return "OpTerminateInvocation"; + case 4421: return "OpSubgroupBallotKHR"; case 4422: 
return "OpSubgroupFirstInvocationKHR"; case 4428: return "OpSubgroupAllKHR"; @@ -1355,17 +1364,23 @@ const char* OpcodeString(int op) case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE"; case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE"; + case OpReportIntersectionKHR: return "OpReportIntersectionKHR"; + case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV"; + case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR"; + case OpTerminateRayNV: return "OpTerminateRayNV"; + case OpTerminateRayKHR: return "OpTerminateRayKHR"; + case OpTraceNV: return "OpTraceNV"; + case OpTraceRayKHR: return "OpTraceRayKHR"; + case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR"; + case OpExecuteCallableNV: return "OpExecuteCallableNV"; + case OpExecuteCallableKHR: return "OpExecuteCallableKHR"; + case OpConvertUToAccelerationStructureKHR: return "OpConvertUToAccelerationStructureKHR"; + case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV"; - case OpReportIntersectionKHR: return "OpReportIntersectionKHR"; - case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR"; - case OpTerminateRayKHR: return "OpTerminateRayKHR"; - case OpTraceRayKHR: return "OpTraceRayKHR"; - case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR"; - case OpExecuteCallableKHR: return "OpExecuteCallableKHR"; case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV"; case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV"; - case OpTypeRayQueryProvisionalKHR: return "OpTypeRayQueryProvisionalKHR"; + case OpTypeRayQueryKHR: return "OpTypeRayQueryKHR"; case OpRayQueryInitializeKHR: return "OpRayQueryInitializeKHR"; case OpRayQueryTerminateKHR: return "OpRayQueryTerminateKHR"; case OpRayQueryGenerateIntersectionKHR: return "OpRayQueryGenerateIntersectionKHR"; @@ -1497,6 +1512,7 @@ void Parameterize() InstructionDesc[OpBranchConditional].setResultAndType(false, false); InstructionDesc[OpSwitch].setResultAndType(false, false); InstructionDesc[OpKill].setResultAndType(false, false); + InstructionDesc[OpTerminateInvocation].setResultAndType(false, false); InstructionDesc[OpReturn].setResultAndType(false, false); InstructionDesc[OpReturnValue].setResultAndType(false, false); InstructionDesc[OpUnreachable].setResultAndType(false, false); @@ -2761,7 +2777,20 @@ void Parameterize() InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); - InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'NV Acceleration Structure'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Acceleration Structure'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Flags'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Cull Mask'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Offset'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Stride'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Miss Index'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Origin'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMin'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Direction'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMax'"); + InstructionDesc[OpTraceNV].operands.push(OperandId, "'Payload'"); + InstructionDesc[OpTraceNV].setResultAndType(false, false); + + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Acceleration Structure'"); 
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Flags'"); InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Cull Mask'"); InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Offset'"); @@ -2777,17 +2806,28 @@ void Parameterize() InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Parameter'"); InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Kind'"); + InstructionDesc[OpIgnoreIntersectionNV].setResultAndType(false, false); + InstructionDesc[OpIgnoreIntersectionKHR].setResultAndType(false, false); + InstructionDesc[OpTerminateRayNV].setResultAndType(false, false); + InstructionDesc[OpTerminateRayKHR].setResultAndType(false, false); + InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "SBT Record Index"); + InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "CallableData ID"); + InstructionDesc[OpExecuteCallableNV].setResultAndType(false, false); + InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "SBT Record Index"); - InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData ID"); + InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData"); InstructionDesc[OpExecuteCallableKHR].setResultAndType(false, false); + InstructionDesc[OpConvertUToAccelerationStructureKHR].operands.push(OperandId, "Value"); + InstructionDesc[OpConvertUToAccelerationStructureKHR].setResultAndType(true, true); + // Ray Query InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); - InstructionDesc[OpTypeRayQueryProvisionalKHR].setResultAndType(true, false); + InstructionDesc[OpTypeRayQueryKHR].setResultAndType(true, false); InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayQuery'"); InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'AccelerationS'"); diff --git a/thirdparty/glslang/SPIRV/spirv.hpp b/thirdparty/glslang/SPIRV/spirv.hpp index 35482ea5e2..43dd2aaeec 100644 --- a/thirdparty/glslang/SPIRV/spirv.hpp +++ b/thirdparty/glslang/SPIRV/spirv.hpp @@ -50,11 +50,11 @@ namespace spv { typedef unsigned int Id; #define SPV_VERSION 0x10500 -#define SPV_REVISION 3 +#define SPV_REVISION 4 static const unsigned int MagicNumber = 0x07230203; static const unsigned int Version = 0x00010500; -static const unsigned int Revision = 3; +static const unsigned int Revision = 4; static const unsigned int OpCodeMask = 0xffff; static const unsigned int WordCountShift = 16; @@ -274,6 +274,8 @@ enum ImageFormat { ImageFormatRg8ui = 37, ImageFormatR16ui = 38, ImageFormatR8ui = 39, + ImageFormatR64ui = 40, + ImageFormatR64i = 41, ImageFormatMax = 0x7fffffff, }; @@ -558,8 +560,10 @@ enum BuiltIn { BuiltInBaseVertex = 4424, BuiltInBaseInstance = 4425, BuiltInDrawIndex = 4426, + BuiltInPrimitiveShadingRateKHR = 4432, BuiltInDeviceIndex = 4438, BuiltInViewIndex = 4440, + BuiltInShadingRateKHR = 4444, BuiltInBaryCoordNoPerspAMD = 4992, BuiltInBaryCoordNoPerspCentroidAMD = 4993, BuiltInBaryCoordNoPerspSampleAMD = 4994, @@ -610,7 +614,6 @@ enum BuiltIn { BuiltInObjectToWorldNV = 5330, BuiltInWorldToObjectKHR = 5331, BuiltInWorldToObjectNV = 5331, - BuiltInHitTKHR = 5332, BuiltInHitTNV = 5332, BuiltInHitKindKHR = 5333, BuiltInHitKindNV = 5333, @@ -870,6 +873,7 @@ enum Capability { CapabilityGroupNonUniformQuad = 68, CapabilityShaderLayer = 69, CapabilityShaderViewportIndex = 70, + CapabilityFragmentShadingRateKHR = 4422, CapabilitySubgroupBallotKHR = 4423, CapabilityDrawParameters = 4427, CapabilitySubgroupVoteKHR = 4431, @@ -894,12 
+898,15 @@ enum Capability { CapabilityRoundingModeRTE = 4467, CapabilityRoundingModeRTZ = 4468, CapabilityRayQueryProvisionalKHR = 4471, - CapabilityRayTraversalPrimitiveCullingProvisionalKHR = 4478, + CapabilityRayQueryKHR = 4472, + CapabilityRayTraversalPrimitiveCullingKHR = 4478, + CapabilityRayTracingKHR = 4479, CapabilityFloat16ImageAMD = 5008, CapabilityImageGatherBiasLodAMD = 5009, CapabilityFragmentMaskAMD = 5010, CapabilityStencilExportEXT = 5013, CapabilityImageReadWriteLodAMD = 5015, + CapabilityInt64ImageEXT = 5016, CapabilityShaderClockKHR = 5055, CapabilitySampleMaskOverrideCoverageNV = 5249, CapabilityGeometryShaderPassthroughNV = 5251, @@ -1024,6 +1031,22 @@ enum RayQueryCandidateIntersectionType { RayQueryCandidateIntersectionTypeMax = 0x7fffffff, }; +enum FragmentShadingRateShift { + FragmentShadingRateVertical2PixelsShift = 0, + FragmentShadingRateVertical4PixelsShift = 1, + FragmentShadingRateHorizontal2PixelsShift = 2, + FragmentShadingRateHorizontal4PixelsShift = 3, + FragmentShadingRateMax = 0x7fffffff, +}; + +enum FragmentShadingRateMask { + FragmentShadingRateMaskNone = 0, + FragmentShadingRateVertical2PixelsMask = 0x00000001, + FragmentShadingRateVertical4PixelsMask = 0x00000002, + FragmentShadingRateHorizontal2PixelsMask = 0x00000004, + FragmentShadingRateHorizontal4PixelsMask = 0x00000008, +}; + enum Op { OpNop = 0, OpUndef = 1, @@ -1369,13 +1392,19 @@ enum Op { OpPtrEqual = 401, OpPtrNotEqual = 402, OpPtrDiff = 403, + OpTerminateInvocation = 4416, OpSubgroupBallotKHR = 4421, OpSubgroupFirstInvocationKHR = 4422, OpSubgroupAllKHR = 4428, OpSubgroupAnyKHR = 4429, OpSubgroupAllEqualKHR = 4430, OpSubgroupReadInvocationKHR = 4432, - OpTypeRayQueryProvisionalKHR = 4472, + OpTraceRayKHR = 4445, + OpExecuteCallableKHR = 4446, + OpConvertUToAccelerationStructureKHR = 4447, + OpIgnoreIntersectionKHR = 4448, + OpTerminateRayKHR = 4449, + OpTypeRayQueryKHR = 4472, OpRayQueryInitializeKHR = 4473, OpRayQueryTerminateKHR = 4474, OpRayQueryGenerateIntersectionKHR = 4475, @@ -1398,15 +1427,11 @@ enum Op { OpWritePackedPrimitiveIndices4x8NV = 5299, OpReportIntersectionKHR = 5334, OpReportIntersectionNV = 5334, - OpIgnoreIntersectionKHR = 5335, OpIgnoreIntersectionNV = 5335, - OpTerminateRayKHR = 5336, OpTerminateRayNV = 5336, OpTraceNV = 5337, - OpTraceRayKHR = 5337, OpTypeAccelerationStructureKHR = 5341, OpTypeAccelerationStructureNV = 5341, - OpExecuteCallableKHR = 5344, OpExecuteCallableNV = 5344, OpTypeCooperativeMatrixNV = 5358, OpCooperativeMatrixLoadNV = 5359, @@ -1939,13 +1964,19 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) { case OpPtrEqual: *hasResult = true; *hasResultType = true; break; case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break; case OpPtrDiff: *hasResult = true; *hasResultType = true; break; + case OpTerminateInvocation: *hasResult = false; *hasResultType = false; break; case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break; case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; - case OpTypeRayQueryProvisionalKHR: *hasResult = true; *hasResultType = false; break; + case OpTraceRayKHR: *hasResult = false; *hasResultType = false; break; + case 
OpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break; + case OpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break; + case OpIgnoreIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpTerminateRayKHR: *hasResult = false; *hasResultType = false; break; + case OpTypeRayQueryKHR: *hasResult = true; *hasResultType = false; break; case OpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break; case OpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break; case OpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break; @@ -2164,6 +2195,7 @@ inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } inline RayFlagsMask operator|(RayFlagsMask a, RayFlagsMask b) { return RayFlagsMask(unsigned(a) | unsigned(b)); } +inline FragmentShadingRateMask operator|(FragmentShadingRateMask a, FragmentShadingRateMask b) { return FragmentShadingRateMask(unsigned(a) | unsigned(b)); } } // end namespace spv diff --git a/thirdparty/glslang/SPIRV/spvIR.h b/thirdparty/glslang/SPIRV/spvIR.h index 868b9bf82d..486e80d000 100644 --- a/thirdparty/glslang/SPIRV/spvIR.h +++ b/thirdparty/glslang/SPIRV/spvIR.h @@ -263,6 +263,7 @@ public: case OpBranchConditional: case OpSwitch: case OpKill: + case OpTerminateInvocation: case OpReturn: case OpReturnValue: case OpUnreachable: diff --git a/thirdparty/glslang/glslang/Include/BaseTypes.h b/thirdparty/glslang/glslang/Include/BaseTypes.h index b69eaebf2d..55bdd25da5 100644 --- a/thirdparty/glslang/glslang/Include/BaseTypes.h +++ b/thirdparty/glslang/glslang/Include/BaseTypes.h @@ -228,6 +228,9 @@ enum TBuiltInVariable { EbvViewIndex, EbvDeviceIndex, + EbvShadingRateKHR, + EbvPrimitiveShadingRateKHR, + EbvFragSizeEXT, EbvFragInvocationCountEXT, @@ -480,6 +483,9 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v) case EbvWarpID: return "WarpIDNV"; case EbvSMID: return "SMIDNV"; + case EbvShadingRateKHR: return "ShadingRateKHR"; + case EbvPrimitiveShadingRateKHR: return "PrimitiveShadingRateKHR"; + default: return "unknown built-in variable"; } } diff --git a/thirdparty/glslang/glslang/Include/Types.h b/thirdparty/glslang/glslang/Include/Types.h index 235ea3f1d5..696daf6dfa 100644 --- a/thirdparty/glslang/glslang/Include/Types.h +++ b/thirdparty/glslang/glslang/Include/Types.h @@ -406,6 +406,7 @@ enum TLayoutFormat { ElfRg8i, ElfR16i, ElfR8i, + ElfR64i, ElfIntGuard, // to help with comparisons @@ -423,6 +424,7 @@ enum TLayoutFormat { ElfRg8ui, ElfR16ui, ElfR8ui, + ElfR64ui, ElfCount }; @@ -755,6 +757,12 @@ public: bool isPerPrimitive() const { return perPrimitiveNV; } bool isPerView() const { return perViewNV; } bool isTaskMemory() const { return perTaskNV; } + bool isAnyPayload() const { + return storage == EvqPayload || storage == EvqPayloadIn; + } + bool isAnyCallable() const { + return storage == EvqCallableData || storage == EvqCallableDataIn; + } // True if this type of IO is supposed to be arrayed with extra level for per-vertex data bool isArrayedIo(EShLanguage language) const @@ -1117,6 +1125,8 @@ public: case ElfR32ui: return "r32ui"; case ElfR16ui: return "r16ui"; case ElfR8ui: return "r8ui"; + case ElfR64ui: return 
"r64ui"; + case ElfR64i: return "r64i"; default: return "none"; } } @@ -1986,6 +1996,7 @@ public: case EbtAccStruct: return "accelerationStructureNV"; case EbtRayQuery: return "rayQueryEXT"; case EbtReference: return "reference"; + case EbtString: return "string"; #endif default: return "unknown type"; } diff --git a/thirdparty/glslang/glslang/Include/intermediate.h b/thirdparty/glslang/glslang/Include/intermediate.h index 30cb6fb171..19cd32e9a8 100644 --- a/thirdparty/glslang/glslang/Include/intermediate.h +++ b/thirdparty/glslang/glslang/Include/intermediate.h @@ -280,6 +280,12 @@ enum TOperator { EOpConvUvec2ToPtr, EOpConvPtrToUvec2, + // uint64_t -> accelerationStructureEXT + EOpConvUint64ToAccStruct, + + // uvec2 -> accelerationStructureEXT + EOpConvUvec2ToAccStruct, + // // binary operations // @@ -628,13 +634,16 @@ enum TOperator { // Branch // - EOpKill, // Fragment only + EOpKill, // Fragment only + EOpTerminateInvocation, // Fragment only + EOpDemote, // Fragment only + EOpTerminateRayKHR, // Any-hit only + EOpIgnoreIntersectionKHR, // Any-hit only EOpReturn, EOpBreak, EOpContinue, EOpCase, EOpDefault, - EOpDemote, // Fragment only // // Constructors @@ -751,6 +760,7 @@ enum TOperator { EOpConstructNonuniform, // expected to be transformed away, not present in final AST EOpConstructReference, EOpConstructCooperativeMatrix, + EOpConstructAccStruct, EOpConstructGuardEnd, // @@ -911,11 +921,13 @@ enum TOperator { EOpAverageRounded, EOpMul32x16, - EOpTrace, + EOpTraceNV, + EOpTraceKHR, EOpReportIntersection, - EOpIgnoreIntersection, - EOpTerminateRay, - EOpExecuteCallable, + EOpIgnoreIntersectionNV, + EOpTerminateRayNV, + EOpExecuteCallableNV, + EOpExecuteCallableKHR, EOpWritePackedPrimitiveIndices4x8NV, // @@ -1282,6 +1294,8 @@ public: TIntermTyped* getConstSubtree() const { return constSubtree; } #ifndef GLSLANG_WEB void setFlattenSubset(int subset) { flattenSubset = subset; } + virtual const TString& getAccessName() const; + int getFlattenSubset() const { return flattenSubset; } // -1 means full object #endif diff --git a/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp b/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp index 8d5d04fb43..a5ef6ccaf6 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp @@ -509,6 +509,8 @@ TBuiltIns::TBuiltIns() prefixes[EbtUint8] = "u8"; prefixes[EbtInt16] = "i16"; prefixes[EbtUint16] = "u16"; + prefixes[EbtInt64] = "i64"; + prefixes[EbtUint64] = "u64"; #endif postfixes[2] = "2"; @@ -4419,9 +4421,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "\n"); stageBuiltins[EShLangAnyHit].append( "void ignoreIntersectionNV();" - "void ignoreIntersectionEXT();" "void terminateRayNV();" - "void terminateRayEXT();" "\n"); stageBuiltins[EShLangClosestHit].append( "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" @@ -4928,6 +4928,11 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "\n"); } + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangVertex].append( + "out highp int gl_PrimitiveShadingRateEXT;" // GL_EXT_fragment_shading_rate + "\n"); + } //============================================================================ // @@ -5041,6 +5046,12 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "\n"); } + if ((profile != EEsProfile && version 
>= 450) || (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangGeometry].append( + "out highp int gl_PrimitiveShadingRateEXT;" // GL_EXT_fragment_shading_rate + "\n"); + } + //============================================================================ // // Define the interface to the tessellation control shader. @@ -5338,6 +5349,11 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "in vec3 gl_BaryCoordNoPerspNV;" ); + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "flat in int gl_ShadingRateEXT;" // GL_EXT_fragment_shading_rate + ); + } else { // ES profile @@ -5396,6 +5412,10 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "in vec3 gl_BaryCoordNV;" "in vec3 gl_BaryCoordNoPerspNV;" ); + if (version >= 310) + stageBuiltins[EShLangFragment].append( + "flat in highp int gl_ShadingRateEXT;" // GL_EXT_fragment_shading_rate + ); } #endif @@ -5414,6 +5434,12 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "\n"); } + if (version >= 300 /* both ES and non-ES */) { + stageBuiltins[EShLangFragment].append( + "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2 + "\n"); + } + #ifndef GLSLANG_ANGLE // GL_ARB_shader_ballot if (profile != EEsProfile && version >= 450) { @@ -5426,6 +5452,15 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "in uint64_t gl_SubGroupLeMaskARB;" "in uint64_t gl_SubGroupLtMaskARB;" "\n"; + const char* rtBallotDecls = + "uniform volatile uint gl_SubGroupSizeARB;" + "in volatile uint gl_SubGroupInvocationARB;" + "in volatile uint64_t gl_SubGroupEqMaskARB;" + "in volatile uint64_t gl_SubGroupGeMaskARB;" + "in volatile uint64_t gl_SubGroupGtMaskARB;" + "in volatile uint64_t gl_SubGroupLeMaskARB;" + "in volatile uint64_t gl_SubGroupLtMaskARB;" + "\n"; const char* fragmentBallotDecls = "uniform uint gl_SubGroupSizeARB;" "flat in uint gl_SubGroupInvocationARB;" @@ -5443,6 +5478,13 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV stageBuiltins[EShLangFragment] .append(fragmentBallotDecls); stageBuiltins[EShLangMeshNV] .append(ballotDecls); stageBuiltins[EShLangTaskNV] .append(ballotDecls); + stageBuiltins[EShLangRayGen] .append(rtBallotDecls); + stageBuiltins[EShLangIntersect] .append(rtBallotDecls); + // No volatile qualifier on these builtins in any-hit + stageBuiltins[EShLangAnyHit] .append(ballotDecls); + stageBuiltins[EShLangClosestHit] .append(rtBallotDecls); + stageBuiltins[EShLangMiss] .append(rtBallotDecls); + stageBuiltins[EShLangCallable] .append(rtBallotDecls); } // GL_KHR_shader_subgroup @@ -5480,6 +5522,21 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "in highp uint gl_NumSubgroups;" "in highp uint gl_SubgroupID;" "\n"; + // These builtins are volatile for RT stages + const char* rtSubgroupDecls = + "in mediump volatile uint gl_SubgroupSize;" + "in mediump volatile uint gl_SubgroupInvocationID;" + "in highp volatile uvec4 gl_SubgroupEqMask;" + "in highp volatile uvec4 gl_SubgroupGeMask;" + "in highp volatile uvec4 gl_SubgroupGtMask;" + "in highp volatile uvec4 gl_SubgroupLeMask;" + "in highp volatile uvec4 gl_SubgroupLtMask;" + // GL_NV_shader_sm_builtins + "in highp uint gl_WarpsPerSMNV;" + "in highp uint gl_SMCountNV;" + "in highp volatile uint gl_WarpIDNV;" + "in highp volatile uint gl_SMIDNV;" + "\n"; stageBuiltins[EShLangVertex] .append(subgroupDecls); stageBuiltins[EShLangTessControl] 
.append(subgroupDecls); @@ -5492,12 +5549,13 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV stageBuiltins[EShLangMeshNV] .append(computeSubgroupDecls); stageBuiltins[EShLangTaskNV] .append(subgroupDecls); stageBuiltins[EShLangTaskNV] .append(computeSubgroupDecls); - stageBuiltins[EShLangRayGen] .append(subgroupDecls); - stageBuiltins[EShLangIntersect] .append(subgroupDecls); + stageBuiltins[EShLangRayGen] .append(rtSubgroupDecls); + stageBuiltins[EShLangIntersect] .append(rtSubgroupDecls); + // No volatile qualifier on these builtins in any-hit stageBuiltins[EShLangAnyHit] .append(subgroupDecls); - stageBuiltins[EShLangClosestHit] .append(subgroupDecls); - stageBuiltins[EShLangMiss] .append(subgroupDecls); - stageBuiltins[EShLangCallable] .append(subgroupDecls); + stageBuiltins[EShLangClosestHit] .append(rtSubgroupDecls); + stageBuiltins[EShLangMiss] .append(rtSubgroupDecls); + stageBuiltins[EShLangCallable] .append(rtSubgroupDecls); } // GL_NV_ray_tracing/GL_EXT_ray_tracing @@ -5565,7 +5623,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "in float gl_RayTminNV;" "in float gl_RayTminEXT;" "in float gl_RayTmaxNV;" - "in float gl_RayTmaxEXT;" + "in volatile float gl_RayTmaxEXT;" "in mat4x3 gl_ObjectToWorldNV;" "in mat4x3 gl_ObjectToWorldEXT;" "in mat3x4 gl_ObjectToWorld3x4EXT;" @@ -5685,13 +5743,57 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n"); } -#endif // !GLSLANG_ANGLE - - if (version >= 300 /* both ES and non-ES */) { - stageBuiltins[EShLangFragment].append( - "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2 - "\n"); + // Adding these to common built-ins triggers an assert due to a memory corruption in related code when testing + // So instead add to each stage individually, avoiding the GLSLang bug + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 310)) { + for (int stage=EShLangVertex; stage<EShLangCount; stage++) + { + stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag2VerticalPixelsEXT = 1;\n"); + stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag4VerticalPixelsEXT = 2;\n"); + stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag2HorizontalPixelsEXT = 4;\n"); + stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag4HorizontalPixelsEXT = 8;\n"); + } + } + + // GL_EXT_shader_image_int64 + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) { + + const TBasicType bTypes[] = { EbtInt64, EbtUint64 }; + for (int ms = 0; ms <= 1; ++ms) { // loop over "bool" multisample or not + for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not + for (int dim = Esd1D; dim < EsdSubpass; ++dim) { // 1D, ..., buffer + if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile) + continue; + + if ((dim == Esd3D || dim == EsdRect || dim == EsdBuffer) && arrayed) + continue; + + if (dim != Esd2D && ms) + continue; + + // Loop over the bTypes + for (size_t bType = 0; bType < sizeof(bTypes)/sizeof(TBasicType); ++bType) { + // + // Now, make all the function prototypes for the type we just built... + // + TSampler sampler; + + sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false, + false, + ms ? 
true : false); + + TString typeName = sampler.getString(); + + addQueryFunctions(sampler, typeName, version, profile); + addImageFunctions(sampler, typeName, version, profile); + } + } + } + } } +#endif // !GLSLANG_ANGLE + #endif // !GLSLANG_WEB // printf("%s\n", commonBuiltins.c_str()); @@ -5788,7 +5890,6 @@ void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, c #endif if (shadow && (bTypes[bType] == EbtInt || bTypes[bType] == EbtUint)) continue; - // // Now, make all the function prototypes for the type we just built... // @@ -6013,8 +6114,16 @@ void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int if ( profile != EEsProfile || (profile == EEsProfile && version >= 310)) { - if (sampler.type == EbtInt || sampler.type == EbtUint) { - const char* dataType = sampler.type == EbtInt ? "highp int" : "highp uint"; + if (sampler.type == EbtInt || sampler.type == EbtUint || sampler.type == EbtInt64 || sampler.type == EbtUint64 ) { + + const char* dataType; + switch (sampler.type) { + case(EbtInt): dataType = "highp int"; break; + case(EbtUint): dataType = "highp uint"; break; + case(EbtInt64): dataType = "highp int64_t"; break; + case(EbtUint64): dataType = "highp uint64_t"; break; + default: dataType = ""; + } const int numBuiltins = 7; @@ -7650,6 +7759,20 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); } + + if (language == EShLangGeometry || language == EShLangVertex) { + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_PrimitiveShadingRateEXT", 1, &E_GL_EXT_fragment_shading_rate); + BuiltInVariable("gl_PrimitiveShadingRateEXT", EbvPrimitiveShadingRateKHR, symbolTable); + + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } + } + #endif // !GLSLANG_WEB break; @@ -8156,6 +8279,17 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion } symbolTable.setFunctionExtensions("helperInvocationEXT", 1, &E_GL_EXT_demote_to_helper_invocation); + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_ShadingRateEXT", 1, &E_GL_EXT_fragment_shading_rate); + BuiltInVariable("gl_ShadingRateEXT", EbvShadingRateKHR, symbolTable); + + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } #endif // !GLSLANG_WEB break; @@ -8288,6 +8422,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives); 
symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives); } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } #endif // !GLSLANG_WEB break; @@ -8342,9 +8484,7 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion symbolTable.setFunctionExtensions("reportIntersectionNV", 1, &E_GL_NV_ray_tracing); symbolTable.setFunctionExtensions("reportIntersectionEXT", 1, &E_GL_EXT_ray_tracing); symbolTable.setFunctionExtensions("ignoreIntersectionNV", 1, &E_GL_NV_ray_tracing); - symbolTable.setFunctionExtensions("ignoreIntersectionEXT", 1, &E_GL_EXT_ray_tracing); symbolTable.setFunctionExtensions("terminateRayNV", 1, &E_GL_NV_ray_tracing); - symbolTable.setFunctionExtensions("terminateRayEXT", 1, &E_GL_EXT_ray_tracing); symbolTable.setFunctionExtensions("executeCallableNV", 1, &E_GL_NV_ray_tracing); symbolTable.setFunctionExtensions("executeCallableEXT", 1, &E_GL_EXT_ray_tracing); @@ -8437,6 +8577,13 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); } + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } break; case EShLangMeshNV: @@ -8581,6 +8728,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } break; case EShLangTaskNV: @@ -8681,6 +8836,13 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); } + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 450)) { + symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 
1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate); + } break; #endif @@ -9152,10 +9314,10 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion case EShLangClosestHit: case EShLangMiss: if (profile != EEsProfile && version >= 460) { - symbolTable.relateToOperator("traceNV", EOpTrace); - symbolTable.relateToOperator("traceRayEXT", EOpTrace); - symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); - symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + symbolTable.relateToOperator("traceNV", EOpTraceNV); + symbolTable.relateToOperator("traceRayEXT", EOpTraceKHR); + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallableKHR); } break; case EShLangIntersect: @@ -9166,16 +9328,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion break; case EShLangAnyHit: if (profile != EEsProfile && version >= 460) { - symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersection); - symbolTable.relateToOperator("ignoreIntersectionEXT", EOpIgnoreIntersection); - symbolTable.relateToOperator("terminateRayNV", EOpTerminateRay); - symbolTable.relateToOperator("terminateRayEXT", EOpTerminateRay); + symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersectionNV); + symbolTable.relateToOperator("terminateRayNV", EOpTerminateRayNV); } break; case EShLangCallable: if (profile != EEsProfile && version >= 460) { - symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); - symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallableKHR); } break; case EShLangMeshNV: diff --git a/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp b/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp index f46010b712..553b1b5ff8 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp @@ -71,6 +71,13 @@ void TIntermConstantUnion::traverse(TIntermTraverser *it) it->visitConstantUnion(this); } +const TString& TIntermSymbol::getAccessName() const { + if (getBasicType() == EbtBlock) + return getType().getTypeName(); + else + return getName(); +} + // // Traverse a binary node. 
// diff --git a/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp b/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp index b8c220d781..f6172a2bf7 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp @@ -2298,6 +2298,10 @@ TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const case EbtReference: op = EOpConstructReference; break; + + case EbtAccStruct: + op = EOpConstructAccStruct; + break; #endif default: break; diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp b/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp index b46400914c..3efa27aca3 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp @@ -127,22 +127,6 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, { TIntermBinary* binaryNode = node->getAsBinaryNode(); - if (binaryNode) { - switch(binaryNode->getOp()) { - case EOpIndexDirect: - case EOpIndexIndirect: // fall through - case EOpIndexDirectStruct: // fall through - case EOpVectorSwizzle: - case EOpMatrixSwizzle: - return lValueErrorCheck(loc, op, binaryNode->getLeft()); - default: - break; - } - error(loc, " l-value required", op, "", ""); - - return true; - } - const char* symbol = nullptr; TIntermSymbol* symNode = node->getAsSymbolNode(); if (symNode != nullptr) @@ -203,15 +187,40 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, // Everything else is okay, no error. // if (message == nullptr) + { + if (binaryNode) { + switch (binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: // fall through + case EOpIndexDirectStruct: // fall through + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + return lValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + error(loc, " l-value required", op, "", ""); + + return true; + } return false; + } // // If we get here, we have an error and a message. // + const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true); + if (symNode) error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message); else - error(loc, " l-value required", op, "(%s)", message); + if (binaryNode && binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct) + if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName())) + error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str(), message); + else + error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getName().c_str(), message); + else + error(loc, " l-value required", op, "(%s)", message); return true; } @@ -219,28 +228,41 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, // Test for and give an error if the node can't be read from. void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) { + TIntermBinary* binaryNode = node->getAsBinaryNode(); + const TIntermSymbol* symNode = node->getAsSymbolNode(); + if (! 
node) return; - TIntermBinary* binaryNode = node->getAsBinaryNode(); - if (binaryNode) { - switch(binaryNode->getOp()) { - case EOpIndexDirect: - case EOpIndexIndirect: - case EOpIndexDirectStruct: - case EOpVectorSwizzle: - case EOpMatrixSwizzle: - rValueErrorCheck(loc, op, binaryNode->getLeft()); - default: - break; - } + if (node->getQualifier().isWriteOnly()) { + const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true); + + if (symNode != nullptr) + error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str()); + else if (binaryNode && + (binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct || + binaryNode->getAsOperator()->getOp() == EOpIndexDirect)) + if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName())) + error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str()); + else + error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getName().c_str()); + else + error(loc, "can't read from writeonly object: ", op, ""); - return; + } else { + if (binaryNode) { + switch (binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + rValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + } } - - TIntermSymbol* symNode = node->getAsSymbolNode(); - if (symNode && symNode->getQualifier().isWriteOnly()) - error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str()); } // Add 'symbol' to the list of deferred linkage symbols, which diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp index 86a5a37866..9c42a204f7 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp @@ -2076,13 +2076,31 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan } #ifndef GLSLANG_WEB - case EOpTrace: + case EOpTraceNV: if (!(*argp)[10]->getAsConstantUnion()) - error(loc, "argument must be compile-time constant", "payload number", ""); + error(loc, "argument must be compile-time constant", "payload number", "a"); break; - case EOpExecuteCallable: + case EOpTraceKHR: + if (!(*argp)[10]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "payload number", "a"); + else { + unsigned int location = (*argp)[10]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst(); + if (intermediate.checkLocationRT(0, location) < 0) + error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location); + } + break; + case EOpExecuteCallableNV: + if (!(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "callable data number", ""); + break; + case EOpExecuteCallableKHR: if (!(*argp)[1]->getAsConstantUnion()) error(loc, "argument must be compile-time constant", "callable data number", ""); + else { + unsigned int location = (*argp)[1]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst(); + if (intermediate.checkLocationRT(1, location) < 0) + error(loc, "with layout(location =", "no callableDataEXT/callableDataInEXT declared", "%d)", location); + } break; case EOpRayQueryGetIntersectionType: @@ -2121,9 +2139,15 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan { // Make sure the image types have the 
correct layout() format and correct argument types const TType& imageType = arg0->getType(); - if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { - if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) + if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint || + imageType.getSampler().type == EbtInt64 || imageType.getSampler().type == EbtUint64) { + if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui && + imageType.getQualifier().getFormat() != ElfR64i && imageType.getQualifier().getFormat() != ElfR64ui) error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); + if (callNode.getType().getBasicType() == EbtInt64 && imageType.getQualifier().getFormat() != ElfR64i) + error(loc, "only supported on image with format r64i", fnCandidate.getName().c_str(), ""); + else if (callNode.getType().getBasicType() == EbtUint64 && imageType.getQualifier().getFormat() != ElfR64ui) + error(loc, "only supported on image with format r64ui", fnCandidate.getName().c_str(), ""); } else { bool isImageAtomicOnFloatAllowed = ((fnCandidate.getName().compare(0, 14, "imageAtomicAdd") == 0) || (fnCandidate.getName().compare(0, 15, "imageAtomicLoad") == 0) || @@ -3368,7 +3392,7 @@ void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& t // void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType) { - globalQualifierFixCheck(publicType.loc, publicType.qualifier); + globalQualifierFixCheck(publicType.loc, publicType.qualifier, true); checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers); if (publicType.qualifier.isNonUniform()) { error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", ""); @@ -3379,7 +3403,7 @@ void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType) // // Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level. // -void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier) +void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier, bool isMemberCheck) { bool nonuniformOkay = false; @@ -3404,6 +3428,16 @@ void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& q case EvqTemporary: nonuniformOkay = true; break; + case EvqUniform: + // According to GLSL spec: The std430 qualifier is supported only for shader storage blocks; a shader using + // the std430 qualifier on a uniform block will fail to compile. + // Only check the global declaration: layout(std430) uniform; + if (blockName == nullptr && + qualifier.layoutPacking == ElpStd430) + { + error(loc, "it is invalid to declare std430 qualifier on uniform", "", ""); + } + break; default: break; } @@ -3411,7 +3445,9 @@ void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& q if (!nonuniformOkay && qualifier.isNonUniform()) error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", ""); - invariantCheck(loc, qualifier); + // Storage qualifier isn't ready for memberQualifierCheck, we should skip invariantCheck for it. + if (!isMemberCheck || structNestingLevel > 0) + invariantCheck(loc, qualifier); } // @@ -3422,7 +3458,7 @@ void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQuali if (! 
symbolTable.atGlobalLevel()) return; - if (!(publicType.userDef && publicType.userDef->isReference())) { + if (!(publicType.userDef && publicType.userDef->isReference()) && !parsingBuiltins) { if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) { error(loc, "memory qualifiers cannot be used on this type", "", ""); } else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) { @@ -4083,6 +4119,9 @@ void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermType if (isRuntimeLength(base)) return; + if (base.getType().getQualifier().builtIn == EbvSampleMask) + return; + // Check for last member of a bufferreference type, which is runtime sizeable // but doesn't support runtime length if (base.getType().getQualifier().storage == EvqBuffer) { @@ -4226,6 +4265,8 @@ TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TS (identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) || identifier == "gl_ClipDistance" || identifier == "gl_CullDistance" || + identifier == "gl_ShadingRateEXT" || + identifier == "gl_PrimitiveShadingRateEXT" || identifier == "gl_FrontColor" || identifier == "gl_BackColor" || identifier == "gl_FrontSecondaryColor" || @@ -4632,14 +4673,14 @@ void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& quali void TParseContext::nestedBlockCheck(const TSourceLoc& loc) { - if (structNestingLevel > 0) + if (structNestingLevel > 0 || blockNestingLevel > 0) error(loc, "cannot nest a block definition inside a structure or block", "", ""); - ++structNestingLevel; + ++blockNestingLevel; } void TParseContext::nestedStructCheck(const TSourceLoc& loc) { - if (structNestingLevel > 0) + if (structNestingLevel > 0 || blockNestingLevel > 0) error(loc, "cannot nest a structure definition inside a structure or block", "", ""); ++structNestingLevel; } @@ -6535,13 +6576,15 @@ void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType error(loc, "atomic_uint binding is too large", "binding", ""); return; } - - if(publicType.qualifier.hasOffset()) { + if (publicType.qualifier.hasOffset()) atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset; - } return; } + if (publicType.arraySizes) { + error(loc, "expect an array name", "", ""); + } + if (publicType.qualifier.hasLayout() && !publicType.qualifier.hasBufferReference()) warn(loc, "useless application of layout qualifier", "layout", ""); #endif @@ -6632,6 +6675,22 @@ TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& iden if (type.getQualifier().storage == EvqShared && type.containsCoopMat()) error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", ""); + if (profile == EEsProfile) { + if (type.getQualifier().isPipeInput() && type.getBasicType() == EbtStruct) { + if (type.getQualifier().isArrayedIo(language)) { + TType perVertexType(type, 0); + if (perVertexType.containsArray() && perVertexType.containsBuiltIn() == false) { + error(loc, "A per vertex structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), ""); + } + } + else if (type.containsArray() && type.containsBuiltIn() == false) { + error(loc, "A structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), ""); + } + if (type.containsStructure()) + error(loc, "A structure containing an struct is not allowed as input 
in ES", type.getTypeName().c_str(), ""); + } + } + if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger)) error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", ""); if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.getDepth() != EldNone) @@ -6954,6 +7013,15 @@ TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str()); return nullptr; } + TBasicType destType = type.getBasicType(); + for (int i = 0; i < type.getVectorSize(); ++i) { + TBasicType initType = initList->getSequence()[i]->getAsTyped()->getBasicType(); + if (destType != initType && !intermediate.canImplicitlyPromote(initType, destType)) { + error(loc, "type mismatch in initializer list", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + + } } else { error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str()); return nullptr; @@ -7410,6 +7478,19 @@ TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, T return node; + case EOpConstructAccStruct: + if ((node->getType().isScalar() && node->getType().getBasicType() == EbtUint64)) { + // construct acceleration structure from uint64 + requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "uint64_t conversion to acclerationStructureEXT"); + return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToAccStruct, true, node, + type); + } else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint && node->getVectorSize() == 2) { + // construct acceleration structure from uint64 + requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "uvec2 conversion to accelerationStructureEXT"); + return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToAccStruct, true, node, + type); + } else + return nullptr; #endif // GLSLANG_WEB default: @@ -7490,10 +7571,10 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con TType& memberType = *typeList[member].type; TQualifier& memberQualifier = memberType.getQualifier(); const TSourceLoc& memberLoc = typeList[member].loc; - globalQualifierFixCheck(memberLoc, memberQualifier); if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage) error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), ""); memberQualifier.storage = currentBlockQualifier.storage; + globalQualifierFixCheck(memberLoc, memberQualifier); #ifndef GLSLANG_WEB inheritMemoryQualifiers(currentBlockQualifier, memberQualifier); if (currentBlockQualifier.perPrimitiveNV) @@ -8191,7 +8272,7 @@ void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qual bool pipeOut = qualifier.isPipeOutput(); bool pipeIn = qualifier.isPipeInput(); - if (version >= 300 || (!isEsProfile() && version >= 420)) { + if ((version >= 300 && isEsProfile()) || (!isEsProfile() && version >= 420)) { if (! 
pipeOut) error(loc, "can only apply to an output", "invariant", ""); } else { diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h index 0f09adaffc..fe2b6fbbe1 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h +++ b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h @@ -83,7 +83,7 @@ public: : TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages), scopeMangler("::"), symbolTable(symbolTable), - statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0), + statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), blockNestingLevel(0), controlFlowNestingLevel(0), currentFunctionType(nullptr), postEntryPointReturn(false), contextPragma(true, false), @@ -178,7 +178,8 @@ public: TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile int statementNestingLevel; // 0 if outside all flow control or compound statements int loopNestingLevel; // 0 if outside all loops - int structNestingLevel; // 0 if outside blocks and structures + int structNestingLevel; // 0 if outside structures + int blockNestingLevel; // 0 if outside blocks int controlFlowNestingLevel; // 0 if outside all flow control const TType* currentFunctionType; // the return type of the function that's currently being parsed bool functionReturnsValue; // true if a non-void function has a return @@ -365,7 +366,7 @@ public: void accStructCheck(const TSourceLoc & loc, const TType & type, const TString & identifier); void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier); void memberQualifierCheck(glslang::TPublicType&); - void globalQualifierFixCheck(const TSourceLoc&, TQualifier&); + void globalQualifierFixCheck(const TSourceLoc&, TQualifier&, bool isMemberCheck = false); void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&); bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType); void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force); diff --git a/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp b/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp index bd3181a74f..78c8a365d7 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp @@ -365,6 +365,9 @@ void TScanContext::fillInKeywordMap() (*KeywordMap)["if"] = IF; (*KeywordMap)["else"] = ELSE; (*KeywordMap)["discard"] = DISCARD; + (*KeywordMap)["terminateInvocation"] = TERMINATE_INVOCATION; + (*KeywordMap)["terminateRayEXT"] = TERMINATE_RAY; + (*KeywordMap)["ignoreIntersectionEXT"] = IGNORE_INTERSECTION; (*KeywordMap)["return"] = RETURN; (*KeywordMap)["void"] = VOID; (*KeywordMap)["bool"] = BOOL; @@ -471,6 +474,28 @@ void TScanContext::fillInKeywordMap() (*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY; (*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY; (*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY; + (*KeywordMap)["i64image1D"] = I64IMAGE1D; + (*KeywordMap)["u64image1D"] = U64IMAGE1D; + (*KeywordMap)["i64image2D"] = I64IMAGE2D; + (*KeywordMap)["u64image2D"] = U64IMAGE2D; + (*KeywordMap)["i64image3D"] = I64IMAGE3D; + (*KeywordMap)["u64image3D"] = U64IMAGE3D; + (*KeywordMap)["i64image2DRect"] = I64IMAGE2DRECT; + (*KeywordMap)["u64image2DRect"] = U64IMAGE2DRECT; + (*KeywordMap)["i64imageCube"] = I64IMAGECUBE; + 
(*KeywordMap)["u64imageCube"] = U64IMAGECUBE; + (*KeywordMap)["i64imageBuffer"] = I64IMAGEBUFFER; + (*KeywordMap)["u64imageBuffer"] = U64IMAGEBUFFER; + (*KeywordMap)["i64image1DArray"] = I64IMAGE1DARRAY; + (*KeywordMap)["u64image1DArray"] = U64IMAGE1DARRAY; + (*KeywordMap)["i64image2DArray"] = I64IMAGE2DARRAY; + (*KeywordMap)["u64image2DArray"] = U64IMAGE2DARRAY; + (*KeywordMap)["i64imageCubeArray"] = I64IMAGECUBEARRAY; + (*KeywordMap)["u64imageCubeArray"] = U64IMAGECUBEARRAY; + (*KeywordMap)["i64image2DMS"] = I64IMAGE2DMS; + (*KeywordMap)["u64image2DMS"] = U64IMAGE2DMS; + (*KeywordMap)["i64image2DMSArray"] = I64IMAGE2DMSARRAY; + (*KeywordMap)["u64image2DMSArray"] = U64IMAGE2DMSARRAY; (*KeywordMap)["double"] = DOUBLE; (*KeywordMap)["dvec2"] = DVEC2; (*KeywordMap)["dvec3"] = DVEC3; @@ -914,6 +939,17 @@ int TScanContext::tokenizeIdentifier() case CASE: return keyword; + case TERMINATE_INVOCATION: + if (!parseContext.extensionTurnedOn(E_GL_EXT_terminate_invocation)) + return identifierOrType(); + return keyword; + + case TERMINATE_RAY: + case IGNORE_INTERSECTION: + if (!parseContext.extensionTurnedOn(E_GL_EXT_ray_tracing)) + return identifierOrType(); + return keyword; + case BUFFER: afterBuffer = true; if ((parseContext.isEsProfile() && parseContext.version < 310) || @@ -982,7 +1018,7 @@ int TScanContext::tokenizeIdentifier() return keyword; case PACKED: if ((parseContext.isEsProfile() && parseContext.version < 300) || - (!parseContext.isEsProfile() && parseContext.version < 330)) + (!parseContext.isEsProfile() && parseContext.version < 140)) return reservedWord(); return identifierOrType(); @@ -1147,6 +1183,19 @@ int TScanContext::tokenizeIdentifier() afterType = true; return firstGenerationImage(false); + case I64IMAGE1D: + case U64IMAGE1D: + case I64IMAGE1DARRAY: + case U64IMAGE1DARRAY: + case I64IMAGE2DRECT: + case U64IMAGE2DRECT: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) { + return firstGenerationImage(false); + } + return identifierOrType(); + case IMAGEBUFFER: case IIMAGEBUFFER: case UIMAGEBUFFER: @@ -1155,6 +1204,18 @@ int TScanContext::tokenizeIdentifier() parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) return keyword; return firstGenerationImage(false); + + case I64IMAGEBUFFER: + case U64IMAGEBUFFER: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) { + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return firstGenerationImage(false); + } + return identifierOrType(); case IMAGE2D: case IIMAGE2D: @@ -1171,6 +1232,20 @@ int TScanContext::tokenizeIdentifier() afterType = true; return firstGenerationImage(true); + case I64IMAGE2D: + case U64IMAGE2D: + case I64IMAGE3D: + case U64IMAGE3D: + case I64IMAGECUBE: + case U64IMAGECUBE: + case I64IMAGE2DARRAY: + case U64IMAGE2DARRAY: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) + return firstGenerationImage(true); + return identifierOrType(); + case IMAGECUBEARRAY: case IIMAGECUBEARRAY: case UIMAGECUBEARRAY: @@ -1179,6 +1254,18 @@ int TScanContext::tokenizeIdentifier() parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) return keyword; return secondGenerationImage(); + + case I64IMAGECUBEARRAY: + 
case U64IMAGECUBEARRAY: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) { + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) + return keyword; + return secondGenerationImage(); + } + return identifierOrType(); case IMAGE2DMS: case IIMAGE2DMS: @@ -1188,6 +1275,17 @@ int TScanContext::tokenizeIdentifier() case UIMAGE2DMSARRAY: afterType = true; return secondGenerationImage(); + + case I64IMAGE2DMS: + case U64IMAGE2DMS: + case I64IMAGE2DMSARRAY: + case U64IMAGE2DMSARRAY: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) { + return secondGenerationImage(); + } + return identifierOrType(); case DOUBLE: case DVEC2: diff --git a/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp b/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp index 9d7f37b098..c6030bd7c2 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp @@ -2016,7 +2016,7 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages) intermediate[stage] = new TIntermediate(stage, firstIntermediate->getVersion(), firstIntermediate->getProfile()); - + intermediate[stage]->setLimits(firstIntermediate->getLimits()); // The new TIntermediate must use the same origin as the original TIntermediates. // Otherwise linking will fail due to different coordinate systems. diff --git a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp index 007f22c1b4..f6291c397d 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp @@ -85,6 +85,8 @@ void TType::buildMangledName(TString& mangledName) const #endif case EbtInt: mangledName += "i"; break; case EbtUint: mangledName += "u"; break; + case EbtInt64: mangledName += "i64"; break; + case EbtUint64: mangledName += "u64"; break; default: break; // some compilers want this } if (sampler.isImageClass()) @@ -146,6 +148,8 @@ void TType::buildMangledName(TString& mangledName) const if (typeName) mangledName += *typeName; for (unsigned int i = 0; i < structure->size(); ++i) { + if ((*structure)[i].type->getBasicType() == EbtVoid) + continue; mangledName += '-'; (*structure)[i].type->buildMangledName(mangledName); } diff --git a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h index ec4bc3c599..db16c19bca 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h +++ b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h @@ -613,20 +613,24 @@ public: // protected: static const int globalLevel = 3; - bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels - bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals - bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals + static bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels + static bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals + static bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals public: bool isEmpty() { return table.size() == 0; } 
bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); } bool atGlobalLevel() { return isGlobalLevel(currentLevel()); } - + static bool isBuiltInSymbol(int uniqueId) { + int level = uniqueId >> LevelFlagBitOffset; + return isBuiltInLevel(level); + } void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; } void setSeparateNameSpaces() { separateNameSpaces = true; } void push() { table.push_back(new TSymbolTableLevel); + updateUniqueIdLevelFlag(); } // Make a new symbol-table level to represent the scope introduced by a structure @@ -639,6 +643,7 @@ public: { assert(thisSymbol.getName().size() == 0); table.push_back(new TSymbolTableLevel); + updateUniqueIdLevelFlag(); table.back()->setThisLevel(); insert(thisSymbol); } @@ -648,6 +653,7 @@ public: table[currentLevel()]->getPreviousDefaultPrecisions(p); delete table.back(); table.pop_back(); + updateUniqueIdLevelFlag(); } // @@ -867,12 +873,20 @@ public: table[level]->readOnly(); } + // Add current level in the high-bits of unique id + void updateUniqueIdLevelFlag() { + // clamp level to avoid overflow + uint32_t level = currentLevel() > 7 ? 7 : currentLevel(); + uniqueId &= ((1 << LevelFlagBitOffset) - 1); + uniqueId |= (level << LevelFlagBitOffset); + } + protected: TSymbolTable(TSymbolTable&); TSymbolTable& operator=(TSymbolTableLevel&); int currentLevel() const { return static_cast<int>(table.size()) - 1; } - + static const uint32_t LevelFlagBitOffset = 28; std::vector<TSymbolTableLevel*> table; int uniqueId; // for unique identification in code generation bool noBuiltInRedeclarations; diff --git a/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp b/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp index 896fd5ae20..69b8863ce6 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp @@ -327,6 +327,9 @@ void TParseVersions::initializeExtensionBehavior() extensionBehavior[E_GL_EXT_ray_flags_primitive_culling] = EBhDisable; extensionBehavior[E_GL_EXT_blend_func_extended] = EBhDisable; extensionBehavior[E_GL_EXT_shader_implicit_conversions] = EBhDisable; + extensionBehavior[E_GL_EXT_fragment_shading_rate] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_image_int64] = EBhDisable; + extensionBehavior[E_GL_EXT_terminate_invocation] = EBhDisable; // OVR extensions extensionBehavior[E_GL_OVR_multiview] = EBhDisable; @@ -371,6 +374,7 @@ void TParseVersions::getPreamble(std::string& preamble) "#define GL_EXT_YUV_target 1\n" "#define GL_EXT_shader_texture_lod 1\n" "#define GL_EXT_shadow_samplers 1\n" + "#define GL_EXT_fragment_shading_rate 1\n" // AEP "#define GL_ANDROID_extension_pack_es31a 1\n" @@ -408,7 +412,7 @@ void TParseVersions::getPreamble(std::string& preamble) preamble += "#define GL_NV_shader_noperspective_interpolation 1\n"; } - } else { + } else { // !isEsProfile() preamble = "#define GL_FRAGMENT_PRECISION_HIGH 1\n" "#define GL_ARB_texture_rectangle 1\n" @@ -463,6 +467,7 @@ void TParseVersions::getPreamble(std::string& preamble) "#define GL_EXT_buffer_reference_uvec2 1\n" "#define GL_EXT_demote_to_helper_invocation 1\n" "#define GL_EXT_debug_printf 1\n" + "#define GL_EXT_fragment_shading_rate 1\n" // GL_KHR_shader_subgroup "#define GL_KHR_shader_subgroup_basic 1\n" @@ -474,6 +479,7 @@ void TParseVersions::getPreamble(std::string& preamble) "#define GL_KHR_shader_subgroup_clustered 1\n" "#define GL_KHR_shader_subgroup_quad 1\n" + "#define GL_EXT_shader_image_int64 1\n" "#define GL_EXT_shader_atomic_int64 1\n" "#define 
GL_EXT_shader_realtime_clock 1\n" "#define GL_EXT_ray_tracing 1\n" @@ -558,6 +564,11 @@ void TParseVersions::getPreamble(std::string& preamble) "#define GL_GOOGLE_include_directive 1\n" "#define GL_KHR_blend_equation_advanced 1\n" ; + + // other general extensions + preamble += + "#define GL_EXT_terminate_invocation 1\n" + ; #endif // #define VULKAN XXXX diff --git a/thirdparty/glslang/glslang/MachineIndependent/Versions.h b/thirdparty/glslang/glslang/MachineIndependent/Versions.h index f52f605ea8..eb17c52e05 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/Versions.h +++ b/thirdparty/glslang/glslang/MachineIndependent/Versions.h @@ -199,6 +199,8 @@ const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query" const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling"; const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended"; const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions"; +const char* const E_GL_EXT_fragment_shading_rate = "GL_EXT_fragment_shading_rate"; +const char* const E_GL_EXT_shader_image_int64 = "GL_EXT_shader_image_int64"; // Arrays of extensions for the above viewportEXTs duplications @@ -297,6 +299,7 @@ const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shad const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16"; const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64"; const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16"; +const char* const E_GL_EXT_terminate_invocation = "GL_EXT_terminate_invocation"; const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float"; diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang.y b/thirdparty/glslang/glslang/MachineIndependent/glslang.y index 23adcb057e..2681d48f79 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/glslang.y +++ b/thirdparty/glslang/glslang/MachineIndependent/glslang.y @@ -242,6 +242,18 @@ extern int yylex(YYSTYPE*, TParseContext&); %token <lex> F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY %token <lex> F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY +%token <lex> I64IMAGE1D U64IMAGE1D +%token <lex> I64IMAGE2D U64IMAGE2D +%token <lex> I64IMAGE3D U64IMAGE3D +%token <lex> I64IMAGE2DRECT U64IMAGE2DRECT +%token <lex> I64IMAGECUBE U64IMAGECUBE +%token <lex> I64IMAGEBUFFER U64IMAGEBUFFER +%token <lex> I64IMAGE1DARRAY U64IMAGE1DARRAY +%token <lex> I64IMAGE2DARRAY U64IMAGE2DARRAY +%token <lex> I64IMAGECUBEARRAY U64IMAGECUBEARRAY +%token <lex> I64IMAGE2DMS U64IMAGE2DMS +%token <lex> I64IMAGE2DMSARRAY U64IMAGE2DMSARRAY + // texture without sampler %token <lex> TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY %token <lex> TEXTURE1D ITEXTURE1D UTEXTURE1D @@ -281,6 +293,8 @@ extern int yylex(YYSTYPE*, TParseContext&); %token <lex> CENTROID IN OUT INOUT %token <lex> STRUCT VOID WHILE %token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT +%token <lex> TERMINATE_INVOCATION +%token <lex> TERMINATE_RAY IGNORE_INTERSECTION %token <lex> UNIFORM SHARED BUFFER %token <lex> FLAT SMOOTH LAYOUT @@ -905,7 +919,7 @@ declaration block_structure : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE { - --parseContext.structNestingLevel; + --parseContext.blockNestingLevel; parseContext.blockName = $2.string; 
parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); @@ -3203,6 +3217,116 @@ type_specifier_nonarray $$.basicType = EbtSampler; $$.sampler.setImage(EbtUint, Esd2D, true, false, true); } + | I64IMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd1D); + } + | U64IMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd1D); + } + | I64IMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd2D); + } + | U64IMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd2D); + } + | I64IMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd3D); + } + | U64IMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd3D); + } + | I64IMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, EsdRect); + } + | U64IMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, EsdRect); + } + | I64IMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, EsdCube); + } + | U64IMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, EsdCube); + } + | I64IMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, EsdBuffer); + } + | U64IMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, EsdBuffer); + } + | I64IMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd1D, true); + } + | U64IMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd1D, true); + } + | I64IMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd2D, true); + } + | U64IMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd2D, true); + } + | I64IMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, EsdCube, true); + } + | U64IMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, EsdCube, true); + } + | I64IMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt64, Esd2D, false, false, true); + } + | U64IMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd2D, false, false, true); + } + | I64IMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; 
+ $$.sampler.setImage(EbtInt64, Esd2D, true, false, true); + } + | U64IMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint64, Esd2D, true, false, true); + } | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); $$.basicType = EbtSampler; @@ -3805,6 +3929,20 @@ jump_statement parseContext.requireStage($1.loc, EShLangFragment, "discard"); $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc); } + | TERMINATE_INVOCATION SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "terminateInvocation"); + $$ = parseContext.intermediate.addBranch(EOpTerminateInvocation, $1.loc); + } + + | TERMINATE_RAY SEMICOLON { + parseContext.requireStage($1.loc, EShLangAnyHit, "terminateRayEXT"); + $$ = parseContext.intermediate.addBranch(EOpTerminateRayKHR, $1.loc); + } + | IGNORE_INTERSECTION SEMICOLON { + parseContext.requireStage($1.loc, EShLangAnyHit, "ignoreIntersectionEXT"); + $$ = parseContext.intermediate.addBranch(EOpIgnoreIntersectionKHR, $1.loc); + } + ; // Grammar Note: No 'goto'. Gotos are not supported. diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp index ac35797797..feecc98200 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp @@ -1,8 +1,9 @@ -/* A Bison parser, made by GNU Bison 3.0.4. */ +/* A Bison parser, made by GNU Bison 3.7.4. */ /* Bison implementation for Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation, + Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -33,6 +34,10 @@ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ +/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual, + especially those whose name start with YY_ or yy_. They are + private implementation details that can be changed or removed. */ + /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. @@ -40,11 +45,11 @@ define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ -/* Identify Bison output. */ -#define YYBISON 1 +/* Identify Bison output, and Bison version. */ +#define YYBISON 30704 -/* Bison version. */ -#define YYBISON_VERSION "3.0.4" +/* Bison version string. */ +#define YYBISON_VERSION "3.7.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" @@ -61,8 +66,8 @@ -/* Copy the first part of user declarations. */ -#line 69 "MachineIndependent/glslang.y" /* yacc.c:339 */ +/* First part of user prologue. 
*/ +#line 69 "MachineIndependent/glslang.y" /* Based on: @@ -88,518 +93,596 @@ Jutta Degener, 1995 using namespace glslang; -#line 92 "MachineIndependent/glslang_tab.cpp" /* yacc.c:339 */ +#line 97 "MachineIndependent/glslang_tab.cpp" +# ifndef YY_CAST +# ifdef __cplusplus +# define YY_CAST(Type, Val) static_cast<Type> (Val) +# define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast<Type> (Val) +# else +# define YY_CAST(Type, Val) ((Type) (Val)) +# define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val)) +# endif +# endif # ifndef YY_NULLPTR -# if defined __cplusplus && 201103L <= __cplusplus -# define YY_NULLPTR nullptr +# if defined __cplusplus +# if 201103L <= __cplusplus +# define YY_NULLPTR nullptr +# else +# define YY_NULLPTR 0 +# endif # else -# define YY_NULLPTR 0 +# define YY_NULLPTR ((void*)0) # endif # endif -/* Enabling verbose error messages. */ -#ifdef YYERROR_VERBOSE -# undef YYERROR_VERBOSE -# define YYERROR_VERBOSE 1 -#else -# define YYERROR_VERBOSE 1 -#endif - -/* In a future release of Bison, this section will be replaced - by #include "glslang_tab.cpp.h". */ -#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED -# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED -/* Debug traces. */ -#ifndef YYDEBUG -# define YYDEBUG 1 -#endif -#if YYDEBUG -extern int yydebug; -#endif - -/* Token type. */ -#ifndef YYTOKENTYPE -# define YYTOKENTYPE - enum yytokentype - { - CONST = 258, - BOOL = 259, - INT = 260, - UINT = 261, - FLOAT = 262, - BVEC2 = 263, - BVEC3 = 264, - BVEC4 = 265, - IVEC2 = 266, - IVEC3 = 267, - IVEC4 = 268, - UVEC2 = 269, - UVEC3 = 270, - UVEC4 = 271, - VEC2 = 272, - VEC3 = 273, - VEC4 = 274, - MAT2 = 275, - MAT3 = 276, - MAT4 = 277, - MAT2X2 = 278, - MAT2X3 = 279, - MAT2X4 = 280, - MAT3X2 = 281, - MAT3X3 = 282, - MAT3X4 = 283, - MAT4X2 = 284, - MAT4X3 = 285, - MAT4X4 = 286, - SAMPLER2D = 287, - SAMPLER3D = 288, - SAMPLERCUBE = 289, - SAMPLER2DSHADOW = 290, - SAMPLERCUBESHADOW = 291, - SAMPLER2DARRAY = 292, - SAMPLER2DARRAYSHADOW = 293, - ISAMPLER2D = 294, - ISAMPLER3D = 295, - ISAMPLERCUBE = 296, - ISAMPLER2DARRAY = 297, - USAMPLER2D = 298, - USAMPLER3D = 299, - USAMPLERCUBE = 300, - USAMPLER2DARRAY = 301, - SAMPLER = 302, - SAMPLERSHADOW = 303, - TEXTURE2D = 304, - TEXTURE3D = 305, - TEXTURECUBE = 306, - TEXTURE2DARRAY = 307, - ITEXTURE2D = 308, - ITEXTURE3D = 309, - ITEXTURECUBE = 310, - ITEXTURE2DARRAY = 311, - UTEXTURE2D = 312, - UTEXTURE3D = 313, - UTEXTURECUBE = 314, - UTEXTURE2DARRAY = 315, - ATTRIBUTE = 316, - VARYING = 317, - FLOAT16_T = 318, - FLOAT32_T = 319, - DOUBLE = 320, - FLOAT64_T = 321, - INT64_T = 322, - UINT64_T = 323, - INT32_T = 324, - UINT32_T = 325, - INT16_T = 326, - UINT16_T = 327, - INT8_T = 328, - UINT8_T = 329, - I64VEC2 = 330, - I64VEC3 = 331, - I64VEC4 = 332, - U64VEC2 = 333, - U64VEC3 = 334, - U64VEC4 = 335, - I32VEC2 = 336, - I32VEC3 = 337, - I32VEC4 = 338, - U32VEC2 = 339, - U32VEC3 = 340, - U32VEC4 = 341, - I16VEC2 = 342, - I16VEC3 = 343, - I16VEC4 = 344, - U16VEC2 = 345, - U16VEC3 = 346, - U16VEC4 = 347, - I8VEC2 = 348, - I8VEC3 = 349, - I8VEC4 = 350, - U8VEC2 = 351, - U8VEC3 = 352, - U8VEC4 = 353, - DVEC2 = 354, - DVEC3 = 355, - DVEC4 = 356, - DMAT2 = 357, - DMAT3 = 358, - DMAT4 = 359, - F16VEC2 = 360, - F16VEC3 = 361, - F16VEC4 = 362, - F16MAT2 = 363, - F16MAT3 = 364, - F16MAT4 = 365, - F32VEC2 = 366, - F32VEC3 = 367, - F32VEC4 = 368, - F32MAT2 = 369, - F32MAT3 = 370, - F32MAT4 = 371, - F64VEC2 = 372, - F64VEC3 = 373, - F64VEC4 = 374, - F64MAT2 = 375, - F64MAT3 = 376, - F64MAT4 = 377, - DMAT2X2 = 378, - DMAT2X3 = 
379, - DMAT2X4 = 380, - DMAT3X2 = 381, - DMAT3X3 = 382, - DMAT3X4 = 383, - DMAT4X2 = 384, - DMAT4X3 = 385, - DMAT4X4 = 386, - F16MAT2X2 = 387, - F16MAT2X3 = 388, - F16MAT2X4 = 389, - F16MAT3X2 = 390, - F16MAT3X3 = 391, - F16MAT3X4 = 392, - F16MAT4X2 = 393, - F16MAT4X3 = 394, - F16MAT4X4 = 395, - F32MAT2X2 = 396, - F32MAT2X3 = 397, - F32MAT2X4 = 398, - F32MAT3X2 = 399, - F32MAT3X3 = 400, - F32MAT3X4 = 401, - F32MAT4X2 = 402, - F32MAT4X3 = 403, - F32MAT4X4 = 404, - F64MAT2X2 = 405, - F64MAT2X3 = 406, - F64MAT2X4 = 407, - F64MAT3X2 = 408, - F64MAT3X3 = 409, - F64MAT3X4 = 410, - F64MAT4X2 = 411, - F64MAT4X3 = 412, - F64MAT4X4 = 413, - ATOMIC_UINT = 414, - ACCSTRUCTNV = 415, - ACCSTRUCTEXT = 416, - RAYQUERYEXT = 417, - FCOOPMATNV = 418, - ICOOPMATNV = 419, - UCOOPMATNV = 420, - SAMPLERCUBEARRAY = 421, - SAMPLERCUBEARRAYSHADOW = 422, - ISAMPLERCUBEARRAY = 423, - USAMPLERCUBEARRAY = 424, - SAMPLER1D = 425, - SAMPLER1DARRAY = 426, - SAMPLER1DARRAYSHADOW = 427, - ISAMPLER1D = 428, - SAMPLER1DSHADOW = 429, - SAMPLER2DRECT = 430, - SAMPLER2DRECTSHADOW = 431, - ISAMPLER2DRECT = 432, - USAMPLER2DRECT = 433, - SAMPLERBUFFER = 434, - ISAMPLERBUFFER = 435, - USAMPLERBUFFER = 436, - SAMPLER2DMS = 437, - ISAMPLER2DMS = 438, - USAMPLER2DMS = 439, - SAMPLER2DMSARRAY = 440, - ISAMPLER2DMSARRAY = 441, - USAMPLER2DMSARRAY = 442, - SAMPLEREXTERNALOES = 443, - SAMPLEREXTERNAL2DY2YEXT = 444, - ISAMPLER1DARRAY = 445, - USAMPLER1D = 446, - USAMPLER1DARRAY = 447, - F16SAMPLER1D = 448, - F16SAMPLER2D = 449, - F16SAMPLER3D = 450, - F16SAMPLER2DRECT = 451, - F16SAMPLERCUBE = 452, - F16SAMPLER1DARRAY = 453, - F16SAMPLER2DARRAY = 454, - F16SAMPLERCUBEARRAY = 455, - F16SAMPLERBUFFER = 456, - F16SAMPLER2DMS = 457, - F16SAMPLER2DMSARRAY = 458, - F16SAMPLER1DSHADOW = 459, - F16SAMPLER2DSHADOW = 460, - F16SAMPLER1DARRAYSHADOW = 461, - F16SAMPLER2DARRAYSHADOW = 462, - F16SAMPLER2DRECTSHADOW = 463, - F16SAMPLERCUBESHADOW = 464, - F16SAMPLERCUBEARRAYSHADOW = 465, - IMAGE1D = 466, - IIMAGE1D = 467, - UIMAGE1D = 468, - IMAGE2D = 469, - IIMAGE2D = 470, - UIMAGE2D = 471, - IMAGE3D = 472, - IIMAGE3D = 473, - UIMAGE3D = 474, - IMAGE2DRECT = 475, - IIMAGE2DRECT = 476, - UIMAGE2DRECT = 477, - IMAGECUBE = 478, - IIMAGECUBE = 479, - UIMAGECUBE = 480, - IMAGEBUFFER = 481, - IIMAGEBUFFER = 482, - UIMAGEBUFFER = 483, - IMAGE1DARRAY = 484, - IIMAGE1DARRAY = 485, - UIMAGE1DARRAY = 486, - IMAGE2DARRAY = 487, - IIMAGE2DARRAY = 488, - UIMAGE2DARRAY = 489, - IMAGECUBEARRAY = 490, - IIMAGECUBEARRAY = 491, - UIMAGECUBEARRAY = 492, - IMAGE2DMS = 493, - IIMAGE2DMS = 494, - UIMAGE2DMS = 495, - IMAGE2DMSARRAY = 496, - IIMAGE2DMSARRAY = 497, - UIMAGE2DMSARRAY = 498, - F16IMAGE1D = 499, - F16IMAGE2D = 500, - F16IMAGE3D = 501, - F16IMAGE2DRECT = 502, - F16IMAGECUBE = 503, - F16IMAGE1DARRAY = 504, - F16IMAGE2DARRAY = 505, - F16IMAGECUBEARRAY = 506, - F16IMAGEBUFFER = 507, - F16IMAGE2DMS = 508, - F16IMAGE2DMSARRAY = 509, - TEXTURECUBEARRAY = 510, - ITEXTURECUBEARRAY = 511, - UTEXTURECUBEARRAY = 512, - TEXTURE1D = 513, - ITEXTURE1D = 514, - UTEXTURE1D = 515, - TEXTURE1DARRAY = 516, - ITEXTURE1DARRAY = 517, - UTEXTURE1DARRAY = 518, - TEXTURE2DRECT = 519, - ITEXTURE2DRECT = 520, - UTEXTURE2DRECT = 521, - TEXTUREBUFFER = 522, - ITEXTUREBUFFER = 523, - UTEXTUREBUFFER = 524, - TEXTURE2DMS = 525, - ITEXTURE2DMS = 526, - UTEXTURE2DMS = 527, - TEXTURE2DMSARRAY = 528, - ITEXTURE2DMSARRAY = 529, - UTEXTURE2DMSARRAY = 530, - F16TEXTURE1D = 531, - F16TEXTURE2D = 532, - F16TEXTURE3D = 533, - F16TEXTURE2DRECT = 534, - F16TEXTURECUBE = 535, - F16TEXTURE1DARRAY = 536, - 
F16TEXTURE2DARRAY = 537, - F16TEXTURECUBEARRAY = 538, - F16TEXTUREBUFFER = 539, - F16TEXTURE2DMS = 540, - F16TEXTURE2DMSARRAY = 541, - SUBPASSINPUT = 542, - SUBPASSINPUTMS = 543, - ISUBPASSINPUT = 544, - ISUBPASSINPUTMS = 545, - USUBPASSINPUT = 546, - USUBPASSINPUTMS = 547, - F16SUBPASSINPUT = 548, - F16SUBPASSINPUTMS = 549, - LEFT_OP = 550, - RIGHT_OP = 551, - INC_OP = 552, - DEC_OP = 553, - LE_OP = 554, - GE_OP = 555, - EQ_OP = 556, - NE_OP = 557, - AND_OP = 558, - OR_OP = 559, - XOR_OP = 560, - MUL_ASSIGN = 561, - DIV_ASSIGN = 562, - ADD_ASSIGN = 563, - MOD_ASSIGN = 564, - LEFT_ASSIGN = 565, - RIGHT_ASSIGN = 566, - AND_ASSIGN = 567, - XOR_ASSIGN = 568, - OR_ASSIGN = 569, - SUB_ASSIGN = 570, - STRING_LITERAL = 571, - LEFT_PAREN = 572, - RIGHT_PAREN = 573, - LEFT_BRACKET = 574, - RIGHT_BRACKET = 575, - LEFT_BRACE = 576, - RIGHT_BRACE = 577, - DOT = 578, - COMMA = 579, - COLON = 580, - EQUAL = 581, - SEMICOLON = 582, - BANG = 583, - DASH = 584, - TILDE = 585, - PLUS = 586, - STAR = 587, - SLASH = 588, - PERCENT = 589, - LEFT_ANGLE = 590, - RIGHT_ANGLE = 591, - VERTICAL_BAR = 592, - CARET = 593, - AMPERSAND = 594, - QUESTION = 595, - INVARIANT = 596, - HIGH_PRECISION = 597, - MEDIUM_PRECISION = 598, - LOW_PRECISION = 599, - PRECISION = 600, - PACKED = 601, - RESOURCE = 602, - SUPERP = 603, - FLOATCONSTANT = 604, - INTCONSTANT = 605, - UINTCONSTANT = 606, - BOOLCONSTANT = 607, - IDENTIFIER = 608, - TYPE_NAME = 609, - CENTROID = 610, - IN = 611, - OUT = 612, - INOUT = 613, - STRUCT = 614, - VOID = 615, - WHILE = 616, - BREAK = 617, - CONTINUE = 618, - DO = 619, - ELSE = 620, - FOR = 621, - IF = 622, - DISCARD = 623, - RETURN = 624, - SWITCH = 625, - CASE = 626, - DEFAULT = 627, - UNIFORM = 628, - SHARED = 629, - BUFFER = 630, - FLAT = 631, - SMOOTH = 632, - LAYOUT = 633, - DOUBLECONSTANT = 634, - INT16CONSTANT = 635, - UINT16CONSTANT = 636, - FLOAT16CONSTANT = 637, - INT32CONSTANT = 638, - UINT32CONSTANT = 639, - INT64CONSTANT = 640, - UINT64CONSTANT = 641, - SUBROUTINE = 642, - DEMOTE = 643, - PAYLOADNV = 644, - PAYLOADINNV = 645, - HITATTRNV = 646, - CALLDATANV = 647, - CALLDATAINNV = 648, - PAYLOADEXT = 649, - PAYLOADINEXT = 650, - HITATTREXT = 651, - CALLDATAEXT = 652, - CALLDATAINEXT = 653, - PATCH = 654, - SAMPLE = 655, - NONUNIFORM = 656, - COHERENT = 657, - VOLATILE = 658, - RESTRICT = 659, - READONLY = 660, - WRITEONLY = 661, - DEVICECOHERENT = 662, - QUEUEFAMILYCOHERENT = 663, - WORKGROUPCOHERENT = 664, - SUBGROUPCOHERENT = 665, - NONPRIVATE = 666, - SHADERCALLCOHERENT = 667, - NOPERSPECTIVE = 668, - EXPLICITINTERPAMD = 669, - PERVERTEXNV = 670, - PERPRIMITIVENV = 671, - PERVIEWNV = 672, - PERTASKNV = 673, - PRECISE = 674 - }; -#endif - -/* Value type. */ -#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED - -union YYSTYPE +#include "glslang_tab.cpp.h" +/* Symbol kind. 
*/ +enum yysymbol_kind_t { -#line 97 "MachineIndependent/glslang.y" /* yacc.c:355 */ - - struct { - glslang::TSourceLoc loc; - union { - glslang::TString *string; - int i; - unsigned int u; - long long i64; - unsigned long long u64; - bool b; - double d; - }; - glslang::TSymbol* symbol; - } lex; - struct { - glslang::TSourceLoc loc; - glslang::TOperator op; - union { - TIntermNode* intermNode; - glslang::TIntermNodePair nodePair; - glslang::TIntermTyped* intermTypedNode; - glslang::TAttributes* attributes; - }; - union { - glslang::TPublicType type; - glslang::TFunction* function; - glslang::TParameter param; - glslang::TTypeLoc typeLine; - glslang::TTypeList* typeList; - glslang::TArraySizes* arraySizes; - glslang::TIdentifierList* identifierList; - }; - glslang::TArraySizes* typeParameters; - } interm; - -#line 588 "MachineIndependent/glslang_tab.cpp" /* yacc.c:355 */ + YYSYMBOL_YYEMPTY = -2, + YYSYMBOL_YYEOF = 0, /* "end of file" */ + YYSYMBOL_YYerror = 1, /* error */ + YYSYMBOL_YYUNDEF = 2, /* "invalid token" */ + YYSYMBOL_CONST = 3, /* CONST */ + YYSYMBOL_BOOL = 4, /* BOOL */ + YYSYMBOL_INT = 5, /* INT */ + YYSYMBOL_UINT = 6, /* UINT */ + YYSYMBOL_FLOAT = 7, /* FLOAT */ + YYSYMBOL_BVEC2 = 8, /* BVEC2 */ + YYSYMBOL_BVEC3 = 9, /* BVEC3 */ + YYSYMBOL_BVEC4 = 10, /* BVEC4 */ + YYSYMBOL_IVEC2 = 11, /* IVEC2 */ + YYSYMBOL_IVEC3 = 12, /* IVEC3 */ + YYSYMBOL_IVEC4 = 13, /* IVEC4 */ + YYSYMBOL_UVEC2 = 14, /* UVEC2 */ + YYSYMBOL_UVEC3 = 15, /* UVEC3 */ + YYSYMBOL_UVEC4 = 16, /* UVEC4 */ + YYSYMBOL_VEC2 = 17, /* VEC2 */ + YYSYMBOL_VEC3 = 18, /* VEC3 */ + YYSYMBOL_VEC4 = 19, /* VEC4 */ + YYSYMBOL_MAT2 = 20, /* MAT2 */ + YYSYMBOL_MAT3 = 21, /* MAT3 */ + YYSYMBOL_MAT4 = 22, /* MAT4 */ + YYSYMBOL_MAT2X2 = 23, /* MAT2X2 */ + YYSYMBOL_MAT2X3 = 24, /* MAT2X3 */ + YYSYMBOL_MAT2X4 = 25, /* MAT2X4 */ + YYSYMBOL_MAT3X2 = 26, /* MAT3X2 */ + YYSYMBOL_MAT3X3 = 27, /* MAT3X3 */ + YYSYMBOL_MAT3X4 = 28, /* MAT3X4 */ + YYSYMBOL_MAT4X2 = 29, /* MAT4X2 */ + YYSYMBOL_MAT4X3 = 30, /* MAT4X3 */ + YYSYMBOL_MAT4X4 = 31, /* MAT4X4 */ + YYSYMBOL_SAMPLER2D = 32, /* SAMPLER2D */ + YYSYMBOL_SAMPLER3D = 33, /* SAMPLER3D */ + YYSYMBOL_SAMPLERCUBE = 34, /* SAMPLERCUBE */ + YYSYMBOL_SAMPLER2DSHADOW = 35, /* SAMPLER2DSHADOW */ + YYSYMBOL_SAMPLERCUBESHADOW = 36, /* SAMPLERCUBESHADOW */ + YYSYMBOL_SAMPLER2DARRAY = 37, /* SAMPLER2DARRAY */ + YYSYMBOL_SAMPLER2DARRAYSHADOW = 38, /* SAMPLER2DARRAYSHADOW */ + YYSYMBOL_ISAMPLER2D = 39, /* ISAMPLER2D */ + YYSYMBOL_ISAMPLER3D = 40, /* ISAMPLER3D */ + YYSYMBOL_ISAMPLERCUBE = 41, /* ISAMPLERCUBE */ + YYSYMBOL_ISAMPLER2DARRAY = 42, /* ISAMPLER2DARRAY */ + YYSYMBOL_USAMPLER2D = 43, /* USAMPLER2D */ + YYSYMBOL_USAMPLER3D = 44, /* USAMPLER3D */ + YYSYMBOL_USAMPLERCUBE = 45, /* USAMPLERCUBE */ + YYSYMBOL_USAMPLER2DARRAY = 46, /* USAMPLER2DARRAY */ + YYSYMBOL_SAMPLER = 47, /* SAMPLER */ + YYSYMBOL_SAMPLERSHADOW = 48, /* SAMPLERSHADOW */ + YYSYMBOL_TEXTURE2D = 49, /* TEXTURE2D */ + YYSYMBOL_TEXTURE3D = 50, /* TEXTURE3D */ + YYSYMBOL_TEXTURECUBE = 51, /* TEXTURECUBE */ + YYSYMBOL_TEXTURE2DARRAY = 52, /* TEXTURE2DARRAY */ + YYSYMBOL_ITEXTURE2D = 53, /* ITEXTURE2D */ + YYSYMBOL_ITEXTURE3D = 54, /* ITEXTURE3D */ + YYSYMBOL_ITEXTURECUBE = 55, /* ITEXTURECUBE */ + YYSYMBOL_ITEXTURE2DARRAY = 56, /* ITEXTURE2DARRAY */ + YYSYMBOL_UTEXTURE2D = 57, /* UTEXTURE2D */ + YYSYMBOL_UTEXTURE3D = 58, /* UTEXTURE3D */ + YYSYMBOL_UTEXTURECUBE = 59, /* UTEXTURECUBE */ + YYSYMBOL_UTEXTURE2DARRAY = 60, /* UTEXTURE2DARRAY */ + YYSYMBOL_ATTRIBUTE = 61, /* ATTRIBUTE */ + YYSYMBOL_VARYING = 62, /* VARYING */ + 
YYSYMBOL_FLOAT16_T = 63, /* FLOAT16_T */ + YYSYMBOL_FLOAT32_T = 64, /* FLOAT32_T */ + YYSYMBOL_DOUBLE = 65, /* DOUBLE */ + YYSYMBOL_FLOAT64_T = 66, /* FLOAT64_T */ + YYSYMBOL_INT64_T = 67, /* INT64_T */ + YYSYMBOL_UINT64_T = 68, /* UINT64_T */ + YYSYMBOL_INT32_T = 69, /* INT32_T */ + YYSYMBOL_UINT32_T = 70, /* UINT32_T */ + YYSYMBOL_INT16_T = 71, /* INT16_T */ + YYSYMBOL_UINT16_T = 72, /* UINT16_T */ + YYSYMBOL_INT8_T = 73, /* INT8_T */ + YYSYMBOL_UINT8_T = 74, /* UINT8_T */ + YYSYMBOL_I64VEC2 = 75, /* I64VEC2 */ + YYSYMBOL_I64VEC3 = 76, /* I64VEC3 */ + YYSYMBOL_I64VEC4 = 77, /* I64VEC4 */ + YYSYMBOL_U64VEC2 = 78, /* U64VEC2 */ + YYSYMBOL_U64VEC3 = 79, /* U64VEC3 */ + YYSYMBOL_U64VEC4 = 80, /* U64VEC4 */ + YYSYMBOL_I32VEC2 = 81, /* I32VEC2 */ + YYSYMBOL_I32VEC3 = 82, /* I32VEC3 */ + YYSYMBOL_I32VEC4 = 83, /* I32VEC4 */ + YYSYMBOL_U32VEC2 = 84, /* U32VEC2 */ + YYSYMBOL_U32VEC3 = 85, /* U32VEC3 */ + YYSYMBOL_U32VEC4 = 86, /* U32VEC4 */ + YYSYMBOL_I16VEC2 = 87, /* I16VEC2 */ + YYSYMBOL_I16VEC3 = 88, /* I16VEC3 */ + YYSYMBOL_I16VEC4 = 89, /* I16VEC4 */ + YYSYMBOL_U16VEC2 = 90, /* U16VEC2 */ + YYSYMBOL_U16VEC3 = 91, /* U16VEC3 */ + YYSYMBOL_U16VEC4 = 92, /* U16VEC4 */ + YYSYMBOL_I8VEC2 = 93, /* I8VEC2 */ + YYSYMBOL_I8VEC3 = 94, /* I8VEC3 */ + YYSYMBOL_I8VEC4 = 95, /* I8VEC4 */ + YYSYMBOL_U8VEC2 = 96, /* U8VEC2 */ + YYSYMBOL_U8VEC3 = 97, /* U8VEC3 */ + YYSYMBOL_U8VEC4 = 98, /* U8VEC4 */ + YYSYMBOL_DVEC2 = 99, /* DVEC2 */ + YYSYMBOL_DVEC3 = 100, /* DVEC3 */ + YYSYMBOL_DVEC4 = 101, /* DVEC4 */ + YYSYMBOL_DMAT2 = 102, /* DMAT2 */ + YYSYMBOL_DMAT3 = 103, /* DMAT3 */ + YYSYMBOL_DMAT4 = 104, /* DMAT4 */ + YYSYMBOL_F16VEC2 = 105, /* F16VEC2 */ + YYSYMBOL_F16VEC3 = 106, /* F16VEC3 */ + YYSYMBOL_F16VEC4 = 107, /* F16VEC4 */ + YYSYMBOL_F16MAT2 = 108, /* F16MAT2 */ + YYSYMBOL_F16MAT3 = 109, /* F16MAT3 */ + YYSYMBOL_F16MAT4 = 110, /* F16MAT4 */ + YYSYMBOL_F32VEC2 = 111, /* F32VEC2 */ + YYSYMBOL_F32VEC3 = 112, /* F32VEC3 */ + YYSYMBOL_F32VEC4 = 113, /* F32VEC4 */ + YYSYMBOL_F32MAT2 = 114, /* F32MAT2 */ + YYSYMBOL_F32MAT3 = 115, /* F32MAT3 */ + YYSYMBOL_F32MAT4 = 116, /* F32MAT4 */ + YYSYMBOL_F64VEC2 = 117, /* F64VEC2 */ + YYSYMBOL_F64VEC3 = 118, /* F64VEC3 */ + YYSYMBOL_F64VEC4 = 119, /* F64VEC4 */ + YYSYMBOL_F64MAT2 = 120, /* F64MAT2 */ + YYSYMBOL_F64MAT3 = 121, /* F64MAT3 */ + YYSYMBOL_F64MAT4 = 122, /* F64MAT4 */ + YYSYMBOL_DMAT2X2 = 123, /* DMAT2X2 */ + YYSYMBOL_DMAT2X3 = 124, /* DMAT2X3 */ + YYSYMBOL_DMAT2X4 = 125, /* DMAT2X4 */ + YYSYMBOL_DMAT3X2 = 126, /* DMAT3X2 */ + YYSYMBOL_DMAT3X3 = 127, /* DMAT3X3 */ + YYSYMBOL_DMAT3X4 = 128, /* DMAT3X4 */ + YYSYMBOL_DMAT4X2 = 129, /* DMAT4X2 */ + YYSYMBOL_DMAT4X3 = 130, /* DMAT4X3 */ + YYSYMBOL_DMAT4X4 = 131, /* DMAT4X4 */ + YYSYMBOL_F16MAT2X2 = 132, /* F16MAT2X2 */ + YYSYMBOL_F16MAT2X3 = 133, /* F16MAT2X3 */ + YYSYMBOL_F16MAT2X4 = 134, /* F16MAT2X4 */ + YYSYMBOL_F16MAT3X2 = 135, /* F16MAT3X2 */ + YYSYMBOL_F16MAT3X3 = 136, /* F16MAT3X3 */ + YYSYMBOL_F16MAT3X4 = 137, /* F16MAT3X4 */ + YYSYMBOL_F16MAT4X2 = 138, /* F16MAT4X2 */ + YYSYMBOL_F16MAT4X3 = 139, /* F16MAT4X3 */ + YYSYMBOL_F16MAT4X4 = 140, /* F16MAT4X4 */ + YYSYMBOL_F32MAT2X2 = 141, /* F32MAT2X2 */ + YYSYMBOL_F32MAT2X3 = 142, /* F32MAT2X3 */ + YYSYMBOL_F32MAT2X4 = 143, /* F32MAT2X4 */ + YYSYMBOL_F32MAT3X2 = 144, /* F32MAT3X2 */ + YYSYMBOL_F32MAT3X3 = 145, /* F32MAT3X3 */ + YYSYMBOL_F32MAT3X4 = 146, /* F32MAT3X4 */ + YYSYMBOL_F32MAT4X2 = 147, /* F32MAT4X2 */ + YYSYMBOL_F32MAT4X3 = 148, /* F32MAT4X3 */ + YYSYMBOL_F32MAT4X4 = 149, /* F32MAT4X4 */ + YYSYMBOL_F64MAT2X2 = 150, /* F64MAT2X2 */ + 
YYSYMBOL_F64MAT2X3 = 151, /* F64MAT2X3 */ + YYSYMBOL_F64MAT2X4 = 152, /* F64MAT2X4 */ + YYSYMBOL_F64MAT3X2 = 153, /* F64MAT3X2 */ + YYSYMBOL_F64MAT3X3 = 154, /* F64MAT3X3 */ + YYSYMBOL_F64MAT3X4 = 155, /* F64MAT3X4 */ + YYSYMBOL_F64MAT4X2 = 156, /* F64MAT4X2 */ + YYSYMBOL_F64MAT4X3 = 157, /* F64MAT4X3 */ + YYSYMBOL_F64MAT4X4 = 158, /* F64MAT4X4 */ + YYSYMBOL_ATOMIC_UINT = 159, /* ATOMIC_UINT */ + YYSYMBOL_ACCSTRUCTNV = 160, /* ACCSTRUCTNV */ + YYSYMBOL_ACCSTRUCTEXT = 161, /* ACCSTRUCTEXT */ + YYSYMBOL_RAYQUERYEXT = 162, /* RAYQUERYEXT */ + YYSYMBOL_FCOOPMATNV = 163, /* FCOOPMATNV */ + YYSYMBOL_ICOOPMATNV = 164, /* ICOOPMATNV */ + YYSYMBOL_UCOOPMATNV = 165, /* UCOOPMATNV */ + YYSYMBOL_SAMPLERCUBEARRAY = 166, /* SAMPLERCUBEARRAY */ + YYSYMBOL_SAMPLERCUBEARRAYSHADOW = 167, /* SAMPLERCUBEARRAYSHADOW */ + YYSYMBOL_ISAMPLERCUBEARRAY = 168, /* ISAMPLERCUBEARRAY */ + YYSYMBOL_USAMPLERCUBEARRAY = 169, /* USAMPLERCUBEARRAY */ + YYSYMBOL_SAMPLER1D = 170, /* SAMPLER1D */ + YYSYMBOL_SAMPLER1DARRAY = 171, /* SAMPLER1DARRAY */ + YYSYMBOL_SAMPLER1DARRAYSHADOW = 172, /* SAMPLER1DARRAYSHADOW */ + YYSYMBOL_ISAMPLER1D = 173, /* ISAMPLER1D */ + YYSYMBOL_SAMPLER1DSHADOW = 174, /* SAMPLER1DSHADOW */ + YYSYMBOL_SAMPLER2DRECT = 175, /* SAMPLER2DRECT */ + YYSYMBOL_SAMPLER2DRECTSHADOW = 176, /* SAMPLER2DRECTSHADOW */ + YYSYMBOL_ISAMPLER2DRECT = 177, /* ISAMPLER2DRECT */ + YYSYMBOL_USAMPLER2DRECT = 178, /* USAMPLER2DRECT */ + YYSYMBOL_SAMPLERBUFFER = 179, /* SAMPLERBUFFER */ + YYSYMBOL_ISAMPLERBUFFER = 180, /* ISAMPLERBUFFER */ + YYSYMBOL_USAMPLERBUFFER = 181, /* USAMPLERBUFFER */ + YYSYMBOL_SAMPLER2DMS = 182, /* SAMPLER2DMS */ + YYSYMBOL_ISAMPLER2DMS = 183, /* ISAMPLER2DMS */ + YYSYMBOL_USAMPLER2DMS = 184, /* USAMPLER2DMS */ + YYSYMBOL_SAMPLER2DMSARRAY = 185, /* SAMPLER2DMSARRAY */ + YYSYMBOL_ISAMPLER2DMSARRAY = 186, /* ISAMPLER2DMSARRAY */ + YYSYMBOL_USAMPLER2DMSARRAY = 187, /* USAMPLER2DMSARRAY */ + YYSYMBOL_SAMPLEREXTERNALOES = 188, /* SAMPLEREXTERNALOES */ + YYSYMBOL_SAMPLEREXTERNAL2DY2YEXT = 189, /* SAMPLEREXTERNAL2DY2YEXT */ + YYSYMBOL_ISAMPLER1DARRAY = 190, /* ISAMPLER1DARRAY */ + YYSYMBOL_USAMPLER1D = 191, /* USAMPLER1D */ + YYSYMBOL_USAMPLER1DARRAY = 192, /* USAMPLER1DARRAY */ + YYSYMBOL_F16SAMPLER1D = 193, /* F16SAMPLER1D */ + YYSYMBOL_F16SAMPLER2D = 194, /* F16SAMPLER2D */ + YYSYMBOL_F16SAMPLER3D = 195, /* F16SAMPLER3D */ + YYSYMBOL_F16SAMPLER2DRECT = 196, /* F16SAMPLER2DRECT */ + YYSYMBOL_F16SAMPLERCUBE = 197, /* F16SAMPLERCUBE */ + YYSYMBOL_F16SAMPLER1DARRAY = 198, /* F16SAMPLER1DARRAY */ + YYSYMBOL_F16SAMPLER2DARRAY = 199, /* F16SAMPLER2DARRAY */ + YYSYMBOL_F16SAMPLERCUBEARRAY = 200, /* F16SAMPLERCUBEARRAY */ + YYSYMBOL_F16SAMPLERBUFFER = 201, /* F16SAMPLERBUFFER */ + YYSYMBOL_F16SAMPLER2DMS = 202, /* F16SAMPLER2DMS */ + YYSYMBOL_F16SAMPLER2DMSARRAY = 203, /* F16SAMPLER2DMSARRAY */ + YYSYMBOL_F16SAMPLER1DSHADOW = 204, /* F16SAMPLER1DSHADOW */ + YYSYMBOL_F16SAMPLER2DSHADOW = 205, /* F16SAMPLER2DSHADOW */ + YYSYMBOL_F16SAMPLER1DARRAYSHADOW = 206, /* F16SAMPLER1DARRAYSHADOW */ + YYSYMBOL_F16SAMPLER2DARRAYSHADOW = 207, /* F16SAMPLER2DARRAYSHADOW */ + YYSYMBOL_F16SAMPLER2DRECTSHADOW = 208, /* F16SAMPLER2DRECTSHADOW */ + YYSYMBOL_F16SAMPLERCUBESHADOW = 209, /* F16SAMPLERCUBESHADOW */ + YYSYMBOL_F16SAMPLERCUBEARRAYSHADOW = 210, /* F16SAMPLERCUBEARRAYSHADOW */ + YYSYMBOL_IMAGE1D = 211, /* IMAGE1D */ + YYSYMBOL_IIMAGE1D = 212, /* IIMAGE1D */ + YYSYMBOL_UIMAGE1D = 213, /* UIMAGE1D */ + YYSYMBOL_IMAGE2D = 214, /* IMAGE2D */ + YYSYMBOL_IIMAGE2D = 215, /* IIMAGE2D */ + YYSYMBOL_UIMAGE2D = 216, /* UIMAGE2D */ + 
YYSYMBOL_IMAGE3D = 217, /* IMAGE3D */ + YYSYMBOL_IIMAGE3D = 218, /* IIMAGE3D */ + YYSYMBOL_UIMAGE3D = 219, /* UIMAGE3D */ + YYSYMBOL_IMAGE2DRECT = 220, /* IMAGE2DRECT */ + YYSYMBOL_IIMAGE2DRECT = 221, /* IIMAGE2DRECT */ + YYSYMBOL_UIMAGE2DRECT = 222, /* UIMAGE2DRECT */ + YYSYMBOL_IMAGECUBE = 223, /* IMAGECUBE */ + YYSYMBOL_IIMAGECUBE = 224, /* IIMAGECUBE */ + YYSYMBOL_UIMAGECUBE = 225, /* UIMAGECUBE */ + YYSYMBOL_IMAGEBUFFER = 226, /* IMAGEBUFFER */ + YYSYMBOL_IIMAGEBUFFER = 227, /* IIMAGEBUFFER */ + YYSYMBOL_UIMAGEBUFFER = 228, /* UIMAGEBUFFER */ + YYSYMBOL_IMAGE1DARRAY = 229, /* IMAGE1DARRAY */ + YYSYMBOL_IIMAGE1DARRAY = 230, /* IIMAGE1DARRAY */ + YYSYMBOL_UIMAGE1DARRAY = 231, /* UIMAGE1DARRAY */ + YYSYMBOL_IMAGE2DARRAY = 232, /* IMAGE2DARRAY */ + YYSYMBOL_IIMAGE2DARRAY = 233, /* IIMAGE2DARRAY */ + YYSYMBOL_UIMAGE2DARRAY = 234, /* UIMAGE2DARRAY */ + YYSYMBOL_IMAGECUBEARRAY = 235, /* IMAGECUBEARRAY */ + YYSYMBOL_IIMAGECUBEARRAY = 236, /* IIMAGECUBEARRAY */ + YYSYMBOL_UIMAGECUBEARRAY = 237, /* UIMAGECUBEARRAY */ + YYSYMBOL_IMAGE2DMS = 238, /* IMAGE2DMS */ + YYSYMBOL_IIMAGE2DMS = 239, /* IIMAGE2DMS */ + YYSYMBOL_UIMAGE2DMS = 240, /* UIMAGE2DMS */ + YYSYMBOL_IMAGE2DMSARRAY = 241, /* IMAGE2DMSARRAY */ + YYSYMBOL_IIMAGE2DMSARRAY = 242, /* IIMAGE2DMSARRAY */ + YYSYMBOL_UIMAGE2DMSARRAY = 243, /* UIMAGE2DMSARRAY */ + YYSYMBOL_F16IMAGE1D = 244, /* F16IMAGE1D */ + YYSYMBOL_F16IMAGE2D = 245, /* F16IMAGE2D */ + YYSYMBOL_F16IMAGE3D = 246, /* F16IMAGE3D */ + YYSYMBOL_F16IMAGE2DRECT = 247, /* F16IMAGE2DRECT */ + YYSYMBOL_F16IMAGECUBE = 248, /* F16IMAGECUBE */ + YYSYMBOL_F16IMAGE1DARRAY = 249, /* F16IMAGE1DARRAY */ + YYSYMBOL_F16IMAGE2DARRAY = 250, /* F16IMAGE2DARRAY */ + YYSYMBOL_F16IMAGECUBEARRAY = 251, /* F16IMAGECUBEARRAY */ + YYSYMBOL_F16IMAGEBUFFER = 252, /* F16IMAGEBUFFER */ + YYSYMBOL_F16IMAGE2DMS = 253, /* F16IMAGE2DMS */ + YYSYMBOL_F16IMAGE2DMSARRAY = 254, /* F16IMAGE2DMSARRAY */ + YYSYMBOL_I64IMAGE1D = 255, /* I64IMAGE1D */ + YYSYMBOL_U64IMAGE1D = 256, /* U64IMAGE1D */ + YYSYMBOL_I64IMAGE2D = 257, /* I64IMAGE2D */ + YYSYMBOL_U64IMAGE2D = 258, /* U64IMAGE2D */ + YYSYMBOL_I64IMAGE3D = 259, /* I64IMAGE3D */ + YYSYMBOL_U64IMAGE3D = 260, /* U64IMAGE3D */ + YYSYMBOL_I64IMAGE2DRECT = 261, /* I64IMAGE2DRECT */ + YYSYMBOL_U64IMAGE2DRECT = 262, /* U64IMAGE2DRECT */ + YYSYMBOL_I64IMAGECUBE = 263, /* I64IMAGECUBE */ + YYSYMBOL_U64IMAGECUBE = 264, /* U64IMAGECUBE */ + YYSYMBOL_I64IMAGEBUFFER = 265, /* I64IMAGEBUFFER */ + YYSYMBOL_U64IMAGEBUFFER = 266, /* U64IMAGEBUFFER */ + YYSYMBOL_I64IMAGE1DARRAY = 267, /* I64IMAGE1DARRAY */ + YYSYMBOL_U64IMAGE1DARRAY = 268, /* U64IMAGE1DARRAY */ + YYSYMBOL_I64IMAGE2DARRAY = 269, /* I64IMAGE2DARRAY */ + YYSYMBOL_U64IMAGE2DARRAY = 270, /* U64IMAGE2DARRAY */ + YYSYMBOL_I64IMAGECUBEARRAY = 271, /* I64IMAGECUBEARRAY */ + YYSYMBOL_U64IMAGECUBEARRAY = 272, /* U64IMAGECUBEARRAY */ + YYSYMBOL_I64IMAGE2DMS = 273, /* I64IMAGE2DMS */ + YYSYMBOL_U64IMAGE2DMS = 274, /* U64IMAGE2DMS */ + YYSYMBOL_I64IMAGE2DMSARRAY = 275, /* I64IMAGE2DMSARRAY */ + YYSYMBOL_U64IMAGE2DMSARRAY = 276, /* U64IMAGE2DMSARRAY */ + YYSYMBOL_TEXTURECUBEARRAY = 277, /* TEXTURECUBEARRAY */ + YYSYMBOL_ITEXTURECUBEARRAY = 278, /* ITEXTURECUBEARRAY */ + YYSYMBOL_UTEXTURECUBEARRAY = 279, /* UTEXTURECUBEARRAY */ + YYSYMBOL_TEXTURE1D = 280, /* TEXTURE1D */ + YYSYMBOL_ITEXTURE1D = 281, /* ITEXTURE1D */ + YYSYMBOL_UTEXTURE1D = 282, /* UTEXTURE1D */ + YYSYMBOL_TEXTURE1DARRAY = 283, /* TEXTURE1DARRAY */ + YYSYMBOL_ITEXTURE1DARRAY = 284, /* ITEXTURE1DARRAY */ + YYSYMBOL_UTEXTURE1DARRAY = 285, /* UTEXTURE1DARRAY */ + 
YYSYMBOL_TEXTURE2DRECT = 286, /* TEXTURE2DRECT */ + YYSYMBOL_ITEXTURE2DRECT = 287, /* ITEXTURE2DRECT */ + YYSYMBOL_UTEXTURE2DRECT = 288, /* UTEXTURE2DRECT */ + YYSYMBOL_TEXTUREBUFFER = 289, /* TEXTUREBUFFER */ + YYSYMBOL_ITEXTUREBUFFER = 290, /* ITEXTUREBUFFER */ + YYSYMBOL_UTEXTUREBUFFER = 291, /* UTEXTUREBUFFER */ + YYSYMBOL_TEXTURE2DMS = 292, /* TEXTURE2DMS */ + YYSYMBOL_ITEXTURE2DMS = 293, /* ITEXTURE2DMS */ + YYSYMBOL_UTEXTURE2DMS = 294, /* UTEXTURE2DMS */ + YYSYMBOL_TEXTURE2DMSARRAY = 295, /* TEXTURE2DMSARRAY */ + YYSYMBOL_ITEXTURE2DMSARRAY = 296, /* ITEXTURE2DMSARRAY */ + YYSYMBOL_UTEXTURE2DMSARRAY = 297, /* UTEXTURE2DMSARRAY */ + YYSYMBOL_F16TEXTURE1D = 298, /* F16TEXTURE1D */ + YYSYMBOL_F16TEXTURE2D = 299, /* F16TEXTURE2D */ + YYSYMBOL_F16TEXTURE3D = 300, /* F16TEXTURE3D */ + YYSYMBOL_F16TEXTURE2DRECT = 301, /* F16TEXTURE2DRECT */ + YYSYMBOL_F16TEXTURECUBE = 302, /* F16TEXTURECUBE */ + YYSYMBOL_F16TEXTURE1DARRAY = 303, /* F16TEXTURE1DARRAY */ + YYSYMBOL_F16TEXTURE2DARRAY = 304, /* F16TEXTURE2DARRAY */ + YYSYMBOL_F16TEXTURECUBEARRAY = 305, /* F16TEXTURECUBEARRAY */ + YYSYMBOL_F16TEXTUREBUFFER = 306, /* F16TEXTUREBUFFER */ + YYSYMBOL_F16TEXTURE2DMS = 307, /* F16TEXTURE2DMS */ + YYSYMBOL_F16TEXTURE2DMSARRAY = 308, /* F16TEXTURE2DMSARRAY */ + YYSYMBOL_SUBPASSINPUT = 309, /* SUBPASSINPUT */ + YYSYMBOL_SUBPASSINPUTMS = 310, /* SUBPASSINPUTMS */ + YYSYMBOL_ISUBPASSINPUT = 311, /* ISUBPASSINPUT */ + YYSYMBOL_ISUBPASSINPUTMS = 312, /* ISUBPASSINPUTMS */ + YYSYMBOL_USUBPASSINPUT = 313, /* USUBPASSINPUT */ + YYSYMBOL_USUBPASSINPUTMS = 314, /* USUBPASSINPUTMS */ + YYSYMBOL_F16SUBPASSINPUT = 315, /* F16SUBPASSINPUT */ + YYSYMBOL_F16SUBPASSINPUTMS = 316, /* F16SUBPASSINPUTMS */ + YYSYMBOL_LEFT_OP = 317, /* LEFT_OP */ + YYSYMBOL_RIGHT_OP = 318, /* RIGHT_OP */ + YYSYMBOL_INC_OP = 319, /* INC_OP */ + YYSYMBOL_DEC_OP = 320, /* DEC_OP */ + YYSYMBOL_LE_OP = 321, /* LE_OP */ + YYSYMBOL_GE_OP = 322, /* GE_OP */ + YYSYMBOL_EQ_OP = 323, /* EQ_OP */ + YYSYMBOL_NE_OP = 324, /* NE_OP */ + YYSYMBOL_AND_OP = 325, /* AND_OP */ + YYSYMBOL_OR_OP = 326, /* OR_OP */ + YYSYMBOL_XOR_OP = 327, /* XOR_OP */ + YYSYMBOL_MUL_ASSIGN = 328, /* MUL_ASSIGN */ + YYSYMBOL_DIV_ASSIGN = 329, /* DIV_ASSIGN */ + YYSYMBOL_ADD_ASSIGN = 330, /* ADD_ASSIGN */ + YYSYMBOL_MOD_ASSIGN = 331, /* MOD_ASSIGN */ + YYSYMBOL_LEFT_ASSIGN = 332, /* LEFT_ASSIGN */ + YYSYMBOL_RIGHT_ASSIGN = 333, /* RIGHT_ASSIGN */ + YYSYMBOL_AND_ASSIGN = 334, /* AND_ASSIGN */ + YYSYMBOL_XOR_ASSIGN = 335, /* XOR_ASSIGN */ + YYSYMBOL_OR_ASSIGN = 336, /* OR_ASSIGN */ + YYSYMBOL_SUB_ASSIGN = 337, /* SUB_ASSIGN */ + YYSYMBOL_STRING_LITERAL = 338, /* STRING_LITERAL */ + YYSYMBOL_LEFT_PAREN = 339, /* LEFT_PAREN */ + YYSYMBOL_RIGHT_PAREN = 340, /* RIGHT_PAREN */ + YYSYMBOL_LEFT_BRACKET = 341, /* LEFT_BRACKET */ + YYSYMBOL_RIGHT_BRACKET = 342, /* RIGHT_BRACKET */ + YYSYMBOL_LEFT_BRACE = 343, /* LEFT_BRACE */ + YYSYMBOL_RIGHT_BRACE = 344, /* RIGHT_BRACE */ + YYSYMBOL_DOT = 345, /* DOT */ + YYSYMBOL_COMMA = 346, /* COMMA */ + YYSYMBOL_COLON = 347, /* COLON */ + YYSYMBOL_EQUAL = 348, /* EQUAL */ + YYSYMBOL_SEMICOLON = 349, /* SEMICOLON */ + YYSYMBOL_BANG = 350, /* BANG */ + YYSYMBOL_DASH = 351, /* DASH */ + YYSYMBOL_TILDE = 352, /* TILDE */ + YYSYMBOL_PLUS = 353, /* PLUS */ + YYSYMBOL_STAR = 354, /* STAR */ + YYSYMBOL_SLASH = 355, /* SLASH */ + YYSYMBOL_PERCENT = 356, /* PERCENT */ + YYSYMBOL_LEFT_ANGLE = 357, /* LEFT_ANGLE */ + YYSYMBOL_RIGHT_ANGLE = 358, /* RIGHT_ANGLE */ + YYSYMBOL_VERTICAL_BAR = 359, /* VERTICAL_BAR */ + YYSYMBOL_CARET = 360, /* CARET */ + 
YYSYMBOL_AMPERSAND = 361, /* AMPERSAND */ + YYSYMBOL_QUESTION = 362, /* QUESTION */ + YYSYMBOL_INVARIANT = 363, /* INVARIANT */ + YYSYMBOL_HIGH_PRECISION = 364, /* HIGH_PRECISION */ + YYSYMBOL_MEDIUM_PRECISION = 365, /* MEDIUM_PRECISION */ + YYSYMBOL_LOW_PRECISION = 366, /* LOW_PRECISION */ + YYSYMBOL_PRECISION = 367, /* PRECISION */ + YYSYMBOL_PACKED = 368, /* PACKED */ + YYSYMBOL_RESOURCE = 369, /* RESOURCE */ + YYSYMBOL_SUPERP = 370, /* SUPERP */ + YYSYMBOL_FLOATCONSTANT = 371, /* FLOATCONSTANT */ + YYSYMBOL_INTCONSTANT = 372, /* INTCONSTANT */ + YYSYMBOL_UINTCONSTANT = 373, /* UINTCONSTANT */ + YYSYMBOL_BOOLCONSTANT = 374, /* BOOLCONSTANT */ + YYSYMBOL_IDENTIFIER = 375, /* IDENTIFIER */ + YYSYMBOL_TYPE_NAME = 376, /* TYPE_NAME */ + YYSYMBOL_CENTROID = 377, /* CENTROID */ + YYSYMBOL_IN = 378, /* IN */ + YYSYMBOL_OUT = 379, /* OUT */ + YYSYMBOL_INOUT = 380, /* INOUT */ + YYSYMBOL_STRUCT = 381, /* STRUCT */ + YYSYMBOL_VOID = 382, /* VOID */ + YYSYMBOL_WHILE = 383, /* WHILE */ + YYSYMBOL_BREAK = 384, /* BREAK */ + YYSYMBOL_CONTINUE = 385, /* CONTINUE */ + YYSYMBOL_DO = 386, /* DO */ + YYSYMBOL_ELSE = 387, /* ELSE */ + YYSYMBOL_FOR = 388, /* FOR */ + YYSYMBOL_IF = 389, /* IF */ + YYSYMBOL_DISCARD = 390, /* DISCARD */ + YYSYMBOL_RETURN = 391, /* RETURN */ + YYSYMBOL_SWITCH = 392, /* SWITCH */ + YYSYMBOL_CASE = 393, /* CASE */ + YYSYMBOL_DEFAULT = 394, /* DEFAULT */ + YYSYMBOL_TERMINATE_INVOCATION = 395, /* TERMINATE_INVOCATION */ + YYSYMBOL_TERMINATE_RAY = 396, /* TERMINATE_RAY */ + YYSYMBOL_IGNORE_INTERSECTION = 397, /* IGNORE_INTERSECTION */ + YYSYMBOL_UNIFORM = 398, /* UNIFORM */ + YYSYMBOL_SHARED = 399, /* SHARED */ + YYSYMBOL_BUFFER = 400, /* BUFFER */ + YYSYMBOL_FLAT = 401, /* FLAT */ + YYSYMBOL_SMOOTH = 402, /* SMOOTH */ + YYSYMBOL_LAYOUT = 403, /* LAYOUT */ + YYSYMBOL_DOUBLECONSTANT = 404, /* DOUBLECONSTANT */ + YYSYMBOL_INT16CONSTANT = 405, /* INT16CONSTANT */ + YYSYMBOL_UINT16CONSTANT = 406, /* UINT16CONSTANT */ + YYSYMBOL_FLOAT16CONSTANT = 407, /* FLOAT16CONSTANT */ + YYSYMBOL_INT32CONSTANT = 408, /* INT32CONSTANT */ + YYSYMBOL_UINT32CONSTANT = 409, /* UINT32CONSTANT */ + YYSYMBOL_INT64CONSTANT = 410, /* INT64CONSTANT */ + YYSYMBOL_UINT64CONSTANT = 411, /* UINT64CONSTANT */ + YYSYMBOL_SUBROUTINE = 412, /* SUBROUTINE */ + YYSYMBOL_DEMOTE = 413, /* DEMOTE */ + YYSYMBOL_PAYLOADNV = 414, /* PAYLOADNV */ + YYSYMBOL_PAYLOADINNV = 415, /* PAYLOADINNV */ + YYSYMBOL_HITATTRNV = 416, /* HITATTRNV */ + YYSYMBOL_CALLDATANV = 417, /* CALLDATANV */ + YYSYMBOL_CALLDATAINNV = 418, /* CALLDATAINNV */ + YYSYMBOL_PAYLOADEXT = 419, /* PAYLOADEXT */ + YYSYMBOL_PAYLOADINEXT = 420, /* PAYLOADINEXT */ + YYSYMBOL_HITATTREXT = 421, /* HITATTREXT */ + YYSYMBOL_CALLDATAEXT = 422, /* CALLDATAEXT */ + YYSYMBOL_CALLDATAINEXT = 423, /* CALLDATAINEXT */ + YYSYMBOL_PATCH = 424, /* PATCH */ + YYSYMBOL_SAMPLE = 425, /* SAMPLE */ + YYSYMBOL_NONUNIFORM = 426, /* NONUNIFORM */ + YYSYMBOL_COHERENT = 427, /* COHERENT */ + YYSYMBOL_VOLATILE = 428, /* VOLATILE */ + YYSYMBOL_RESTRICT = 429, /* RESTRICT */ + YYSYMBOL_READONLY = 430, /* READONLY */ + YYSYMBOL_WRITEONLY = 431, /* WRITEONLY */ + YYSYMBOL_DEVICECOHERENT = 432, /* DEVICECOHERENT */ + YYSYMBOL_QUEUEFAMILYCOHERENT = 433, /* QUEUEFAMILYCOHERENT */ + YYSYMBOL_WORKGROUPCOHERENT = 434, /* WORKGROUPCOHERENT */ + YYSYMBOL_SUBGROUPCOHERENT = 435, /* SUBGROUPCOHERENT */ + YYSYMBOL_NONPRIVATE = 436, /* NONPRIVATE */ + YYSYMBOL_SHADERCALLCOHERENT = 437, /* SHADERCALLCOHERENT */ + YYSYMBOL_NOPERSPECTIVE = 438, /* NOPERSPECTIVE */ + YYSYMBOL_EXPLICITINTERPAMD = 439, /* 
EXPLICITINTERPAMD */ + YYSYMBOL_PERVERTEXNV = 440, /* PERVERTEXNV */ + YYSYMBOL_PERPRIMITIVENV = 441, /* PERPRIMITIVENV */ + YYSYMBOL_PERVIEWNV = 442, /* PERVIEWNV */ + YYSYMBOL_PERTASKNV = 443, /* PERTASKNV */ + YYSYMBOL_PRECISE = 444, /* PRECISE */ + YYSYMBOL_YYACCEPT = 445, /* $accept */ + YYSYMBOL_variable_identifier = 446, /* variable_identifier */ + YYSYMBOL_primary_expression = 447, /* primary_expression */ + YYSYMBOL_postfix_expression = 448, /* postfix_expression */ + YYSYMBOL_integer_expression = 449, /* integer_expression */ + YYSYMBOL_function_call = 450, /* function_call */ + YYSYMBOL_function_call_or_method = 451, /* function_call_or_method */ + YYSYMBOL_function_call_generic = 452, /* function_call_generic */ + YYSYMBOL_function_call_header_no_parameters = 453, /* function_call_header_no_parameters */ + YYSYMBOL_function_call_header_with_parameters = 454, /* function_call_header_with_parameters */ + YYSYMBOL_function_call_header = 455, /* function_call_header */ + YYSYMBOL_function_identifier = 456, /* function_identifier */ + YYSYMBOL_unary_expression = 457, /* unary_expression */ + YYSYMBOL_unary_operator = 458, /* unary_operator */ + YYSYMBOL_multiplicative_expression = 459, /* multiplicative_expression */ + YYSYMBOL_additive_expression = 460, /* additive_expression */ + YYSYMBOL_shift_expression = 461, /* shift_expression */ + YYSYMBOL_relational_expression = 462, /* relational_expression */ + YYSYMBOL_equality_expression = 463, /* equality_expression */ + YYSYMBOL_and_expression = 464, /* and_expression */ + YYSYMBOL_exclusive_or_expression = 465, /* exclusive_or_expression */ + YYSYMBOL_inclusive_or_expression = 466, /* inclusive_or_expression */ + YYSYMBOL_logical_and_expression = 467, /* logical_and_expression */ + YYSYMBOL_logical_xor_expression = 468, /* logical_xor_expression */ + YYSYMBOL_logical_or_expression = 469, /* logical_or_expression */ + YYSYMBOL_conditional_expression = 470, /* conditional_expression */ + YYSYMBOL_471_1 = 471, /* $@1 */ + YYSYMBOL_assignment_expression = 472, /* assignment_expression */ + YYSYMBOL_assignment_operator = 473, /* assignment_operator */ + YYSYMBOL_expression = 474, /* expression */ + YYSYMBOL_constant_expression = 475, /* constant_expression */ + YYSYMBOL_declaration = 476, /* declaration */ + YYSYMBOL_block_structure = 477, /* block_structure */ + YYSYMBOL_478_2 = 478, /* $@2 */ + YYSYMBOL_identifier_list = 479, /* identifier_list */ + YYSYMBOL_function_prototype = 480, /* function_prototype */ + YYSYMBOL_function_declarator = 481, /* function_declarator */ + YYSYMBOL_function_header_with_parameters = 482, /* function_header_with_parameters */ + YYSYMBOL_function_header = 483, /* function_header */ + YYSYMBOL_parameter_declarator = 484, /* parameter_declarator */ + YYSYMBOL_parameter_declaration = 485, /* parameter_declaration */ + YYSYMBOL_parameter_type_specifier = 486, /* parameter_type_specifier */ + YYSYMBOL_init_declarator_list = 487, /* init_declarator_list */ + YYSYMBOL_single_declaration = 488, /* single_declaration */ + YYSYMBOL_fully_specified_type = 489, /* fully_specified_type */ + YYSYMBOL_invariant_qualifier = 490, /* invariant_qualifier */ + YYSYMBOL_interpolation_qualifier = 491, /* interpolation_qualifier */ + YYSYMBOL_layout_qualifier = 492, /* layout_qualifier */ + YYSYMBOL_layout_qualifier_id_list = 493, /* layout_qualifier_id_list */ + YYSYMBOL_layout_qualifier_id = 494, /* layout_qualifier_id */ + YYSYMBOL_precise_qualifier = 495, /* precise_qualifier */ + YYSYMBOL_type_qualifier = 496, /* 
type_qualifier */ + YYSYMBOL_single_type_qualifier = 497, /* single_type_qualifier */ + YYSYMBOL_storage_qualifier = 498, /* storage_qualifier */ + YYSYMBOL_non_uniform_qualifier = 499, /* non_uniform_qualifier */ + YYSYMBOL_type_name_list = 500, /* type_name_list */ + YYSYMBOL_type_specifier = 501, /* type_specifier */ + YYSYMBOL_array_specifier = 502, /* array_specifier */ + YYSYMBOL_type_parameter_specifier_opt = 503, /* type_parameter_specifier_opt */ + YYSYMBOL_type_parameter_specifier = 504, /* type_parameter_specifier */ + YYSYMBOL_type_parameter_specifier_list = 505, /* type_parameter_specifier_list */ + YYSYMBOL_type_specifier_nonarray = 506, /* type_specifier_nonarray */ + YYSYMBOL_precision_qualifier = 507, /* precision_qualifier */ + YYSYMBOL_struct_specifier = 508, /* struct_specifier */ + YYSYMBOL_509_3 = 509, /* $@3 */ + YYSYMBOL_510_4 = 510, /* $@4 */ + YYSYMBOL_struct_declaration_list = 511, /* struct_declaration_list */ + YYSYMBOL_struct_declaration = 512, /* struct_declaration */ + YYSYMBOL_struct_declarator_list = 513, /* struct_declarator_list */ + YYSYMBOL_struct_declarator = 514, /* struct_declarator */ + YYSYMBOL_initializer = 515, /* initializer */ + YYSYMBOL_initializer_list = 516, /* initializer_list */ + YYSYMBOL_declaration_statement = 517, /* declaration_statement */ + YYSYMBOL_statement = 518, /* statement */ + YYSYMBOL_simple_statement = 519, /* simple_statement */ + YYSYMBOL_demote_statement = 520, /* demote_statement */ + YYSYMBOL_compound_statement = 521, /* compound_statement */ + YYSYMBOL_522_5 = 522, /* $@5 */ + YYSYMBOL_523_6 = 523, /* $@6 */ + YYSYMBOL_statement_no_new_scope = 524, /* statement_no_new_scope */ + YYSYMBOL_statement_scoped = 525, /* statement_scoped */ + YYSYMBOL_526_7 = 526, /* $@7 */ + YYSYMBOL_527_8 = 527, /* $@8 */ + YYSYMBOL_compound_statement_no_new_scope = 528, /* compound_statement_no_new_scope */ + YYSYMBOL_statement_list = 529, /* statement_list */ + YYSYMBOL_expression_statement = 530, /* expression_statement */ + YYSYMBOL_selection_statement = 531, /* selection_statement */ + YYSYMBOL_selection_statement_nonattributed = 532, /* selection_statement_nonattributed */ + YYSYMBOL_selection_rest_statement = 533, /* selection_rest_statement */ + YYSYMBOL_condition = 534, /* condition */ + YYSYMBOL_switch_statement = 535, /* switch_statement */ + YYSYMBOL_switch_statement_nonattributed = 536, /* switch_statement_nonattributed */ + YYSYMBOL_537_9 = 537, /* $@9 */ + YYSYMBOL_switch_statement_list = 538, /* switch_statement_list */ + YYSYMBOL_case_label = 539, /* case_label */ + YYSYMBOL_iteration_statement = 540, /* iteration_statement */ + YYSYMBOL_iteration_statement_nonattributed = 541, /* iteration_statement_nonattributed */ + YYSYMBOL_542_10 = 542, /* $@10 */ + YYSYMBOL_543_11 = 543, /* $@11 */ + YYSYMBOL_544_12 = 544, /* $@12 */ + YYSYMBOL_for_init_statement = 545, /* for_init_statement */ + YYSYMBOL_conditionopt = 546, /* conditionopt */ + YYSYMBOL_for_rest_statement = 547, /* for_rest_statement */ + YYSYMBOL_jump_statement = 548, /* jump_statement */ + YYSYMBOL_translation_unit = 549, /* translation_unit */ + YYSYMBOL_external_declaration = 550, /* external_declaration */ + YYSYMBOL_function_definition = 551, /* function_definition */ + YYSYMBOL_552_13 = 552, /* $@13 */ + YYSYMBOL_attribute = 553, /* attribute */ + YYSYMBOL_attribute_list = 554, /* attribute_list */ + YYSYMBOL_single_attribute = 555 /* single_attribute */ }; - -typedef union YYSTYPE YYSTYPE; -# define YYSTYPE_IS_TRIVIAL 1 -# define YYSTYPE_IS_DECLARED 1 
-#endif +typedef enum yysymbol_kind_t yysymbol_kind_t; - -int yyparse (glslang::TParseContext* pParseContext); - -#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */ - -/* Copy the second part of user declarations. */ -#line 133 "MachineIndependent/glslang.y" /* yacc.c:358 */ +/* Second part of user prologue. */ +#line 133 "MachineIndependent/glslang.y" /* windows only pragma */ @@ -615,34 +698,82 @@ int yyparse (glslang::TParseContext* pParseContext); extern int yylex(YYSTYPE*, TParseContext&); -#line 619 "MachineIndependent/glslang_tab.cpp" /* yacc.c:358 */ +#line 702 "MachineIndependent/glslang_tab.cpp" + #ifdef short # undef short #endif -#ifdef YYTYPE_UINT8 -typedef YYTYPE_UINT8 yytype_uint8; -#else -typedef unsigned char yytype_uint8; +/* On compilers that do not define __PTRDIFF_MAX__ etc., make sure + <limits.h> and (if available) <stdint.h> are included + so that the code can choose integer types of a good width. */ + +#ifndef __PTRDIFF_MAX__ +# include <limits.h> /* INFRINGES ON USER NAME SPACE */ +# if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ +# include <stdint.h> /* INFRINGES ON USER NAME SPACE */ +# define YY_STDINT_H +# endif #endif -#ifdef YYTYPE_INT8 -typedef YYTYPE_INT8 yytype_int8; +/* Narrow types that promote to a signed type and that can represent a + signed or unsigned integer of at least N bits. In tables they can + save space and decrease cache pressure. Promoting to a signed type + helps avoid bugs in integer arithmetic. */ + +#ifdef __INT_LEAST8_MAX__ +typedef __INT_LEAST8_TYPE__ yytype_int8; +#elif defined YY_STDINT_H +typedef int_least8_t yytype_int8; #else typedef signed char yytype_int8; #endif -#ifdef YYTYPE_UINT16 -typedef YYTYPE_UINT16 yytype_uint16; +#ifdef __INT_LEAST16_MAX__ +typedef __INT_LEAST16_TYPE__ yytype_int16; +#elif defined YY_STDINT_H +typedef int_least16_t yytype_int16; #else -typedef unsigned short int yytype_uint16; +typedef short yytype_int16; #endif -#ifdef YYTYPE_INT16 -typedef YYTYPE_INT16 yytype_int16; +#if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__ +typedef __UINT_LEAST8_TYPE__ yytype_uint8; +#elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \ + && UINT_LEAST8_MAX <= INT_MAX) +typedef uint_least8_t yytype_uint8; +#elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX +typedef unsigned char yytype_uint8; #else -typedef short int yytype_int16; +typedef short yytype_uint8; +#endif + +#if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__ +typedef __UINT_LEAST16_TYPE__ yytype_uint16; +#elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \ + && UINT_LEAST16_MAX <= INT_MAX) +typedef uint_least16_t yytype_uint16; +#elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX +typedef unsigned short yytype_uint16; +#else +typedef int yytype_uint16; +#endif + +#ifndef YYPTRDIFF_T +# if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__ +# define YYPTRDIFF_T __PTRDIFF_TYPE__ +# define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__ +# elif defined PTRDIFF_MAX +# ifndef ptrdiff_t +# include <stddef.h> /* INFRINGES ON USER NAME SPACE */ +# endif +# define YYPTRDIFF_T ptrdiff_t +# define YYPTRDIFF_MAXIMUM PTRDIFF_MAX +# else +# define YYPTRDIFF_T long +# define YYPTRDIFF_MAXIMUM LONG_MAX +# endif #endif #ifndef YYSIZE_T @@ -650,15 +781,28 @@ typedef short int yytype_int16; # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t -# elif ! 
defined YYSIZE_T +# elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else -# define YYSIZE_T unsigned int +# define YYSIZE_T unsigned # endif #endif -#define YYSIZE_MAXIMUM ((YYSIZE_T) -1) +#define YYSIZE_MAXIMUM \ + YY_CAST (YYPTRDIFF_T, \ + (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \ + ? YYPTRDIFF_MAXIMUM \ + : YY_CAST (YYSIZE_T, -1))) + +#define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X)) + + +/* Stored state numbers (used for stacks). */ +typedef yytype_int16 yy_state_t; + +/* State numbers in computations. */ +typedef int yy_state_fast_t; #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS @@ -672,30 +816,20 @@ typedef short int yytype_int16; # endif #endif -#ifndef YY_ATTRIBUTE -# if (defined __GNUC__ \ - && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ - || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C -# define YY_ATTRIBUTE(Spec) __attribute__(Spec) -# else -# define YY_ATTRIBUTE(Spec) /* empty */ -# endif -#endif #ifndef YY_ATTRIBUTE_PURE -# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) +# if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__) +# define YY_ATTRIBUTE_PURE __attribute__ ((__pure__)) +# else +# define YY_ATTRIBUTE_PURE +# endif #endif #ifndef YY_ATTRIBUTE_UNUSED -# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) -#endif - -#if !defined _Noreturn \ - && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) -# if defined _MSC_VER && 1200 <= _MSC_VER -# define _Noreturn __declspec (noreturn) +# if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__) +# define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__)) # else -# define _Noreturn YY_ATTRIBUTE ((__noreturn__)) +# define YY_ATTRIBUTE_UNUSED # endif #endif @@ -706,13 +840,13 @@ typedef short int yytype_int16; # define YYUSE(E) /* empty */ #endif -#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ +#if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. */ -# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ - _Pragma ("GCC diagnostic push") \ - _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") -# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ +# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value @@ -725,8 +859,22 @@ typedef short int yytype_int16; # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif +#if defined __cplusplus && defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__ +# define YY_IGNORE_USELESS_CAST_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"") +# define YY_IGNORE_USELESS_CAST_END \ + _Pragma ("GCC diagnostic pop") +#endif +#ifndef YY_IGNORE_USELESS_CAST_BEGIN +# define YY_IGNORE_USELESS_CAST_BEGIN +# define YY_IGNORE_USELESS_CAST_END +#endif -#if ! defined yyoverflow || YYERROR_VERBOSE + +#define YY_ASSERT(E) ((void) (0 && (E))) + +#if 1 /* The parser invokes alloca or malloc; define the necessary symbols. */ @@ -791,8 +939,7 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif -#endif /* ! defined yyoverflow || YYERROR_VERBOSE */ - +#endif /* 1 */ #if (! defined yyoverflow \ && (! 
defined __cplusplus \ @@ -801,17 +948,17 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */ /* A type that is properly aligned for any stack member. */ union yyalloc { - yytype_int16 yyss_alloc; + yy_state_t yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ -# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) +# define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ - ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 @@ -824,11 +971,11 @@ union yyalloc # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ - YYSIZE_T yynewbytes; \ + YYPTRDIFF_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ - yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ + yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \ + yyptr += yynewbytes / YYSIZEOF (*yyptr); \ } \ while (0) @@ -840,12 +987,12 @@ union yyalloc # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ - __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) + __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ - YYSIZE_T yyi; \ + YYPTRDIFF_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ @@ -855,30 +1002,33 @@ union yyalloc #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ -#define YYFINAL 394 +#define YYFINAL 416 /* YYLAST -- Last index in YYTABLE. */ -#define YYLAST 9550 +#define YYLAST 10112 /* YYNTOKENS -- Number of terminals. */ -#define YYNTOKENS 420 +#define YYNTOKENS 445 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 111 /* YYNRULES -- Number of rules. */ -#define YYNRULES 591 +#define YYNRULES 616 /* YYNSTATES -- Number of states. */ -#define YYNSTATES 736 +#define YYNSTATES 764 + +/* YYMAXUTOK -- Last valid token kind. */ +#define YYMAXUTOK 699 -/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned - by yylex, with out-of-bounds checking. */ -#define YYUNDEFTOK 2 -#define YYMAXUTOK 674 -#define YYTRANSLATE(YYX) \ - ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) +/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, with out-of-bounds checking. */ +#define YYTRANSLATE(YYX) \ + (0 <= (YYX) && (YYX) <= YYMAXUTOK \ + ? YY_CAST (yysymbol_kind_t, yytranslate[YYX]) \ + : YYSYMBOL_YYUNDEF) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM - as returned by yylex, without out-of-bounds checking. */ -static const yytype_uint16 yytranslate[] = + as returned by yylex. */ +static const yytype_int16 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -947,120 +1097,131 @@ static const yytype_uint16 yytranslate[] = 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419 + 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, 443, 444 }; #if YYDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. 
*/ -static const yytype_uint16 yyrline[] = +static const yytype_int16 yyrline[] = { - 0, 357, 357, 363, 366, 371, 374, 377, 381, 385, - 388, 392, 396, 400, 404, 408, 412, 418, 426, 429, - 432, 435, 438, 443, 451, 458, 465, 471, 475, 482, - 485, 491, 498, 508, 516, 521, 549, 558, 564, 568, - 572, 592, 593, 594, 595, 601, 602, 607, 612, 621, - 622, 627, 635, 636, 642, 651, 652, 657, 662, 667, - 675, 676, 685, 697, 698, 707, 708, 717, 718, 727, - 728, 736, 737, 745, 746, 754, 755, 755, 773, 774, - 790, 794, 798, 802, 807, 811, 815, 819, 823, 827, - 831, 838, 841, 852, 859, 864, 869, 876, 880, 884, - 888, 893, 898, 907, 907, 918, 922, 929, 936, 939, - 946, 954, 974, 997, 1012, 1037, 1048, 1058, 1068, 1078, - 1087, 1090, 1094, 1098, 1103, 1111, 1118, 1123, 1128, 1133, - 1142, 1152, 1179, 1188, 1195, 1203, 1210, 1217, 1225, 1235, - 1242, 1253, 1259, 1262, 1269, 1273, 1277, 1286, 1296, 1299, - 1310, 1313, 1316, 1320, 1324, 1329, 1333, 1340, 1344, 1349, - 1355, 1361, 1368, 1373, 1381, 1387, 1399, 1413, 1419, 1424, - 1432, 1440, 1448, 1456, 1464, 1472, 1480, 1488, 1495, 1502, - 1506, 1511, 1516, 1521, 1526, 1531, 1536, 1540, 1544, 1548, - 1552, 1558, 1569, 1576, 1579, 1588, 1593, 1603, 1608, 1616, - 1620, 1630, 1633, 1639, 1645, 1652, 1662, 1666, 1670, 1674, - 1679, 1683, 1688, 1693, 1698, 1703, 1708, 1713, 1718, 1723, - 1728, 1734, 1740, 1746, 1751, 1756, 1761, 1766, 1771, 1776, - 1781, 1786, 1791, 1796, 1801, 1807, 1814, 1819, 1824, 1829, - 1834, 1839, 1844, 1849, 1854, 1859, 1864, 1869, 1877, 1885, - 1893, 1899, 1905, 1911, 1917, 1923, 1929, 1935, 1941, 1947, - 1953, 1959, 1965, 1971, 1977, 1983, 1989, 1995, 2001, 2007, - 2013, 2019, 2025, 2031, 2037, 2043, 2049, 2055, 2061, 2067, - 2073, 2079, 2085, 2091, 2099, 2107, 2115, 2123, 2131, 2139, - 2147, 2155, 2163, 2171, 2179, 2187, 2193, 2199, 2205, 2211, - 2217, 2223, 2229, 2235, 2241, 2247, 2253, 2259, 2265, 2271, - 2277, 2283, 2289, 2295, 2301, 2307, 2313, 2319, 2325, 2331, - 2337, 2343, 2349, 2355, 2361, 2367, 2373, 2379, 2385, 2391, - 2397, 2403, 2407, 2411, 2415, 2420, 2426, 2431, 2436, 2441, - 2446, 2451, 2456, 2462, 2467, 2472, 2477, 2482, 2487, 2493, - 2499, 2505, 2511, 2517, 2523, 2529, 2535, 2541, 2547, 2553, - 2559, 2565, 2571, 2576, 2581, 2586, 2591, 2596, 2601, 2607, - 2612, 2617, 2622, 2627, 2632, 2637, 2642, 2648, 2653, 2658, - 2663, 2668, 2673, 2678, 2683, 2688, 2693, 2698, 2703, 2708, - 2713, 2718, 2724, 2729, 2734, 2740, 2746, 2751, 2756, 2761, - 2767, 2772, 2777, 2782, 2788, 2793, 2798, 2803, 2809, 2814, - 2819, 2824, 2830, 2836, 2842, 2848, 2853, 2859, 2865, 2871, - 2876, 2881, 2886, 2891, 2896, 2902, 2907, 2912, 2917, 2923, - 2928, 2933, 2938, 2944, 2949, 2954, 2959, 2965, 2970, 2975, - 2980, 2986, 2991, 2996, 3001, 3007, 3012, 3017, 3022, 3028, - 3033, 3038, 3043, 3049, 3054, 3059, 3064, 3070, 3075, 3080, - 3085, 3091, 3096, 3101, 3106, 3112, 3117, 3122, 3127, 3133, - 3138, 3143, 3148, 3154, 3159, 3164, 3169, 3175, 3180, 3185, - 3190, 3196, 3201, 3206, 3212, 3218, 3224, 3230, 3237, 3244, - 3250, 3256, 3262, 3268, 3274, 3280, 3287, 3292, 3308, 3313, - 3318, 3326, 3326, 3337, 3337, 3347, 3350, 3363, 3385, 3412, - 3416, 3422, 3427, 3438, 3442, 3448, 3459, 3462, 3469, 3473, - 3474, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3488, 3494, - 3503, 3504, 3508, 3504, 3520, 3521, 3525, 3525, 3532, 3532, - 3546, 3549, 3557, 3565, 3576, 3577, 3581, 3585, 3592, 3599, - 3603, 3611, 3615, 3628, 3632, 3639, 3639, 3659, 3662, 3668, - 3680, 3692, 3696, 3703, 3703, 3718, 3718, 3734, 3734, 3755, - 3758, 3764, 3767, 3773, 3777, 3784, 3789, 3794, 
3801, 3804, - 3813, 3817, 3826, 3829, 3833, 3842, 3842, 3884, 3890, 3893, - 3898, 3901 + 0, 371, 371, 377, 380, 385, 388, 391, 395, 399, + 402, 406, 410, 414, 418, 422, 426, 432, 440, 443, + 446, 449, 452, 457, 465, 472, 479, 485, 489, 496, + 499, 505, 512, 522, 530, 535, 563, 572, 578, 582, + 586, 606, 607, 608, 609, 615, 616, 621, 626, 635, + 636, 641, 649, 650, 656, 665, 666, 671, 676, 681, + 689, 690, 699, 711, 712, 721, 722, 731, 732, 741, + 742, 750, 751, 759, 760, 768, 769, 769, 787, 788, + 804, 808, 812, 816, 821, 825, 829, 833, 837, 841, + 845, 852, 855, 866, 873, 878, 883, 890, 894, 898, + 902, 907, 912, 921, 921, 932, 936, 943, 950, 953, + 960, 968, 988, 1011, 1026, 1051, 1062, 1072, 1082, 1092, + 1101, 1104, 1108, 1112, 1117, 1125, 1132, 1137, 1142, 1147, + 1156, 1166, 1193, 1202, 1209, 1217, 1224, 1231, 1239, 1249, + 1256, 1267, 1273, 1276, 1283, 1287, 1291, 1300, 1310, 1313, + 1324, 1327, 1330, 1334, 1338, 1343, 1347, 1354, 1358, 1363, + 1369, 1375, 1382, 1387, 1395, 1401, 1413, 1427, 1433, 1438, + 1446, 1454, 1462, 1470, 1478, 1486, 1494, 1502, 1509, 1516, + 1520, 1525, 1530, 1535, 1540, 1545, 1550, 1554, 1558, 1562, + 1566, 1572, 1583, 1590, 1593, 1602, 1607, 1617, 1622, 1630, + 1634, 1644, 1647, 1653, 1659, 1666, 1676, 1680, 1684, 1688, + 1693, 1697, 1702, 1707, 1712, 1717, 1722, 1727, 1732, 1737, + 1742, 1748, 1754, 1760, 1765, 1770, 1775, 1780, 1785, 1790, + 1795, 1800, 1805, 1810, 1815, 1821, 1828, 1833, 1838, 1843, + 1848, 1853, 1858, 1863, 1868, 1873, 1878, 1883, 1891, 1899, + 1907, 1913, 1919, 1925, 1931, 1937, 1943, 1949, 1955, 1961, + 1967, 1973, 1979, 1985, 1991, 1997, 2003, 2009, 2015, 2021, + 2027, 2033, 2039, 2045, 2051, 2057, 2063, 2069, 2075, 2081, + 2087, 2093, 2099, 2105, 2113, 2121, 2129, 2137, 2145, 2153, + 2161, 2169, 2177, 2185, 2193, 2201, 2207, 2213, 2219, 2225, + 2231, 2237, 2243, 2249, 2255, 2261, 2267, 2273, 2279, 2285, + 2291, 2297, 2303, 2309, 2315, 2321, 2327, 2333, 2339, 2345, + 2351, 2357, 2363, 2369, 2375, 2381, 2387, 2393, 2399, 2405, + 2411, 2417, 2421, 2425, 2429, 2434, 2440, 2445, 2450, 2455, + 2460, 2465, 2470, 2476, 2481, 2486, 2491, 2496, 2501, 2507, + 2513, 2519, 2525, 2531, 2537, 2543, 2549, 2555, 2561, 2567, + 2573, 2579, 2585, 2590, 2595, 2600, 2605, 2610, 2615, 2621, + 2626, 2631, 2636, 2641, 2646, 2651, 2656, 2662, 2667, 2672, + 2677, 2682, 2687, 2692, 2697, 2702, 2707, 2712, 2717, 2722, + 2727, 2732, 2738, 2743, 2748, 2754, 2760, 2765, 2770, 2775, + 2781, 2786, 2791, 2796, 2802, 2807, 2812, 2817, 2823, 2828, + 2833, 2838, 2844, 2850, 2856, 2862, 2867, 2873, 2879, 2885, + 2890, 2895, 2900, 2905, 2910, 2916, 2921, 2926, 2931, 2937, + 2942, 2947, 2952, 2958, 2963, 2968, 2973, 2979, 2984, 2989, + 2994, 3000, 3005, 3010, 3015, 3021, 3026, 3031, 3036, 3042, + 3047, 3052, 3057, 3063, 3068, 3073, 3078, 3084, 3089, 3094, + 3099, 3105, 3110, 3115, 3120, 3126, 3131, 3136, 3141, 3147, + 3152, 3157, 3162, 3168, 3173, 3178, 3183, 3189, 3194, 3199, + 3204, 3210, 3215, 3220, 3225, 3230, 3235, 3240, 3245, 3250, + 3255, 3260, 3265, 3270, 3275, 3280, 3285, 3290, 3295, 3300, + 3305, 3310, 3315, 3320, 3325, 3330, 3336, 3342, 3348, 3354, + 3361, 3368, 3374, 3380, 3386, 3392, 3398, 3404, 3411, 3416, + 3432, 3437, 3442, 3450, 3450, 3461, 3461, 3471, 3474, 3487, + 3509, 3536, 3540, 3546, 3551, 3562, 3566, 3572, 3583, 3586, + 3593, 3597, 3598, 3604, 3605, 3606, 3607, 3608, 3609, 3610, + 3612, 3618, 3627, 3628, 3632, 3628, 3644, 3645, 3649, 3649, + 3656, 3656, 3670, 3673, 3681, 3689, 3700, 3701, 3705, 3709, + 3716, 3723, 3727, 3735, 3739, 3752, 3756, 
3763, 3763, 3783, + 3786, 3792, 3804, 3816, 3820, 3827, 3827, 3842, 3842, 3858, + 3858, 3879, 3882, 3888, 3891, 3897, 3901, 3908, 3913, 3918, + 3925, 3928, 3932, 3937, 3941, 3951, 3955, 3964, 3967, 3971, + 3980, 3980, 4022, 4028, 4031, 4036, 4039 }; #endif -#if YYDEBUG || YYERROR_VERBOSE || 1 +/** Accessing symbol of state STATE. */ +#define YY_ACCESSING_SYMBOL(State) YY_CAST (yysymbol_kind_t, yystos[State]) + +#if 1 +/* The user-facing name of the symbol whose (internal) number is + YYSYMBOL. No bounds checking. */ +static const char *yysymbol_name (yysymbol_kind_t yysymbol) YY_ATTRIBUTE_UNUSED; + /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. */ static const char *const yytname[] = { - "$end", "error", "$undefined", "CONST", "BOOL", "INT", "UINT", "FLOAT", - "BVEC2", "BVEC3", "BVEC4", "IVEC2", "IVEC3", "IVEC4", "UVEC2", "UVEC3", - "UVEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", "MAT4", "MAT2X2", - "MAT2X3", "MAT2X4", "MAT3X2", "MAT3X3", "MAT3X4", "MAT4X2", "MAT4X3", - "MAT4X4", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE", "SAMPLER2DSHADOW", - "SAMPLERCUBESHADOW", "SAMPLER2DARRAY", "SAMPLER2DARRAYSHADOW", - "ISAMPLER2D", "ISAMPLER3D", "ISAMPLERCUBE", "ISAMPLER2DARRAY", - "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", "USAMPLER2DARRAY", "SAMPLER", - "SAMPLERSHADOW", "TEXTURE2D", "TEXTURE3D", "TEXTURECUBE", - "TEXTURE2DARRAY", "ITEXTURE2D", "ITEXTURE3D", "ITEXTURECUBE", - "ITEXTURE2DARRAY", "UTEXTURE2D", "UTEXTURE3D", "UTEXTURECUBE", - "UTEXTURE2DARRAY", "ATTRIBUTE", "VARYING", "FLOAT16_T", "FLOAT32_T", - "DOUBLE", "FLOAT64_T", "INT64_T", "UINT64_T", "INT32_T", "UINT32_T", - "INT16_T", "UINT16_T", "INT8_T", "UINT8_T", "I64VEC2", "I64VEC3", - "I64VEC4", "U64VEC2", "U64VEC3", "U64VEC4", "I32VEC2", "I32VEC3", - "I32VEC4", "U32VEC2", "U32VEC3", "U32VEC4", "I16VEC2", "I16VEC3", - "I16VEC4", "U16VEC2", "U16VEC3", "U16VEC4", "I8VEC2", "I8VEC3", "I8VEC4", - "U8VEC2", "U8VEC3", "U8VEC4", "DVEC2", "DVEC3", "DVEC4", "DMAT2", - "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4", "F16MAT2", "F16MAT3", - "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4", "F32MAT2", "F32MAT3", - "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4", "F64MAT2", "F64MAT3", - "F64MAT4", "DMAT2X2", "DMAT2X3", "DMAT2X4", "DMAT3X2", "DMAT3X3", - "DMAT3X4", "DMAT4X2", "DMAT4X3", "DMAT4X4", "F16MAT2X2", "F16MAT2X3", - "F16MAT2X4", "F16MAT3X2", "F16MAT3X3", "F16MAT3X4", "F16MAT4X2", - "F16MAT4X3", "F16MAT4X4", "F32MAT2X2", "F32MAT2X3", "F32MAT2X4", - "F32MAT3X2", "F32MAT3X3", "F32MAT3X4", "F32MAT4X2", "F32MAT4X3", - "F32MAT4X4", "F64MAT2X2", "F64MAT2X3", "F64MAT2X4", "F64MAT3X2", - "F64MAT3X3", "F64MAT3X4", "F64MAT4X2", "F64MAT4X3", "F64MAT4X4", - "ATOMIC_UINT", "ACCSTRUCTNV", "ACCSTRUCTEXT", "RAYQUERYEXT", - "FCOOPMATNV", "ICOOPMATNV", "UCOOPMATNV", "SAMPLERCUBEARRAY", - "SAMPLERCUBEARRAYSHADOW", "ISAMPLERCUBEARRAY", "USAMPLERCUBEARRAY", - "SAMPLER1D", "SAMPLER1DARRAY", "SAMPLER1DARRAYSHADOW", "ISAMPLER1D", - "SAMPLER1DSHADOW", "SAMPLER2DRECT", "SAMPLER2DRECTSHADOW", - "ISAMPLER2DRECT", "USAMPLER2DRECT", "SAMPLERBUFFER", "ISAMPLERBUFFER", - "USAMPLERBUFFER", "SAMPLER2DMS", "ISAMPLER2DMS", "USAMPLER2DMS", - "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY", "USAMPLER2DMSARRAY", - "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT", "ISAMPLER1DARRAY", - "USAMPLER1D", "USAMPLER1DARRAY", "F16SAMPLER1D", "F16SAMPLER2D", - "F16SAMPLER3D", "F16SAMPLER2DRECT", "F16SAMPLERCUBE", + "\"end of file\"", "error", "\"invalid token\"", "CONST", "BOOL", "INT", + "UINT", "FLOAT", "BVEC2", "BVEC3", 
"BVEC4", "IVEC2", "IVEC3", "IVEC4", + "UVEC2", "UVEC3", "UVEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", + "MAT4", "MAT2X2", "MAT2X3", "MAT2X4", "MAT3X2", "MAT3X3", "MAT3X4", + "MAT4X2", "MAT4X3", "MAT4X4", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE", + "SAMPLER2DSHADOW", "SAMPLERCUBESHADOW", "SAMPLER2DARRAY", + "SAMPLER2DARRAYSHADOW", "ISAMPLER2D", "ISAMPLER3D", "ISAMPLERCUBE", + "ISAMPLER2DARRAY", "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", + "USAMPLER2DARRAY", "SAMPLER", "SAMPLERSHADOW", "TEXTURE2D", "TEXTURE3D", + "TEXTURECUBE", "TEXTURE2DARRAY", "ITEXTURE2D", "ITEXTURE3D", + "ITEXTURECUBE", "ITEXTURE2DARRAY", "UTEXTURE2D", "UTEXTURE3D", + "UTEXTURECUBE", "UTEXTURE2DARRAY", "ATTRIBUTE", "VARYING", "FLOAT16_T", + "FLOAT32_T", "DOUBLE", "FLOAT64_T", "INT64_T", "UINT64_T", "INT32_T", + "UINT32_T", "INT16_T", "UINT16_T", "INT8_T", "UINT8_T", "I64VEC2", + "I64VEC3", "I64VEC4", "U64VEC2", "U64VEC3", "U64VEC4", "I32VEC2", + "I32VEC3", "I32VEC4", "U32VEC2", "U32VEC3", "U32VEC4", "I16VEC2", + "I16VEC3", "I16VEC4", "U16VEC2", "U16VEC3", "U16VEC4", "I8VEC2", + "I8VEC3", "I8VEC4", "U8VEC2", "U8VEC3", "U8VEC4", "DVEC2", "DVEC3", + "DVEC4", "DMAT2", "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4", + "F16MAT2", "F16MAT3", "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4", + "F32MAT2", "F32MAT3", "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4", + "F64MAT2", "F64MAT3", "F64MAT4", "DMAT2X2", "DMAT2X3", "DMAT2X4", + "DMAT3X2", "DMAT3X3", "DMAT3X4", "DMAT4X2", "DMAT4X3", "DMAT4X4", + "F16MAT2X2", "F16MAT2X3", "F16MAT2X4", "F16MAT3X2", "F16MAT3X3", + "F16MAT3X4", "F16MAT4X2", "F16MAT4X3", "F16MAT4X4", "F32MAT2X2", + "F32MAT2X3", "F32MAT2X4", "F32MAT3X2", "F32MAT3X3", "F32MAT3X4", + "F32MAT4X2", "F32MAT4X3", "F32MAT4X4", "F64MAT2X2", "F64MAT2X3", + "F64MAT2X4", "F64MAT3X2", "F64MAT3X3", "F64MAT3X4", "F64MAT4X2", + "F64MAT4X3", "F64MAT4X4", "ATOMIC_UINT", "ACCSTRUCTNV", "ACCSTRUCTEXT", + "RAYQUERYEXT", "FCOOPMATNV", "ICOOPMATNV", "UCOOPMATNV", + "SAMPLERCUBEARRAY", "SAMPLERCUBEARRAYSHADOW", "ISAMPLERCUBEARRAY", + "USAMPLERCUBEARRAY", "SAMPLER1D", "SAMPLER1DARRAY", + "SAMPLER1DARRAYSHADOW", "ISAMPLER1D", "SAMPLER1DSHADOW", "SAMPLER2DRECT", + "SAMPLER2DRECTSHADOW", "ISAMPLER2DRECT", "USAMPLER2DRECT", + "SAMPLERBUFFER", "ISAMPLERBUFFER", "USAMPLERBUFFER", "SAMPLER2DMS", + "ISAMPLER2DMS", "USAMPLER2DMS", "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY", + "USAMPLER2DMSARRAY", "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT", + "ISAMPLER1DARRAY", "USAMPLER1D", "USAMPLER1DARRAY", "F16SAMPLER1D", + "F16SAMPLER2D", "F16SAMPLER3D", "F16SAMPLER2DRECT", "F16SAMPLERCUBE", "F16SAMPLER1DARRAY", "F16SAMPLER2DARRAY", "F16SAMPLERCUBEARRAY", "F16SAMPLERBUFFER", "F16SAMPLER2DMS", "F16SAMPLER2DMSARRAY", "F16SAMPLER1DSHADOW", "F16SAMPLER2DSHADOW", "F16SAMPLER1DARRAYSHADOW", @@ -1076,7 +1237,13 @@ static const char *const yytname[] = "F16IMAGE1D", "F16IMAGE2D", "F16IMAGE3D", "F16IMAGE2DRECT", "F16IMAGECUBE", "F16IMAGE1DARRAY", "F16IMAGE2DARRAY", "F16IMAGECUBEARRAY", "F16IMAGEBUFFER", "F16IMAGE2DMS", - "F16IMAGE2DMSARRAY", "TEXTURECUBEARRAY", "ITEXTURECUBEARRAY", + "F16IMAGE2DMSARRAY", "I64IMAGE1D", "U64IMAGE1D", "I64IMAGE2D", + "U64IMAGE2D", "I64IMAGE3D", "U64IMAGE3D", "I64IMAGE2DRECT", + "U64IMAGE2DRECT", "I64IMAGECUBE", "U64IMAGECUBE", "I64IMAGEBUFFER", + "U64IMAGEBUFFER", "I64IMAGE1DARRAY", "U64IMAGE1DARRAY", + "I64IMAGE2DARRAY", "U64IMAGE2DARRAY", "I64IMAGECUBEARRAY", + "U64IMAGECUBEARRAY", "I64IMAGE2DMS", "U64IMAGE2DMS", "I64IMAGE2DMSARRAY", + "U64IMAGE2DMSARRAY", "TEXTURECUBEARRAY", "ITEXTURECUBEARRAY", "UTEXTURECUBEARRAY", "TEXTURE1D", 
"ITEXTURE1D", "UTEXTURE1D", "TEXTURE1DARRAY", "ITEXTURE1DARRAY", "UTEXTURE1DARRAY", "TEXTURE2DRECT", "ITEXTURE2DRECT", "UTEXTURE2DRECT", "TEXTUREBUFFER", "ITEXTUREBUFFER", @@ -1100,8 +1267,9 @@ static const char *const yytname[] = "FLOATCONSTANT", "INTCONSTANT", "UINTCONSTANT", "BOOLCONSTANT", "IDENTIFIER", "TYPE_NAME", "CENTROID", "IN", "OUT", "INOUT", "STRUCT", "VOID", "WHILE", "BREAK", "CONTINUE", "DO", "ELSE", "FOR", "IF", - "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT", "UNIFORM", "SHARED", - "BUFFER", "FLAT", "SMOOTH", "LAYOUT", "DOUBLECONSTANT", "INT16CONSTANT", + "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT", "TERMINATE_INVOCATION", + "TERMINATE_RAY", "IGNORE_INTERSECTION", "UNIFORM", "SHARED", "BUFFER", + "FLAT", "SMOOTH", "LAYOUT", "DOUBLECONSTANT", "INT16CONSTANT", "UINT16CONSTANT", "FLOAT16CONSTANT", "INT32CONSTANT", "UINT32CONSTANT", "INT64CONSTANT", "UINT64CONSTANT", "SUBROUTINE", "DEMOTE", "PAYLOADNV", "PAYLOADINNV", "HITATTRNV", "CALLDATANV", "CALLDATAINNV", "PAYLOADEXT", @@ -1151,12 +1319,18 @@ static const char *const yytname[] = "function_definition", "$@13", "attribute", "attribute_list", "single_attribute", YY_NULLPTR }; + +static const char * +yysymbol_name (yysymbol_kind_t yysymbol) +{ + return yytname[yysymbol]; +} #endif -# ifdef YYPRINT +#ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ -static const yytype_uint16 yytoknum[] = +static const yytype_int16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, @@ -1199,104 +1373,110 @@ static const yytype_uint16 yytoknum[] = 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, - 665, 666, 667, 668, 669, 670, 671, 672, 673, 674 + 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, + 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, + 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, + 695, 696, 697, 698, 699 }; -# endif +#endif -#define YYPACT_NINF -457 +#define YYPACT_NINF (-732) -#define yypact_value_is_default(Yystate) \ - (!!((Yystate) == (-457))) +#define yypact_value_is_default(Yyn) \ + ((Yyn) == YYPACT_NINF) -#define YYTABLE_NINF -537 +#define YYTABLE_NINF (-559) -#define yytable_value_is_error(Yytable_value) \ +#define yytable_value_is_error(Yyn) \ 0 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ static const yytype_int16 yypact[] = { - 4075, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, 132, -457, - -457, -457, -457, -457, -1, -457, -457, -457, -457, -457, - -457, -301, -298, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, 11, -249, 17, 30, 6160, - 20, -457, 28, -457, -457, -457, -457, 4492, -457, -457, - -457, -457, 50, -457, -457, 739, -457, -457, 16, -457, - 81, -29, 69, -457, -313, -457, 111, -457, 6160, -457, - -457, -457, 6160, 103, 106, -457, -314, -457, 72, -457, - -457, 8566, 142, -457, -457, -457, 136, 6160, -457, 144, - -457, 53, -457, -457, 76, 6974, -457, -312, 1156, -457, - -457, -457, -457, 142, -309, -457, 7372, -308, -457, 119, - -457, 65, 8566, 8566, -457, 8566, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, -457, 36, -457, -457, -457, 171, - 85, 8964, 173, -457, 8566, -457, -457, -323, 174, -457, - 6160, 139, 4909, -457, 6160, 8566, -457, -29, -457, 141, - -457, -457, 145, 99, 35, 26, 71, 156, 159, 161, - 196, 195, 23, 181, 7770, -457, 183, 182, -457, -457, - 186, 179, 180, -457, 191, 192, 187, 8168, 193, 8566, - 188, 189, 127, -457, -457, 96, -457, -249, 200, 201, - -457, -457, -457, -457, -457, 1573, -457, -457, -457, -457, - -457, -457, -457, -457, -457, -24, 174, 7372, 13, 7372, - -457, -457, 7372, 6160, -457, 166, -457, -457, -457, 86, - -457, -457, 8566, 168, -457, -457, 8566, 205, -457, -457, - -457, 8566, -457, 139, 142, 124, -457, -457, -457, 5326, - -457, -457, -457, -457, 8566, 8566, 8566, 8566, 8566, 8566, - 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, - 8566, 
8566, 8566, -457, -457, -457, 206, 172, -457, 1990, - -457, -457, -457, 1990, -457, 8566, -457, -457, 130, 8566, - 125, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, -457, -457, 8566, 8566, -457, -457, -457, -457, - -457, -457, -457, 7372, -457, 94, -457, 5743, -457, -457, - 207, 204, -457, -457, -457, 131, 174, 139, -457, -457, - -457, -457, -457, 145, 145, 99, 99, 35, 35, 35, - 35, 26, 26, 71, 156, 159, 161, 196, 195, 8566, - -457, 212, 60, -457, 1990, 3658, 169, 3241, 87, -457, - 89, -457, -457, -457, -457, -457, 6576, -457, -457, -457, - -457, 143, 8566, 211, 172, 210, 204, 184, 6160, 217, - 219, -457, -457, 3658, 218, -457, -457, -457, 8566, 220, - -457, -457, -457, 214, 2407, 8566, -457, 216, 223, 185, - 224, 2824, -457, 225, -457, -457, 7372, -457, -457, -457, - 97, 8566, 2407, 218, -457, -457, 1990, -457, 222, 204, - -457, -457, 1990, 229, -457, -457 + 4303, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + 109, -732, -732, -732, -732, -732, 1, -732, -732, -732, + -732, -732, -732, -324, -261, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, 11, 44, 22, + 7, 6513, -332, -732, -10, -732, -732, -732, -732, 4745, + -732, -732, -732, -732, 46, -732, -732, 767, -732, -732, + 16, -732, 69, -5, 47, -732, -338, -732, 91, -732, + 6513, -732, -732, -732, 6513, 72, 80, -732, 13, -732, + 74, -732, -732, 9069, 126, -732, -732, -732, 127, 6513, + -732, 144, -732, 17, -732, -732, 61, 7377, -732, 10, + 1209, -732, -732, -732, -732, 126, 
25, -732, 7800, 26, + -732, 119, -732, 78, 9069, 9069, -732, 9069, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, 36, -732, -732, + -732, 164, 65, 9492, 171, -732, 9069, -732, -732, -340, + 173, -732, 6513, 140, 5187, -732, 6513, 9069, -732, -5, + -732, 141, -732, -732, 124, 130, 179, 27, 117, 156, + 158, 160, 195, 194, 20, 181, 8223, -732, 183, 182, + -732, -732, 186, 178, 180, -732, 189, 192, 184, 8646, + 193, 9069, 187, 188, 190, 196, 197, 129, -732, -732, + 89, -732, 44, 199, 204, -732, -732, -732, -732, -732, + 1651, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -376, 173, 7800, 54, 7800, -732, -732, 7800, 6513, -732, + 161, -732, -732, -732, 70, -732, -732, 9069, 169, -732, + -732, 9069, 207, -732, -732, -732, 9069, -732, 140, 126, + 103, -732, -732, -732, 5629, -732, -732, -732, -732, 9069, + 9069, 9069, 9069, 9069, 9069, 9069, 9069, 9069, 9069, 9069, + 9069, 9069, 9069, 9069, 9069, 9069, 9069, 9069, -732, -732, + -732, 209, 177, -732, 2093, -732, -732, -732, 2093, -732, + 9069, -732, -732, 122, 9069, 152, -732, -732, -732, -732, + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, 9069, 9069, -732, -732, -732, -732, -732, -732, + -732, 7800, -732, 143, -732, 6071, -732, -732, 211, 208, + -732, -732, -732, 123, 173, 140, -732, -732, -732, -732, + -732, 124, 124, 130, 130, 179, 179, 179, 179, 27, + 27, 117, 156, 158, 160, 195, 194, 9069, -732, 216, + 87, -732, 2093, 3861, 174, 3419, 75, -732, 85, -732, + -732, -732, -732, -732, 6954, -732, -732, -732, -732, 154, + 9069, 217, 177, 191, 208, 185, 6513, 221, 223, -732, + -732, 3861, 220, -732, -732, -732, 9069, 224, -732, -732, + -732, 218, 2535, 9069, -732, 219, 225, 198, 226, 2977, + -732, 227, -732, -732, 7800, -732, -732, -732, 86, 9069, + 2535, 220, -732, -732, 2093, -732, 222, 208, -732, -732, + 2093, 228, -732, -732 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. 
*/ -static const yytype_uint16 yydefact[] = +static const yytype_int16 yydefact[] = { 0, 157, 210, 208, 209, 207, 214, 215, 216, 217, 218, 219, 220, 221, 222, 211, 212, 213, 223, 224, @@ -1314,98 +1494,101 @@ static const yytype_uint16 yydefact[] = 298, 299, 300, 301, 302, 303, 304, 305, 306, 310, 311, 312, 313, 314, 315, 316, 317, 318, 322, 323, 324, 325, 326, 327, 328, 329, 330, 334, 331, 332, - 333, 493, 494, 495, 346, 347, 370, 373, 335, 344, + 333, 515, 516, 517, 346, 347, 370, 373, 335, 344, 345, 361, 343, 392, 393, 396, 397, 398, 400, 401, - 402, 404, 405, 406, 408, 409, 483, 484, 369, 371, + 402, 404, 405, 406, 408, 409, 505, 506, 369, 371, 372, 348, 349, 350, 394, 351, 355, 356, 359, 399, 403, 407, 352, 353, 357, 358, 395, 354, 360, 439, 441, 442, 443, 445, 446, 447, 449, 450, 451, 453, 454, 455, 457, 458, 459, 461, 462, 463, 465, 466, 467, 469, 470, 471, 473, 474, 475, 477, 478, 479, 481, 482, 440, 444, 448, 452, 456, 464, 468, 472, - 460, 476, 480, 374, 375, 376, 410, 419, 421, 415, - 420, 422, 423, 425, 426, 427, 429, 430, 431, 433, - 434, 435, 437, 438, 411, 412, 413, 424, 414, 416, - 417, 418, 428, 432, 436, 485, 486, 489, 490, 491, - 492, 487, 488, 584, 132, 498, 499, 500, 0, 497, - 161, 159, 160, 158, 0, 206, 162, 163, 164, 134, - 133, 0, 190, 171, 173, 169, 175, 177, 172, 174, - 170, 176, 178, 167, 168, 192, 179, 186, 187, 188, - 189, 180, 181, 182, 183, 184, 185, 135, 136, 137, - 138, 139, 140, 147, 583, 0, 585, 0, 109, 108, - 0, 120, 125, 154, 153, 151, 155, 0, 148, 150, - 156, 130, 202, 152, 496, 0, 580, 582, 0, 503, - 0, 0, 0, 97, 0, 94, 0, 107, 0, 116, - 110, 118, 0, 119, 0, 95, 126, 100, 0, 149, - 131, 0, 195, 201, 1, 581, 0, 0, 501, 144, - 146, 0, 142, 193, 0, 0, 98, 0, 0, 586, - 111, 115, 117, 113, 121, 112, 0, 127, 103, 0, - 101, 0, 0, 0, 9, 0, 43, 42, 44, 41, - 5, 6, 7, 8, 2, 16, 14, 15, 17, 10, - 11, 12, 13, 3, 18, 37, 20, 25, 26, 0, - 0, 30, 0, 204, 0, 36, 34, 0, 196, 96, - 0, 0, 0, 505, 0, 0, 141, 0, 191, 0, - 197, 45, 49, 52, 55, 60, 63, 65, 67, 69, - 71, 73, 75, 0, 0, 99, 0, 531, 540, 544, - 0, 0, 0, 565, 0, 0, 0, 0, 0, 0, - 0, 0, 45, 78, 91, 0, 518, 0, 156, 130, - 521, 542, 520, 528, 519, 0, 522, 523, 546, 524, - 553, 525, 526, 561, 527, 0, 114, 0, 122, 0, - 513, 129, 0, 0, 105, 0, 102, 38, 39, 0, - 22, 23, 0, 0, 28, 27, 0, 206, 31, 33, - 40, 0, 203, 0, 511, 0, 509, 504, 506, 0, - 93, 145, 143, 194, 0, 0, 0, 0, 0, 0, + 460, 476, 480, 483, 484, 485, 486, 487, 488, 489, + 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, + 500, 501, 502, 503, 504, 374, 375, 376, 410, 419, + 421, 415, 420, 422, 423, 425, 426, 427, 429, 430, + 431, 433, 434, 435, 437, 438, 411, 412, 413, 424, + 414, 416, 417, 418, 428, 432, 436, 507, 508, 511, + 512, 513, 514, 509, 510, 609, 132, 520, 521, 522, + 0, 519, 161, 159, 160, 158, 0, 206, 162, 163, + 164, 134, 133, 0, 190, 171, 173, 169, 175, 177, + 172, 174, 170, 176, 178, 167, 168, 192, 179, 186, + 187, 188, 189, 180, 181, 182, 183, 184, 185, 135, + 136, 137, 138, 139, 140, 147, 608, 0, 610, 0, + 109, 108, 0, 120, 125, 154, 153, 151, 155, 0, + 148, 150, 156, 130, 202, 152, 518, 0, 605, 607, + 0, 525, 0, 0, 0, 97, 0, 94, 0, 107, + 0, 116, 110, 118, 0, 119, 0, 95, 126, 100, + 0, 149, 131, 0, 195, 201, 1, 606, 0, 0, + 523, 144, 146, 0, 142, 193, 0, 0, 98, 0, + 0, 611, 111, 115, 117, 113, 121, 112, 0, 127, + 103, 0, 101, 0, 0, 0, 9, 0, 43, 42, + 44, 41, 5, 6, 7, 8, 2, 16, 14, 15, + 17, 10, 11, 12, 13, 3, 18, 37, 20, 25, + 26, 0, 0, 30, 0, 204, 0, 36, 34, 0, + 196, 96, 0, 0, 0, 527, 0, 0, 141, 0, + 191, 0, 197, 45, 49, 52, 55, 60, 63, 
65, + 67, 69, 71, 73, 75, 0, 0, 99, 0, 553, + 562, 566, 0, 0, 0, 587, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 45, 78, 91, + 0, 540, 0, 156, 130, 543, 564, 542, 550, 541, + 0, 544, 545, 568, 546, 575, 547, 548, 583, 549, + 0, 114, 0, 122, 0, 535, 129, 0, 0, 105, + 0, 102, 38, 39, 0, 22, 23, 0, 0, 28, + 27, 0, 206, 31, 33, 40, 0, 203, 0, 533, + 0, 531, 526, 528, 0, 93, 145, 143, 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 76, 198, 199, 0, 0, 530, 0, - 563, 576, 575, 0, 567, 0, 579, 577, 0, 0, - 0, 560, 529, 81, 82, 84, 83, 86, 87, 88, - 89, 90, 85, 80, 0, 0, 545, 541, 543, 547, - 554, 562, 124, 0, 516, 0, 128, 0, 106, 4, - 0, 24, 21, 32, 205, 0, 512, 0, 507, 502, - 46, 47, 48, 51, 50, 53, 54, 58, 59, 56, - 57, 61, 62, 64, 66, 68, 70, 72, 74, 0, - 200, 590, 0, 588, 532, 0, 0, 0, 0, 578, - 0, 559, 79, 92, 123, 514, 0, 104, 19, 508, - 510, 0, 0, 0, 0, 0, 551, 0, 0, 0, - 0, 570, 569, 572, 538, 555, 515, 517, 0, 0, - 587, 589, 533, 0, 0, 0, 571, 0, 0, 550, - 0, 0, 548, 0, 77, 591, 0, 535, 564, 534, - 0, 573, 0, 538, 537, 539, 557, 552, 0, 574, - 568, 549, 558, 0, 566, 556 + 0, 0, 0, 0, 0, 0, 0, 0, 76, 198, + 199, 0, 0, 552, 0, 585, 598, 597, 0, 589, + 0, 601, 599, 0, 0, 0, 582, 602, 603, 604, + 551, 81, 82, 84, 83, 86, 87, 88, 89, 90, + 85, 80, 0, 0, 567, 563, 565, 569, 576, 584, + 124, 0, 538, 0, 128, 0, 106, 4, 0, 24, + 21, 32, 205, 0, 534, 0, 529, 524, 46, 47, + 48, 51, 50, 53, 54, 58, 59, 56, 57, 61, + 62, 64, 66, 68, 70, 72, 74, 0, 200, 615, + 0, 613, 554, 0, 0, 0, 0, 600, 0, 581, + 79, 92, 123, 536, 0, 104, 19, 530, 532, 0, + 0, 0, 0, 0, 573, 0, 0, 0, 0, 592, + 591, 594, 560, 577, 537, 539, 0, 0, 612, 614, + 555, 0, 0, 0, 593, 0, 0, 572, 0, 0, + 570, 0, 77, 616, 0, 557, 586, 556, 0, 595, + 0, 560, 559, 561, 579, 574, 0, 596, 590, 571, + 580, 0, 588, 578 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { - -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, - -457, -457, 8868, -457, -87, -84, -127, -93, -33, -31, - -27, -25, -28, -26, -457, -86, -457, -103, -457, -111, - -125, 2, -457, -457, -457, 4, -457, -457, -457, 176, - 194, 178, -457, -457, -337, -457, -457, -457, -457, 95, - -457, -37, -46, -457, 9, -457, 0, -63, -457, -457, - -457, -457, 263, -457, -457, -457, -456, -140, 10, -73, - -211, -457, -102, -198, -321, -457, -144, -457, -457, -155, - -154, -457, -457, 198, -274, -97, -457, 46, -457, -118, - -457, 51, -457, -457, -457, -457, 52, -457, -457, -457, - -457, -457, -457, -457, -457, 213, -457, -457, -457, -457, - -105 + -732, -732, -732, -732, -732, -732, -732, -732, -732, -732, + -732, -732, 9402, -732, -90, -89, -153, -92, -29, -28, + -27, -26, -30, -25, -732, -88, -732, -101, -732, -113, + -132, 2, -732, -732, -732, 4, -732, -732, -732, 200, + 201, 202, -732, -732, -343, -732, -732, -732, -732, 92, + -732, -36, -46, -732, 9, -732, 0, -67, -732, -732, + -732, -732, 263, -732, -732, -732, -481, -142, 8, -78, + -214, -732, -107, -204, -731, -732, -149, -732, -732, -160, + -159, -732, -732, 212, -269, -104, -732, 45, -732, -127, + -732, 48, -732, -732, -732, -732, 49, -732, -732, -732, + -732, -732, -732, -732, -732, 210, -732, -732, -732, -732, + -116 }; /* YYDEFGOTO[NTERM-NUM]. 
*/ static const yytype_int16 yydefgoto[] = { - -1, 443, 444, 445, 630, 446, 447, 448, 449, 450, - 451, 452, 502, 454, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 503, 659, 504, 614, 505, - 561, 506, 345, 533, 421, 507, 347, 348, 349, 379, - 380, 381, 350, 351, 352, 353, 354, 355, 401, 402, - 356, 357, 358, 359, 455, 404, 456, 407, 392, 393, - 457, 362, 363, 364, 464, 397, 462, 463, 555, 556, - 531, 625, 510, 511, 512, 513, 514, 589, 685, 718, - 709, 710, 711, 719, 515, 516, 517, 518, 712, 689, - 519, 520, 713, 733, 521, 522, 523, 665, 593, 667, - 693, 707, 708, 524, 365, 366, 367, 376, 525, 662, - 663 + -1, 465, 466, 467, 658, 468, 469, 470, 471, 472, + 473, 474, 527, 476, 494, 495, 496, 497, 498, 499, + 500, 501, 502, 503, 504, 528, 687, 529, 642, 530, + 586, 531, 367, 558, 443, 532, 369, 370, 371, 401, + 402, 403, 372, 373, 374, 375, 376, 377, 423, 424, + 378, 379, 380, 381, 477, 426, 478, 429, 414, 415, + 479, 384, 385, 386, 486, 419, 484, 485, 580, 581, + 556, 653, 535, 536, 537, 538, 539, 614, 713, 746, + 737, 738, 739, 747, 540, 541, 542, 543, 740, 717, + 544, 545, 741, 761, 546, 547, 548, 693, 618, 695, + 721, 735, 736, 549, 387, 388, 389, 398, 550, 690, + 691 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If @@ -1413,14 +1596,14 @@ static const yytype_int16 yydefgoto[] = number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int16 yytable[] = { - 361, 551, 344, 415, 346, 405, 405, 484, 559, 360, - 405, 484, 416, 552, 406, 485, 371, 527, 532, 372, + 383, 745, 366, 427, 368, 584, 576, 512, 753, 382, + 515, 428, 516, 517, 406, 393, 520, 407, 577, 745, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 627, 375, 61, + 52, 53, 54, 55, 56, 57, 58, 655, 394, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, @@ -1444,50 +1627,97 @@ static const yytype_int16 yytable[] = 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 389, 382, 530, 539, 664, 622, 618, 624, 483, - 369, 626, 558, 417, 399, 571, 572, 582, 687, 458, - 569, 570, 484, 540, 541, 377, 389, 490, 373, 623, - 493, 382, 494, 495, 384, 400, 498, 385, 548, 383, - 526, 528, 370, -35, 378, 542, 687, 390, 360, 543, - 460, 573, 574, 583, 374, 361, 360, 344, 396, 346, - 299, 466, 575, 576, 360, 304, 305, 467, 383, 560, - 683, 386, 383, 717, 684, 391, 598, 360, 600, 535, - 725, 360, 536, 418, 468, 666, 419, 461, 586, 420, - 469, 717, 398, 545, 629, 694, 360, 695, 509, 546, - 615, 615, 674, 615, 389, 728, 675, 508, 676, 558, - 615, 615, 403, 616, 530, 460, 530, 460, 567, 530, - 568, 631, 408, 603, 604, 605, 606, 607, 608, 609, - 610, 611, 612, 633, 647, 648, 649, 650, 637, 615, - 671, 638, 732, 613, 615, 637, 413, 669, 679, 414, - 553, 405, 461, 459, 461, 697, 618, 615, 698, 360, - 465, 360, 534, 360, 295, 296, 297, 564, 565, 566, - 643, 644, 651, 652, 668, 645, 646, 558, 670, 544, - 549, 636, 554, 484, 563, 577, 460, 578, 579, 580, - 581, 584, 587, 590, 588, 727, 591, 592, 594, 595, - 599, 672, 673, 601, 596, 509, 602, -36, -34, 628, - 530, 632, 460, -29, 508, 661, 660, 678, 615, 682, - 690, 700, 702, 461, 618, 704, 705, 703, 715, -536, - 716, 722, 360, 721, 653, 487, 726, 654, 681, 734, - 723, 735, 655, 657, 686, 656, 658, 699, 411, 461, - 
412, 368, 562, 635, 680, 691, 724, 730, 360, 731, - 692, 619, 410, 530, 409, 706, 620, 621, 395, 701, - 0, 0, 686, 0, 0, 0, 0, 0, 0, 509, - 460, 0, 0, 509, 720, 714, 560, 0, 508, 0, - 0, 0, 508, 0, 0, 0, 0, 0, 0, 0, - 729, 0, 0, 530, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 461, 688, 0, - 0, 0, 0, 0, 0, 0, 360, 0, 0, 0, - 0, 0, 389, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 688, 0, 0, 0, - 0, 0, 0, 0, 509, 509, 0, 509, 0, 0, - 0, 0, 0, 508, 508, 0, 508, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 390, 0, - 0, 0, 0, 509, 0, 0, 0, 360, 0, 0, - 0, 0, 508, 0, 509, 0, 0, 0, 0, 0, - 0, 509, 0, 508, 0, 0, 0, 0, 0, 0, - 508, 0, 509, 0, 0, 0, 509, 0, 0, 0, - 0, 508, 509, 0, 0, 508, 0, 0, 0, 394, - 0, 508, 1, 2, 3, 4, 5, 6, 7, 8, + 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 411, 564, 404, 646, 555, 650, 505, + 652, 439, 583, 654, 391, 692, 607, 480, 596, 597, + 715, 506, 437, 400, 427, 565, 566, 488, 411, 507, + 395, 438, 399, 489, 404, 408, 427, 506, 551, 553, + 421, 405, 573, 552, 557, -35, 392, 567, 715, 412, + 382, 568, 608, 482, 598, 599, 396, 383, 382, 366, + 418, 368, 321, 397, 422, 506, 382, 326, 327, 585, + 405, 490, 651, 413, 405, 570, 623, 491, 625, 382, + 657, 571, 420, 382, 694, 722, 643, 440, 611, 483, + 441, 643, 425, 442, 560, 723, 756, 561, 382, 711, + 534, 643, 643, 712, 430, 643, 411, 702, 644, 533, + 600, 601, 583, 675, 676, 677, 678, 435, 482, 665, + 482, 555, 666, 555, 659, 436, 555, 631, 632, 633, + 634, 635, 636, 637, 638, 639, 640, 427, 643, 665, + 661, 697, 707, 317, 318, 319, 481, 641, 589, 590, + 591, 592, 578, 593, 483, 760, 483, 703, 646, 704, + 725, 382, 487, 382, 559, 382, 594, 595, 643, 699, + 643, 726, 671, 672, 569, 673, 674, 696, 679, 680, + 574, 698, 664, 583, 506, 579, 588, 602, 603, 604, + 605, 606, 482, 609, 612, 615, 613, 616, 619, 617, + 755, 620, 624, 621, 626, 730, 656, 627, -36, 628, + 534, 700, 701, -34, 660, 629, 630, -29, 482, 533, + 555, 688, 689, 706, 643, 710, 646, 718, 483, 728, + 731, 732, 733, -558, 743, 750, 744, 382, 749, 509, + 754, 762, 763, 681, 709, 682, 685, 683, 727, 684, + 714, 587, 686, 390, 483, 751, 663, 708, 719, 752, + 758, 720, 759, 382, 734, 647, 729, 417, 648, 649, + 0, 432, 0, 555, 433, 0, 434, 0, 714, 0, + 431, 0, 0, 0, 534, 0, 0, 0, 534, 482, + 748, 0, 585, 533, 0, 742, 0, 533, 0, 0, + 0, 0, 0, 0, 0, 0, 757, 0, 0, 0, + 0, 0, 0, 555, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 483, 0, 716, 0, 0, + 0, 0, 0, 0, 382, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 716, 0, 0, 0, 0, + 0, 0, 534, 534, 0, 534, 0, 0, 0, 0, + 0, 533, 533, 0, 533, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 412, 0, 0, 0, + 0, 534, 0, 0, 0, 382, 0, 0, 0, 0, + 533, 0, 534, 0, 0, 0, 0, 0, 0, 534, + 0, 533, 0, 0, 0, 0, 0, 0, 533, 0, + 534, 0, 0, 0, 534, 0, 0, 0, 0, 533, + 534, 0, 0, 533, 0, 0, 0, 416, 0, 533, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 
155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 315, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 316, 317, 318, 319, 320, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 322, 323, 324, 325, 326, 327, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 328, 329, 330, 331, 332, + 333, 0, 0, 0, 0, 0, 0, 0, 0, 334, + 0, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, + 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, @@ -1516,61 +1746,66 @@ static const yytype_int16 yytable[] = 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, - 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 293, 0, 0, 0, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 294, 295, 296, 297, 298, 0, 0, 0, 0, 0, - 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, - 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, - 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, - 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, - 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 
249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, - 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 424, 425, 0, 486, 0, 487, 488, 0, - 0, 0, 0, 489, 426, 427, 428, 429, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, - 297, 298, 0, 0, 0, 430, 431, 432, 433, 434, - 299, 300, 301, 302, 303, 304, 305, 490, 491, 492, - 493, 0, 494, 495, 496, 497, 498, 499, 500, 306, - 307, 308, 309, 310, 311, 435, 436, 437, 438, 439, - 440, 441, 442, 312, 501, 313, 314, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, - 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, - 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 0, 0, 0, 0, 0, 0, 0, 446, 447, 0, + 508, 0, 509, 510, 0, 0, 0, 0, 511, 448, + 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 316, 317, 318, 319, 320, 0, 0, 0, + 452, 453, 454, 455, 456, 321, 322, 323, 324, 325, + 326, 327, 512, 513, 514, 515, 0, 516, 517, 518, + 519, 520, 521, 522, 523, 524, 525, 328, 329, 330, + 331, 332, 333, 457, 458, 459, 460, 461, 462, 463, + 464, 334, 526, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, + 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, + 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 0, 0, + 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 446, + 447, 0, 508, 0, 509, 645, 0, 0, 0, 0, + 511, 448, 449, 450, 451, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 316, 317, 318, 319, 320, 0, + 0, 0, 452, 453, 454, 455, 456, 321, 322, 323, + 324, 325, 326, 327, 512, 513, 514, 515, 0, 516, + 517, 518, 519, 520, 521, 522, 523, 524, 525, 328, + 329, 330, 331, 332, 333, 457, 458, 459, 460, 461, + 462, 463, 464, 334, 526, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, @@ -1599,61 +1834,66 @@ static const yytype_int16 yytable[] = 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, - 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, - 422, 423, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 424, - 425, 0, 486, 0, 487, 617, 0, 0, 0, 0, - 489, 426, 427, 428, 429, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 294, 295, 296, 297, 298, 0, - 0, 0, 430, 431, 432, 433, 434, 299, 300, 301, - 302, 303, 304, 305, 490, 491, 492, 493, 0, 494, - 495, 496, 497, 498, 499, 500, 306, 307, 308, 309, - 310, 311, 435, 436, 437, 438, 439, 440, 441, 442, - 312, 501, 313, 314, 315, 316, 317, 318, 319, 320, - 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, - 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, - 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, - 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, - 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, - 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, - 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, - 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 446, 447, 0, 508, 0, 509, 0, 0, 0, + 0, 0, 511, 448, 449, 450, 451, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 316, 317, 318, 319, + 320, 0, 0, 0, 452, 453, 454, 455, 456, 321, + 322, 323, 324, 325, 326, 327, 512, 513, 514, 515, + 0, 516, 517, 518, 519, 520, 521, 522, 523, 524, + 525, 328, 329, 330, 331, 332, 333, 457, 458, 459, + 460, 461, 462, 463, 464, 334, 526, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, + 358, 359, 360, 361, 362, 363, 364, 365, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 
119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 424, 425, 0, 486, - 0, 487, 0, 0, 0, 0, 0, 489, 426, 427, - 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 294, 295, 296, 297, 298, 0, 0, 0, 430, - 431, 432, 433, 434, 299, 300, 301, 302, 303, 304, - 305, 490, 491, 492, 493, 0, 494, 495, 496, 497, - 498, 499, 500, 306, 307, 308, 309, 310, 311, 435, - 436, 437, 438, 439, 440, 441, 442, 312, 501, 313, - 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, - 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 0, 0, 0, 446, 447, 0, 508, 0, 430, 0, + 0, 0, 0, 0, 511, 448, 449, 450, 451, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 316, 317, + 318, 319, 320, 0, 0, 0, 452, 453, 454, 455, + 456, 321, 322, 323, 324, 325, 326, 327, 512, 513, + 514, 515, 0, 516, 517, 518, 519, 520, 521, 522, + 523, 524, 525, 328, 329, 330, 331, 332, 333, 457, + 458, 459, 460, 461, 462, 463, 464, 334, 526, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, + 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, + 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, @@ -1683,61 +1923,66 @@ static const yytype_int16 yytable[] = 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, - 291, 292, 0, 0, 422, 423, 0, 0, 0, 0, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 424, 425, 0, 486, 0, 408, 0, - 0, 0, 0, 0, 489, 426, 427, 428, 429, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 294, 295, - 296, 297, 298, 0, 0, 0, 430, 431, 432, 433, - 434, 299, 300, 301, 302, 303, 304, 305, 490, 491, - 492, 493, 0, 494, 495, 496, 497, 498, 499, 500, - 306, 307, 308, 309, 310, 311, 435, 436, 437, 438, - 439, 440, 441, 442, 312, 501, 313, 314, 315, 316, - 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, - 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, - 337, 338, 339, 340, 341, 342, 343, 1, 2, 3, - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, - 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 446, 447, 0, 508, 0, + 0, 0, 0, 0, 0, 0, 511, 448, 449, 450, + 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 316, 317, 318, 319, 320, 0, 0, 0, 452, 453, + 454, 455, 456, 321, 322, 323, 324, 325, 326, 327, + 512, 513, 514, 515, 0, 516, 517, 518, 519, 520, + 521, 522, 523, 524, 525, 328, 329, 330, 331, 332, + 333, 457, 458, 459, 460, 461, 462, 463, 464, 334, + 526, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, + 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 424, 425, 0, 486, 0, 0, 0, 0, 0, 0, - 0, 489, 426, 427, 428, 429, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 294, 295, 296, 297, 298, - 0, 0, 0, 430, 431, 432, 433, 434, 299, 300, - 301, 302, 303, 304, 305, 490, 491, 492, 493, 0, - 494, 495, 496, 497, 498, 499, 500, 306, 307, 308, - 309, 310, 311, 435, 436, 437, 438, 439, 440, 441, - 442, 312, 501, 313, 314, 315, 316, 317, 318, 319, - 320, 
321, 322, 323, 324, 325, 326, 327, 328, 329, - 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, - 340, 341, 342, 343, 1, 2, 3, 4, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 446, 447, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 511, 448, + 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 316, 317, 318, 319, 320, 0, 0, 0, + 452, 453, 454, 455, 456, 321, 322, 323, 324, 325, + 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 328, 329, 330, + 331, 332, 333, 457, 458, 459, 460, 461, 462, 463, + 464, 334, 0, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, + 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, + 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, @@ -1766,61 +2011,66 @@ static const yytype_int16 yytable[] = 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, - 287, 288, 289, 290, 291, 292, 0, 0, 422, 423, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 0, 0, + 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 446, + 447, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 448, 449, 450, 451, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 316, 317, 318, 319, 0, 0, + 0, 0, 452, 453, 454, 455, 456, 321, 322, 323, + 324, 325, 326, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 328, + 329, 330, 331, 332, 333, 457, 458, 459, 460, 461, + 462, 463, 464, 334, 0, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 424, 425, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 489, 426, - 427, 428, 429, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 294, 295, 296, 297, 298, 0, 0, 0, - 430, 431, 432, 433, 434, 299, 300, 301, 302, 303, - 304, 305, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 306, 307, 
308, 309, 310, 311, - 435, 436, 437, 438, 439, 440, 441, 442, 312, 0, - 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, - 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, - 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, - 343, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, - 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, - 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, - 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 426, 427, 428, 429, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 294, - 295, 296, 297, 0, 0, 0, 0, 430, 431, 432, - 433, 434, 299, 300, 301, 302, 303, 304, 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 306, 307, 308, 309, 310, 311, 435, 436, 437, - 438, 439, 440, 441, 442, 312, 0, 313, 314, 315, - 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, - 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, - 336, 337, 338, 339, 340, 341, 342, 343, 1, 2, + 0, 0, 315, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 316, 317, 318, 319, + 320, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 322, 323, 324, 325, 326, 327, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 328, 329, 330, 331, 332, 333, 0, 0, 0, + 0, 0, 0, 0, 0, 334, 0, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, + 358, 359, 360, 361, 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -1850,61 +2100,66 @@ static const yytype_int16 yytable[] = 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 409, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 316, 317, + 318, 319, 0, 0, 0, 0, 0, 0, 0, 0, + 410, 321, 322, 323, 324, 325, 326, 327, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 328, 329, 330, 331, 332, 333, 0, + 0, 0, 0, 0, 0, 0, 0, 334, 0, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, + 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, + 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, + 
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 293, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 294, 295, 296, 297, - 298, 0, 0, 0, 0, 0, 0, 0, 0, 299, - 300, 301, 302, 303, 304, 305, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 306, 307, - 308, 309, 310, 311, 0, 0, 0, 0, 0, 0, - 0, 0, 312, 0, 313, 314, 315, 316, 317, 318, - 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, - 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, - 339, 340, 341, 342, 343, 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, - 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, - 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 582, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 387, + 316, 317, 318, 319, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 321, 322, 323, 324, 325, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 294, 295, 296, 297, 0, 0, 0, - 0, 0, 0, 0, 0, 388, 299, 300, 301, 302, - 303, 304, 305, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 306, 307, 308, 309, 310, - 311, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 0, 313, 314, 315, 316, 317, 318, 319, 320, 321, - 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, - 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, - 342, 343, 1, 2, 3, 4, 5, 6, 7, 8, + 0, 0, 0, 0, 0, 328, 329, 330, 331, 332, + 333, 0, 0, 0, 0, 0, 0, 0, 0, 334, + 0, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, + 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, @@ -1933,61 +2188,66 @@ static const yytype_int16 yytable[] = 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, - 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 557, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 294, 295, 296, 297, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, + 0, 0, 316, 317, 318, 319, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 322, 323, 324, 325, + 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 328, 329, 330, + 331, 332, 333, 0, 0, 0, 0, 0, 0, 0, + 0, 334, 0, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, + 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, + 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, - 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, - 315, 316, 317, 318, 319, 320, 321, 
322, 323, 324, - 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, - 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, - 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 639, 0, + 0, 0, 0, 0, 0, 705, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, - 297, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 299, 300, 301, 302, 303, 304, 305, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 306, - 307, 308, 309, 310, 311, 0, 0, 0, 0, 0, - 0, 0, 0, 312, 0, 313, 314, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, - 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, - 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 0, 0, 0, 0, 316, 317, 318, 319, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 322, 323, + 324, 325, 326, 327, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 328, + 329, 330, 331, 332, 333, 0, 0, 0, 0, 0, + 0, 0, 0, 334, 0, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, @@ -2016,101 +2276,22 @@ static const yytype_int16 yytable[] = 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, - 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 677, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 294, 295, 296, 297, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 299, 300, 301, - 302, 303, 304, 305, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 306, 307, 308, 309, - 310, 311, 0, 0, 0, 0, 0, 0, 0, 0, - 312, 0, 313, 314, 315, 316, 317, 318, 319, 320, - 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, - 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, - 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 
58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, - 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, - 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, - 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, - 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, - 288, 289, 290, 291, 292, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 294, 295, 296, 297, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 299, 300, 301, 302, 303, 304, - 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 306, 307, 308, 309, 310, 311, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 0, 313, - 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, - 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 0, 0, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, - 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 424, 425, 0, 0, 0, 529, 696, 0, - 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, - 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 316, 317, 318, 319, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
321, + 322, 323, 324, 325, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, - 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, + 0, 328, 329, 330, 331, 332, 333, 0, 0, 0, + 0, 0, 0, 0, 0, 334, 0, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, + 358, 359, 360, 361, 362, 363, 364, 365, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, @@ -2139,58 +2320,63 @@ static const yytype_int16 yytable[] = 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, - 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, + 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 424, 425, 0, 0, 470, 0, 0, 0, 0, 0, - 0, 0, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 446, 447, 0, 0, 0, 554, 724, 0, + 0, 0, 0, 0, 448, 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, - 0, 0, 0, 304, 305, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 452, 453, 454, 455, 456, + 321, 0, 0, 0, 0, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, - 442, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 325, 2, 3, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 0, 0, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, - 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, - 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 0, 0, 422, - 423, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 424, 425, - 0, 0, 0, 529, 0, 0, 0, 0, 0, 0, - 426, 427, 428, 429, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 457, 458, + 459, 460, 461, 462, 463, 464, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 347, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 0, 0, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 
83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 430, 431, 432, 433, 434, 299, 0, 0, 0, - 0, 304, 305, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 446, 447, 0, 0, 492, + 0, 0, 0, 0, 0, 0, 0, 448, 449, 450, + 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 452, 453, + 454, 455, 456, 321, 0, 0, 0, 0, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 435, 436, 437, 438, 439, 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 325, 2, 3, 4, 5, 6, 7, + 0, 457, 458, 459, 460, 461, 462, 463, 464, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 347, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, @@ -2219,57 +2405,62 @@ static const yytype_int16 yytable[] = 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, - 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 0, 0, 444, + 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 446, 447, + 0, 0, 0, 554, 0, 0, 0, 0, 0, 0, + 448, 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 424, 425, 0, 0, - 585, 0, 0, 0, 0, 0, 0, 0, 426, 427, - 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 430, - 431, 432, 433, 434, 299, 0, 0, 0, 0, 304, - 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 435, - 436, 437, 438, 439, 440, 441, 442, 0, 0, 0, + 0, 452, 453, 454, 455, 456, 321, 0, 0, 0, + 0, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 325, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 0, - 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, - 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, - 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, - 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, + 0, 0, 0, 0, 457, 458, 459, 460, 461, 462, + 463, 464, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 347, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 0, 0, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 597, 426, 427, 428, 429, + 0, 446, 447, 0, 0, 610, 0, 0, 0, 0, + 0, 0, 0, 448, 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 430, 431, 432, - 433, 434, 299, 0, 0, 0, 0, 304, 305, 0, + 0, 0, 0, 0, 452, 453, 454, 455, 456, 321, + 0, 0, 0, 0, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 435, 436, 437, - 438, 439, 440, 441, 442, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, + 0, 0, 0, 0, 0, 0, 0, 457, 458, 459, + 460, 461, 462, 463, 464, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 347, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -2299,88 +2490,136 @@ static const yytype_int16 yytable[] = 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 0, 0, 
444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 424, 425, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 446, 447, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 622, 448, 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, - 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 452, 453, 454, + 455, 456, 321, 0, 0, 0, 0, 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, - 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 0, 0, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 453, - 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, - 424, 425, 0, 0, 0, 0, 0, 0, 0, 0, - 537, 538, 426, 427, 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, - 0, 0, 550, 304, 547, 0, 0, 0, 0, 0, - 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, - 442, 0, 471, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 325, 0, 0, 0, 0, + 457, 458, 459, 460, 461, 462, 463, 464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 347, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 
236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 0, 0, 444, 445, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 446, 447, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 448, + 449, 450, 451, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 452, 453, 454, 455, 456, 321, 0, 0, 0, 0, + 326, 327, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 457, 458, 459, 460, 461, 462, 463, + 464, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 347, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 0, 0, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, + 306, 307, 308, 309, 310, 311, 312, 313, 314, 0, + 0, 444, 445, 0, 0, 475, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 493, + 446, 447, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 448, 449, 450, 451, 562, 563, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 452, 453, 454, 455, 456, 321, 0, + 0, 0, 0, 326, 572, 0, 0, 0, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 493, + 0, 0, 0, 0, 0, 0, 457, 458, 459, 460, + 461, 462, 463, 464, 0, 0, 0, 0, 493, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 347, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 634, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 640, 641, 642, 471, 471, 471, 471, 471, - 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, - 471, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 662, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 668, 669, 670, 493, 493, 493, 493, 493, 493, + 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 471 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
493 }; static const yytype_int16 yycheck[] = { - 0, 324, 0, 317, 0, 319, 319, 319, 464, 0, - 319, 319, 326, 336, 327, 327, 317, 326, 326, 317, + 0, 732, 0, 341, 0, 486, 346, 383, 739, 0, + 386, 349, 388, 389, 346, 339, 392, 349, 358, 750, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 533, 327, 63, + 54, 55, 56, 57, 58, 59, 60, 558, 339, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, @@ -2404,50 +2643,97 @@ static const yytype_int16 yycheck[] = 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, 357, 349, 416, 425, 589, 527, 515, 529, 405, - 321, 532, 462, 386, 353, 299, 300, 304, 665, 392, - 295, 296, 319, 297, 298, 318, 382, 361, 327, 326, - 364, 378, 366, 367, 324, 374, 370, 327, 451, 349, - 413, 414, 353, 317, 324, 319, 693, 357, 349, 323, - 397, 335, 336, 340, 353, 365, 357, 365, 368, 365, - 354, 318, 301, 302, 365, 359, 360, 324, 378, 465, - 320, 353, 382, 704, 324, 335, 497, 378, 499, 324, - 711, 382, 327, 321, 318, 593, 324, 397, 484, 327, - 324, 722, 321, 318, 318, 318, 397, 318, 408, 324, - 324, 324, 623, 324, 460, 318, 322, 408, 324, 559, - 324, 324, 353, 327, 527, 462, 529, 464, 329, 532, - 331, 542, 321, 306, 307, 308, 309, 310, 311, 312, - 313, 314, 315, 546, 571, 572, 573, 574, 324, 324, - 325, 327, 726, 326, 324, 324, 353, 327, 327, 353, - 460, 319, 462, 327, 464, 676, 664, 324, 325, 460, - 326, 462, 353, 464, 342, 343, 344, 332, 333, 334, - 567, 568, 575, 576, 595, 569, 570, 627, 599, 318, - 317, 554, 353, 319, 353, 339, 533, 338, 337, 303, - 305, 320, 319, 317, 322, 716, 327, 327, 317, 317, - 317, 614, 615, 325, 327, 515, 327, 317, 317, 353, - 623, 353, 559, 318, 515, 353, 320, 320, 324, 317, - 361, 320, 322, 533, 732, 318, 317, 353, 318, 321, - 326, 318, 533, 327, 577, 321, 321, 578, 659, 327, - 365, 322, 579, 581, 665, 580, 582, 682, 382, 559, - 382, 298, 467, 553, 637, 667, 710, 722, 559, 723, - 667, 525, 378, 676, 376, 693, 525, 525, 365, 684, - -1, -1, 693, -1, -1, -1, -1, -1, -1, 589, - 627, -1, -1, 593, 705, 698, 682, -1, 589, -1, - -1, -1, 593, -1, -1, -1, -1, -1, -1, -1, - 721, -1, -1, 716, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 627, 665, -1, - -1, -1, -1, -1, -1, -1, 627, -1, -1, -1, - -1, -1, 688, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 693, -1, -1, -1, - -1, -1, -1, -1, 664, 665, -1, 667, -1, -1, - -1, -1, -1, 664, 665, -1, 667, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 688, -1, - -1, -1, -1, 693, -1, -1, -1, 688, -1, -1, - -1, -1, 693, -1, 704, -1, -1, -1, -1, -1, - -1, 711, -1, 704, -1, -1, -1, -1, -1, -1, - 711, -1, 722, -1, -1, -1, 726, -1, -1, -1, - -1, 722, 732, -1, -1, 726, -1, -1, -1, 0, - -1, 732, 3, 4, 5, 6, 7, 8, 9, 10, + 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, + 314, 315, 316, 379, 447, 371, 540, 438, 552, 427, + 554, 408, 484, 557, 343, 614, 326, 414, 321, 322, + 693, 341, 339, 346, 341, 319, 320, 340, 404, 349, + 349, 348, 340, 346, 400, 375, 341, 341, 435, 436, + 375, 371, 473, 348, 348, 339, 375, 341, 721, 379, + 371, 345, 362, 419, 357, 358, 375, 387, 379, 387, + 390, 387, 376, 349, 399, 341, 387, 381, 382, 487, + 400, 340, 348, 357, 404, 340, 519, 346, 521, 400, + 340, 346, 343, 404, 618, 340, 
346, 343, 506, 419, + 346, 346, 375, 349, 346, 340, 340, 349, 419, 342, + 430, 346, 346, 346, 343, 346, 482, 651, 349, 430, + 323, 324, 584, 596, 597, 598, 599, 375, 484, 346, + 486, 552, 349, 554, 567, 375, 557, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 341, 346, 346, + 571, 349, 349, 364, 365, 366, 349, 348, 354, 355, + 356, 351, 482, 353, 484, 754, 486, 344, 692, 346, + 704, 482, 348, 484, 375, 486, 317, 318, 346, 347, + 346, 347, 592, 593, 340, 594, 595, 620, 600, 601, + 339, 624, 579, 655, 341, 375, 375, 361, 360, 359, + 325, 327, 558, 342, 341, 339, 344, 349, 339, 349, + 744, 339, 339, 349, 347, 344, 375, 349, 339, 349, + 540, 642, 643, 339, 375, 349, 349, 340, 584, 540, + 651, 342, 375, 342, 346, 339, 760, 383, 558, 342, + 375, 340, 339, 343, 340, 340, 348, 558, 349, 343, + 343, 349, 344, 602, 687, 603, 606, 604, 710, 605, + 693, 489, 607, 320, 584, 387, 578, 665, 695, 738, + 750, 695, 751, 584, 721, 550, 712, 387, 550, 550, + -1, 400, -1, 704, 404, -1, 404, -1, 721, -1, + 398, -1, -1, -1, 614, -1, -1, -1, 618, 655, + 733, -1, 710, 614, -1, 726, -1, 618, -1, -1, + -1, -1, -1, -1, -1, -1, 749, -1, -1, -1, + -1, -1, -1, 744, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 655, -1, 693, -1, -1, + -1, -1, -1, -1, 655, -1, -1, -1, -1, -1, + 716, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 721, -1, -1, -1, -1, + -1, -1, 692, 693, -1, 695, -1, -1, -1, -1, + -1, 692, 693, -1, 695, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 716, -1, -1, -1, + -1, 721, -1, -1, -1, 716, -1, -1, -1, -1, + 721, -1, 732, -1, -1, -1, -1, -1, -1, 739, + -1, 732, -1, -1, -1, -1, -1, -1, 739, -1, + 750, -1, -1, -1, 754, -1, -1, -1, -1, 750, + 760, -1, -1, 754, -1, -1, -1, 0, -1, 760, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 316, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 349, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 363, 364, 365, 366, 367, -1, -1, -1, -1, -1, + -1, -1, -1, 376, 377, 378, 379, 380, 381, 382, + -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, + -1, -1, -1, -1, -1, 398, 399, 400, 401, 402, + 403, -1, -1, -1, -1, -1, -1, -1, -1, 412, + -1, 414, 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, + 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, @@ -2476,61 +2762,66 @@ static const yytype_int16 yycheck[] = 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, - 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 327, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 341, 342, 343, 344, 345, -1, -1, -1, -1, -1, - -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, - -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, + -1, -1, -1, -1, -1, -1, -1, 338, 339, -1, + 341, -1, 343, 344, -1, -1, -1, -1, 349, 350, + 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 363, 364, 365, 366, 367, -1, -1, -1, + 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, + 381, 382, 383, 384, 385, 386, -1, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 316, 317, -1, 319, -1, 321, 322, -1, - -1, -1, -1, 327, 328, 329, 330, 331, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, - 344, 345, -1, -1, -1, 349, 350, 351, 352, 353, - 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, - 364, -1, 366, 367, 368, 369, 370, 371, 372, 373, - 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, - 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, - 
394, 395, 396, 397, 398, 399, 400, 401, 402, 403, - 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, - 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, + 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, + 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, + 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, -1, -1, + 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 338, + 339, -1, 341, -1, 343, 344, -1, -1, -1, -1, + 349, 350, 351, 352, 353, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 363, 364, 365, 366, 367, -1, + -1, -1, 371, 372, 373, 374, 375, 376, 377, 378, + 379, 380, 381, 382, 383, 384, 385, 386, -1, 388, + 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, + 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, + 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, + 439, 440, 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, @@ -2559,61 +2850,66 @@ static const yytype_int16 yycheck[] = 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, - 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, - 297, 298, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 316, - 317, -1, 319, -1, 321, 322, -1, -1, -1, -1, - 327, 328, 329, 330, 331, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 341, 342, 343, 344, 345, -1, - -1, -1, 349, 350, 351, 352, 353, 354, 355, 356, - 357, 358, 359, 360, 361, 362, 363, 364, -1, 366, - 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, + -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 338, 339, -1, 341, -1, 343, -1, -1, -1, + -1, -1, 349, 
350, 351, 352, 353, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 363, 364, 365, 366, + 367, -1, -1, -1, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, - 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, + -1, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, - 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, - 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, - 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, - 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, + 437, 438, 439, 440, 441, 442, 443, 444, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 316, 317, -1, 319, - -1, 321, -1, -1, -1, -1, -1, 327, 328, 329, - 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 341, 342, 343, 344, 345, -1, -1, -1, 349, - 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, - 360, 361, 362, 363, 364, -1, 366, 367, 368, 369, - 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, - 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, - 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, - 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, - 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + -1, -1, -1, 338, 339, -1, 341, -1, 343, -1, + -1, -1, -1, -1, 349, 350, 351, 352, 353, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 363, 364, + 365, 366, 367, -1, -1, -1, 371, 372, 373, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, -1, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, @@ -2643,61 +2939,66 @@ static const yytype_int16 yycheck[] = 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, - 293, 294, -1, -1, 297, 298, -1, -1, -1, -1, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 316, 317, -1, 319, -1, 321, -1, - -1, -1, -1, -1, 327, 328, 329, 330, 331, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 341, 342, - 343, 344, 345, -1, -1, -1, 349, 350, 351, 352, - 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, - 363, 364, -1, 366, 367, 368, 369, 370, 371, 372, + -1, -1, -1, -1, -1, 338, 339, -1, 341, -1, + -1, -1, -1, -1, -1, -1, 349, 350, 351, 352, + 353, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 363, 364, 365, 366, 367, -1, -1, -1, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, - 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, + 383, 384, 385, 386, -1, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, - 413, 414, 415, 416, 417, 418, 419, 3, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 
- 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, - 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, - 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, - -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, + 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 316, 317, -1, 319, -1, -1, -1, -1, -1, -1, - -1, 327, 328, 329, 330, 331, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 341, 342, 343, 344, 345, - -1, -1, -1, 349, 350, 351, 352, 353, 354, 355, - 356, 357, 358, 359, 360, 361, 362, 363, 364, -1, - 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, - 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, - 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, - 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, - 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, - 416, 417, 418, 419, 3, 4, 5, 6, 7, 8, + -1, -1, -1, -1, -1, -1, -1, 338, 339, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 349, 350, + 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 363, 364, 365, 366, 367, -1, -1, -1, + 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, + 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, -1, 414, 415, 416, 417, 418, 419, 420, + 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, + 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, + 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, @@ -2726,61 +3027,66 @@ static const yytype_int16 yycheck[] = 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, - 289, 
290, 291, 292, 293, 294, -1, -1, 297, 298, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 316, 317, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 327, 328, - 329, 330, 331, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 341, 342, 343, 344, 345, -1, -1, -1, - 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, - 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 373, 374, 375, 376, 377, 378, - 379, 380, 381, 382, 383, 384, 385, 386, 387, -1, - 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, -1, -1, + 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 338, + 339, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 350, 351, 352, 353, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 363, 364, 365, 366, -1, -1, + -1, -1, 371, 372, 373, 374, 375, 376, 377, 378, + 379, 380, 381, 382, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, - 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, - 419, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, - 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + 409, 410, 411, 412, -1, 414, 415, 416, 417, 418, + 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, + 439, 440, 441, 442, 443, 444, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 328, 329, 330, 331, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 341, - 342, 343, 344, -1, -1, -1, -1, 349, 350, 351, - 352, 353, 354, 355, 356, 357, 358, 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 373, 374, 375, 376, 377, 378, 379, 380, 381, - 382, 383, 384, 385, 386, 387, -1, 389, 390, 391, - 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, - 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, - 412, 413, 414, 415, 416, 417, 418, 419, 3, 4, + -1, -1, 349, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 363, 364, 365, 366, + 367, -1, -1, -1, -1, -1, -1, -1, -1, 376, + 377, 378, 379, 380, 381, 382, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 398, 399, 400, 401, 402, 403, -1, -1, -1, + -1, -1, -1, -1, -1, 412, -1, 414, 415, 416, + 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, + 437, 438, 439, 440, 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, @@ -2810,61 +3116,66 @@ static const yytype_int16 yycheck[] = 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 349, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 363, 364, + 365, 366, -1, -1, -1, -1, -1, -1, -1, -1, + 375, 376, 377, 378, 379, 380, 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 327, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 341, 342, 343, 344, - 345, -1, -1, -1, -1, -1, -1, -1, -1, 354, - 355, 356, 357, 358, 359, 360, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 373, 374, - 375, 376, 377, 378, -1, -1, -1, -1, -1, -1, - -1, -1, 387, -1, 389, 390, 391, 392, 393, 394, - 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, - 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 
120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, - 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, - 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, - 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, - 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, - 288, 289, 290, 291, 292, 293, 294, -1, -1, -1, + -1, -1, -1, 398, 399, 400, 401, 402, 403, -1, + -1, -1, -1, -1, -1, -1, -1, 412, -1, 414, + 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 327, + -1, 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 341, 342, 343, 344, -1, -1, -1, - -1, -1, -1, -1, -1, 353, 354, 355, 356, 357, - 358, 359, 360, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 373, 374, 375, 376, 377, - 378, -1, -1, -1, -1, -1, -1, -1, -1, 387, - -1, 389, 390, 391, 392, 393, 394, 395, 396, 397, - 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, - 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, - 418, 419, 3, 4, 5, 6, 7, 8, 9, 10, + 363, 364, 365, 366, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 376, 377, 378, 379, 380, 381, 382, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 398, 399, 400, 401, 402, + 403, -1, -1, -1, 
-1, -1, -1, -1, -1, 412, + -1, 414, 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, + 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, @@ -2893,61 +3204,66 @@ static const yytype_int16 yycheck[] = 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, - 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 322, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 341, 342, 343, 344, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, + -1, -1, 363, 364, 365, 366, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 376, 377, 378, 379, 380, + 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 398, 399, 400, + 401, 402, 403, -1, -1, -1, -1, -1, -1, -1, + -1, 412, -1, 414, 415, 416, 417, 418, 419, 420, + 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, + 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, + 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, + 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, - -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, - 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, - 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 
72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 322, -1, + -1, -1, -1, -1, -1, 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, - 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 354, 355, 356, 357, 358, 359, 360, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 373, - 374, 375, 376, 377, 378, -1, -1, -1, -1, -1, - -1, -1, -1, 387, -1, 389, 390, 391, 392, 393, - 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, - 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, - 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + -1, -1, -1, -1, 363, 364, 365, 366, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 376, 377, 378, + 379, 380, 381, 382, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 398, + 399, 400, 401, 402, 403, -1, -1, -1, -1, -1, + -1, -1, -1, 412, -1, 414, 415, 416, 417, 418, + 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, + 439, 440, 441, 442, 443, 444, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, @@ -2976,101 +3292,22 @@ static const yytype_int16 yycheck[] = 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, - 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 322, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 341, 342, 343, 344, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 354, 355, 356, - 357, 358, 359, 360, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 373, 374, 375, 376, - 377, 378, -1, -1, -1, -1, -1, -1, -1, -1, - 387, -1, 389, 390, 391, 392, 393, 394, 395, 396, - 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, - 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, - 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, - 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, - 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, - 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, - 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, - 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293, 294, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 341, 342, 343, 344, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 354, 355, 356, 357, 358, 359, - 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 373, 374, 375, 376, 377, 378, -1, - -1, -1, -1, -1, -1, -1, -1, 387, -1, 389, - 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, - 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, - 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, -1, -1, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, - 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, - 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 316, 317, -1, -1, -1, 321, 322, -1, - -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, - 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 363, 364, 365, 366, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 376, + 377, 378, 379, 380, 381, 382, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, - 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, + -1, 398, 399, 400, 401, 402, 403, -1, -1, -1, + -1, -1, -1, -1, -1, 412, -1, 414, 415, 416, + 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, + 437, 438, 439, 440, 441, 442, 443, 444, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, @@ -3099,58 +3336,63 @@ static const yytype_int16 yycheck[] = 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, - -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, + 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, + 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 316, 317, -1, -1, 320, -1, -1, -1, -1, -1, - -1, -1, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, 338, 339, -1, -1, -1, 343, 344, -1, + -1, -1, -1, -1, 350, 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, - -1, -1, -1, 359, 360, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 371, 372, 373, 374, 375, + 376, -1, -1, -1, -1, 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 379, 380, 381, 382, 383, 384, 385, - 386, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 401, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, -1, -1, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, - 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, - 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, - 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, - 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, - 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, - 288, 289, 290, 291, 292, 293, 294, -1, -1, 297, - 298, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, 316, 317, - -1, -1, -1, 321, -1, -1, -1, -1, -1, -1, - 328, 329, 330, 331, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 404, 405, + 406, 407, 408, 409, 410, 411, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 426, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 
42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, -1, -1, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 316, -1, -1, 319, 320, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 338, 339, -1, -1, 342, + -1, -1, -1, -1, -1, -1, -1, 350, 351, 352, + 353, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 371, 372, + 373, 374, 375, 376, -1, -1, -1, -1, 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 349, 350, 351, 352, 353, 354, -1, -1, -1, - -1, 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 379, 380, 381, 382, 383, 384, 385, 386, -1, + -1, 404, 405, 406, 407, 408, 409, 410, 411, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 401, 4, 5, 6, 7, 8, 9, + -1, -1, -1, 426, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, @@ -3179,57 +3421,62 @@ static const yytype_int16 yycheck[] = 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, + 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, -1, -1, 319, + 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 338, 339, + -1, -1, -1, 343, -1, -1, -1, -1, -1, -1, + 350, 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, 316, 317, -1, -1, - 320, -1, -1, -1, -1, -1, -1, -1, 328, 329, - 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 349, - 350, 351, 352, 353, 354, -1, -1, -1, -1, 359, - 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 379, - 380, 381, 382, 383, 384, 385, 386, -1, -1, -1, + -1, 371, 372, 373, 374, 375, 376, -1, -1, -1, + -1, 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 401, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, -1, - -1, 63, 64, 
65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, - 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, - 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + -1, -1, -1, -1, 404, 405, 406, 407, 408, 409, + 410, 411, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 426, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, -1, -1, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, + -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 327, 328, 329, 330, 331, + -1, 338, 339, -1, -1, 342, -1, -1, -1, -1, + -1, -1, -1, 350, 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 349, 350, 351, - 352, 353, 354, -1, -1, -1, -1, 359, 360, -1, + -1, -1, -1, -1, 371, 372, 373, 374, 375, 376, + -1, -1, -1, -1, 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 379, 380, 381, - 382, 383, 384, 385, 386, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 401, + -1, -1, -1, -1, -1, -1, -1, 404, 405, 406, + 
407, 408, 409, 410, 411, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 426, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, @@ -3259,66 +3506,113 @@ static const yytype_int16 yycheck[] = 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 316, 317, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, + 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, + 314, 315, 316, -1, -1, 319, 320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 338, 339, -1, -1, -1, -1, -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, - 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, - 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, -1, -1, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, - 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, - 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, - 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, 294, 391, - -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 405, -1, -1, -1, -1, -1, -1, - 316, 317, -1, -1, -1, -1, -1, -1, -1, -1, - 422, 423, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 371, 372, 373, + 374, 375, 376, -1, -1, -1, -1, 381, 382, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 404, 405, 406, 407, 408, 409, 410, 411, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 426, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + -1, -1, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 
137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, -1, -1, 319, 320, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 338, 339, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 350, + 351, 352, 353, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 371, 372, 373, 374, 375, 376, -1, -1, -1, -1, + 381, 382, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 404, 405, 406, 407, 408, 409, 410, + 411, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 426, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, -1, -1, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 315, 316, -1, + -1, 319, 320, -1, -1, 413, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 427, + 338, 339, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 350, 351, 352, 353, 444, 445, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 371, 372, 373, 374, 375, 376, -1, + -1, -1, -1, 381, 382, -1, -1, -1, 476, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 487, + -1, -1, -1, -1, -1, -1, 404, 405, 406, 407, + 408, 409, 410, 411, -1, -1, -1, -1, 506, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 426, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, - -1, -1, 454, 359, 
360, -1, -1, -1, -1, -1, - -1, -1, -1, 465, -1, -1, -1, -1, -1, -1, - -1, -1, -1, 379, 380, 381, 382, 383, 384, 385, - 386, -1, 484, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 401, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, 551, + -1, -1, -1, -1, -1, -1, -1, -1, 576, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, 564, 565, 566, 567, 568, 569, 570, 571, - 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, - 582, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 589, 590, 591, 592, 593, 594, 595, 596, 597, + 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3328,12 +3622,13 @@ static const yytype_int16 yycheck[] = -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 682 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 710 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. */ -static const yytype_uint16 yystos[] = +static const yytype_int16 yystos[] = { 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, @@ -3364,120 +3659,125 @@ static const yytype_uint16 yystos[] = 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, - 292, 293, 294, 327, 341, 342, 343, 344, 345, 354, - 355, 356, 357, 358, 359, 360, 373, 374, 375, 376, - 377, 378, 387, 389, 390, 391, 392, 393, 394, 395, - 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, - 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, - 416, 417, 418, 419, 451, 452, 455, 456, 457, 458, - 462, 463, 464, 465, 466, 467, 470, 471, 472, 473, - 474, 476, 481, 482, 483, 524, 525, 526, 482, 321, - 353, 317, 317, 327, 353, 327, 527, 318, 324, 459, - 460, 461, 471, 476, 324, 327, 353, 327, 353, 472, - 476, 335, 478, 479, 0, 525, 476, 485, 321, 353, - 374, 468, 469, 353, 475, 319, 327, 477, 321, 503, - 460, 459, 461, 353, 353, 317, 326, 477, 321, 324, - 327, 454, 297, 298, 316, 317, 328, 329, 330, 331, - 349, 350, 351, 352, 353, 379, 380, 381, 382, 383, - 384, 385, 386, 421, 422, 423, 425, 426, 427, 428, - 429, 430, 431, 432, 433, 474, 476, 480, 477, 327, - 471, 476, 486, 487, 484, 326, 318, 324, 318, 324, - 320, 432, 434, 435, 436, 437, 438, 439, 440, 441, - 442, 443, 444, 445, 319, 327, 319, 321, 322, 327, - 361, 362, 363, 364, 366, 367, 368, 369, 370, 371, - 372, 388, 432, 445, 447, 449, 451, 455, 474, 476, - 492, 493, 494, 495, 496, 504, 505, 506, 507, 510, - 511, 514, 515, 516, 523, 528, 477, 326, 477, 321, - 447, 490, 326, 453, 353, 324, 327, 432, 432, 449, - 297, 298, 319, 323, 318, 318, 324, 360, 447, 317, - 432, 324, 336, 476, 353, 488, 489, 322, 487, 486, - 445, 450, 469, 353, 332, 333, 334, 329, 331, 295, - 296, 299, 300, 335, 336, 301, 302, 339, 338, 337, - 303, 305, 304, 340, 320, 320, 445, 319, 322, 497, - 317, 327, 327, 518, 317, 317, 327, 327, 449, 317, - 449, 325, 327, 306, 307, 308, 309, 310, 311, 312, - 313, 314, 315, 326, 448, 324, 327, 322, 493, 507, - 511, 516, 490, 326, 490, 491, 490, 486, 353, 318, - 424, 449, 353, 447, 432, 488, 477, 324, 327, 322, - 432, 432, 432, 434, 434, 435, 435, 436, 436, 436, - 436, 437, 437, 438, 439, 440, 441, 442, 443, 446, - 320, 353, 529, 530, 504, 517, 493, 519, 449, 327, - 449, 325, 447, 
447, 490, 322, 324, 322, 320, 327, - 489, 449, 317, 320, 324, 498, 449, 464, 471, 509, - 361, 492, 505, 520, 318, 318, 322, 490, 325, 450, - 320, 530, 322, 353, 318, 317, 509, 521, 522, 500, - 501, 502, 508, 512, 447, 318, 326, 494, 499, 503, - 449, 327, 318, 365, 496, 494, 321, 490, 318, 449, - 499, 500, 504, 513, 327, 322 + 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 315, 316, 349, 363, 364, 365, 366, + 367, 376, 377, 378, 379, 380, 381, 382, 398, 399, + 400, 401, 402, 403, 412, 414, 415, 416, 417, 418, + 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, + 439, 440, 441, 442, 443, 444, 476, 477, 480, 481, + 482, 483, 487, 488, 489, 490, 491, 492, 495, 496, + 497, 498, 499, 501, 506, 507, 508, 549, 550, 551, + 507, 343, 375, 339, 339, 349, 375, 349, 552, 340, + 346, 484, 485, 486, 496, 501, 346, 349, 375, 349, + 375, 497, 501, 357, 503, 504, 0, 550, 501, 510, + 343, 375, 399, 493, 494, 375, 500, 341, 349, 502, + 343, 528, 485, 484, 486, 375, 375, 339, 348, 502, + 343, 346, 349, 479, 319, 320, 338, 339, 350, 351, + 352, 353, 371, 372, 373, 374, 375, 404, 405, 406, + 407, 408, 409, 410, 411, 446, 447, 448, 450, 451, + 452, 453, 454, 455, 456, 457, 458, 499, 501, 505, + 502, 349, 496, 501, 511, 512, 509, 348, 340, 346, + 340, 346, 342, 457, 459, 460, 461, 462, 463, 464, + 465, 466, 467, 468, 469, 470, 341, 349, 341, 343, + 344, 349, 383, 384, 385, 386, 388, 389, 390, 391, + 392, 393, 394, 395, 396, 397, 413, 457, 470, 472, + 474, 476, 480, 499, 501, 517, 518, 519, 520, 521, + 529, 530, 531, 532, 535, 536, 539, 540, 541, 548, + 553, 502, 348, 502, 343, 472, 515, 348, 478, 375, + 346, 349, 457, 457, 474, 319, 320, 341, 345, 340, + 340, 346, 382, 472, 339, 457, 346, 358, 501, 375, + 513, 514, 344, 512, 511, 470, 475, 494, 375, 354, + 355, 356, 351, 353, 317, 318, 321, 322, 357, 358, + 323, 324, 361, 360, 359, 325, 327, 326, 362, 342, + 342, 470, 341, 344, 522, 339, 349, 349, 543, 339, + 339, 349, 349, 474, 339, 474, 347, 349, 349, 349, + 349, 328, 329, 330, 331, 332, 333, 334, 335, 336, + 337, 348, 473, 346, 349, 344, 518, 532, 536, 541, + 515, 348, 515, 516, 515, 511, 375, 340, 449, 474, + 375, 472, 457, 513, 502, 346, 349, 344, 457, 457, + 457, 459, 459, 460, 460, 461, 461, 461, 461, 462, + 462, 463, 464, 465, 466, 467, 468, 471, 342, 375, + 554, 555, 529, 542, 518, 544, 474, 349, 474, 347, + 472, 472, 515, 344, 346, 344, 342, 349, 514, 474, + 339, 342, 346, 523, 474, 489, 496, 534, 383, 517, + 530, 545, 340, 340, 344, 515, 347, 475, 342, 555, + 344, 375, 340, 339, 534, 546, 547, 525, 526, 527, + 533, 537, 472, 340, 348, 519, 524, 528, 474, 349, + 340, 387, 521, 519, 343, 515, 340, 474, 524, 525, + 529, 538, 349, 344 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ -static const yytype_uint16 yyr1[] = +static const yytype_int16 yyr1[] = { - 0, 420, 421, 422, 422, 422, 422, 422, 422, 422, - 422, 422, 422, 422, 422, 422, 422, 422, 423, 423, - 423, 423, 423, 423, 424, 425, 426, 427, 427, 428, - 428, 429, 429, 430, 431, 431, 431, 432, 432, 432, - 432, 433, 433, 433, 433, 434, 434, 434, 434, 435, - 435, 435, 436, 436, 436, 437, 437, 437, 437, 437, - 438, 438, 438, 439, 439, 440, 440, 441, 441, 442, - 442, 443, 443, 444, 444, 445, 446, 445, 447, 447, - 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, - 448, 449, 449, 450, 451, 451, 451, 451, 451, 451, - 451, 451, 451, 453, 452, 454, 454, 455, 456, 456, - 457, 457, 458, 459, 459, 460, 460, 460, 460, 461, - 462, 462, 462, 462, 462, 463, 463, 463, 463, 463, - 464, 464, 465, 466, 466, 466, 466, 466, 466, 466, - 466, 467, 468, 468, 469, 469, 469, 470, 471, 471, - 472, 472, 472, 472, 472, 472, 472, 473, 473, 473, - 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, - 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 0, 445, 446, 447, 447, 447, 447, 447, 447, 447, + 447, 447, 447, 447, 447, 447, 447, 447, 448, 448, + 448, 448, 448, 448, 449, 450, 451, 452, 452, 453, + 453, 454, 454, 455, 456, 456, 456, 457, 457, 457, + 457, 458, 458, 458, 458, 459, 459, 459, 459, 460, + 460, 460, 461, 461, 461, 462, 462, 462, 462, 462, + 463, 463, 463, 464, 464, 465, 465, 466, 466, 467, + 467, 468, 468, 469, 469, 470, 471, 470, 472, 472, 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, - 473, 473, 474, 475, 475, 476, 476, 477, 477, 477, - 477, 478, 478, 479, 480, 480, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, - 481, 481, 481, 481, 481, 481, 481, 481, 482, 482, - 482, 484, 483, 485, 483, 486, 486, 487, 487, 488, - 488, 489, 489, 490, 490, 490, 491, 491, 492, 493, - 493, 494, 494, 494, 494, 494, 494, 494, 494, 495, - 496, 497, 498, 496, 499, 499, 501, 500, 502, 500, - 503, 503, 504, 504, 505, 505, 506, 506, 507, 508, - 508, 509, 509, 510, 510, 512, 511, 513, 513, 514, - 514, 515, 515, 517, 516, 518, 516, 519, 516, 520, - 520, 521, 521, 522, 522, 523, 523, 523, 523, 523, - 524, 524, 525, 525, 525, 527, 526, 528, 529, 
529, - 530, 530 + 473, 474, 474, 475, 476, 476, 476, 476, 476, 476, + 476, 476, 476, 478, 477, 479, 479, 480, 481, 481, + 482, 482, 483, 484, 484, 485, 485, 485, 485, 486, + 487, 487, 487, 487, 487, 488, 488, 488, 488, 488, + 489, 489, 490, 491, 491, 491, 491, 491, 491, 491, + 491, 492, 493, 493, 494, 494, 494, 495, 496, 496, + 497, 497, 497, 497, 497, 497, 497, 498, 498, 498, + 498, 498, 498, 498, 498, 498, 498, 498, 498, 498, + 498, 498, 498, 498, 498, 498, 498, 498, 498, 498, + 498, 498, 498, 498, 498, 498, 498, 498, 498, 498, + 498, 498, 499, 500, 500, 501, 501, 502, 502, 502, + 502, 503, 503, 504, 505, 505, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 506, 506, 506, 506, 506, 506, 506, 506, 506, 506, + 507, 507, 507, 509, 508, 510, 508, 511, 511, 512, + 512, 513, 513, 514, 514, 515, 515, 515, 516, 516, + 517, 518, 518, 519, 519, 519, 519, 519, 519, 519, + 519, 520, 521, 522, 523, 521, 524, 524, 526, 525, + 527, 525, 528, 528, 529, 529, 530, 530, 531, 531, + 532, 533, 533, 534, 534, 535, 535, 537, 536, 538, + 538, 539, 539, 540, 540, 542, 541, 543, 541, 544, + 541, 545, 545, 546, 546, 547, 547, 548, 548, 548, + 548, 548, 548, 548, 548, 549, 549, 550, 550, 550, + 552, 551, 553, 554, 554, 555, 555 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. 
*/ -static const yytype_uint8 yyr2[] = +static const yytype_int8 yyr2[] = { 0, 2, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, @@ -3529,23 +3829,25 @@ static const yytype_uint8 yyr2[] = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 0, 6, 0, 5, 1, 2, 3, 4, 1, - 3, 1, 2, 1, 3, 4, 1, 3, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 2, 0, 0, 5, 1, 1, 0, 2, 0, 2, - 2, 3, 1, 2, 1, 2, 1, 2, 5, 3, - 1, 1, 4, 1, 2, 0, 8, 0, 1, 3, - 2, 1, 2, 0, 6, 0, 8, 0, 7, 1, - 1, 1, 0, 2, 3, 2, 2, 2, 3, 2, - 1, 2, 1, 1, 1, 0, 3, 5, 1, 3, - 1, 4 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 6, 0, 5, 1, 2, 3, + 4, 1, 3, 1, 2, 1, 3, 4, 1, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 2, 2, 0, 0, 5, 1, 1, 0, 2, + 0, 2, 2, 3, 1, 2, 1, 2, 1, 2, + 5, 3, 1, 1, 4, 1, 2, 0, 8, 0, + 1, 3, 2, 1, 2, 0, 6, 0, 8, 0, + 7, 1, 1, 1, 0, 2, 3, 2, 2, 2, + 3, 2, 2, 2, 2, 1, 2, 1, 1, 1, + 0, 3, 5, 1, 3, 1, 4 }; +enum { YYENOMEM = -2 }; + #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab @@ -3554,27 +3856,26 @@ static const yytype_uint8 yyr2[] = #define YYRECOVERING() (!!yyerrstatus) -#define YYBACKUP(Token, Value) \ -do \ - if (yychar == YYEMPTY) \ - { \ - yychar = (Token); \ - yylval = (Value); \ - YYPOPSTACK (yylen); \ - yystate = *yyssp; \ - goto yybackup; \ - } \ - else \ - { \ - yyerror (pParseContext, YY_("syntax error: cannot back up")); \ - YYERROR; \ - } \ -while (0) - -/* Error token number */ -#define YYTERROR 1 -#define YYERRCODE 256 - +#define YYBACKUP(Token, Value) \ + do \ + if (yychar == YYEMPTY) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + YYPOPSTACK (yylen); \ + yystate = *yyssp; \ + goto yybackup; \ + } \ + else \ + { \ + yyerror (pParseContext, YY_("syntax error: cannot back up")); \ + YYERROR; \ + } \ + while (0) + +/* Backward compatibility with an undocumented macro. + Use YYerror or YYUNDEF. */ +#define YYERRCODE YYUNDEF /* Enable debugging if requested. */ @@ -3592,55 +3893,59 @@ do { \ } while (0) /* This macro is provided for backward compatibility. */ -#ifndef YY_LOCATION_PRINT -# define YY_LOCATION_PRINT(File, Loc) ((void) 0) -#endif +# ifndef YY_LOCATION_PRINT +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +# endif -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ - Type, Value, pParseContext); \ + Kind, Value, pParseContext); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) -/*----------------------------------------. -| Print this symbol's value on YYOUTPUT. | -`----------------------------------------*/ +/*-----------------------------------. +| Print this symbol's value on YYO. 
| +`-----------------------------------*/ static void -yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +yy_symbol_value_print (FILE *yyo, + yysymbol_kind_t yykind, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) { - FILE *yyo = yyoutput; - YYUSE (yyo); + FILE *yyoutput = yyo; + YYUSE (yyoutput); YYUSE (pParseContext); if (!yyvaluep) return; # ifdef YYPRINT - if (yytype < YYNTOKENS) - YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); + if (yykind < YYNTOKENS) + YYPRINT (yyo, yytoknum[yykind], *yyvaluep); # endif - YYUSE (yytype); + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE (yykind); + YY_IGNORE_MAYBE_UNINITIALIZED_END } -/*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ +/*---------------------------. +| Print this symbol on YYO. | +`---------------------------*/ static void -yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +yy_symbol_print (FILE *yyo, + yysymbol_kind_t yykind, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) { - YYFPRINTF (yyoutput, "%s %s (", - yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); + YYFPRINTF (yyo, "%s %s (", + yykind < YYNTOKENS ? "token" : "nterm", yysymbol_name (yykind)); - yy_symbol_value_print (yyoutput, yytype, yyvaluep, pParseContext); - YYFPRINTF (yyoutput, ")"); + yy_symbol_value_print (yyo, yykind, yyvaluep, pParseContext); + YYFPRINTF (yyo, ")"); } /*------------------------------------------------------------------. @@ -3649,7 +3954,7 @@ yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, gls `------------------------------------------------------------------*/ static void -yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) +yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop) { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) @@ -3672,21 +3977,21 @@ do { \ `------------------------------------------------*/ static void -yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, glslang::TParseContext* pParseContext) +yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp, + int yyrule, glslang::TParseContext* pParseContext) { - unsigned long int yylno = yyrline[yyrule]; + int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; - YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", + YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, - yystos[yyssp[yyi + 1 - yynrhs]], - &(yyvsp[(yyi + 1) - (yynrhs)]) - , pParseContext); + YY_ACCESSING_SYMBOL (+yyssp[yyi + 1 - yynrhs]), + &yyvsp[(yyi + 1) - (yynrhs)], pParseContext); YYFPRINTF (stderr, "\n"); } } @@ -3701,8 +4006,8 @@ do { \ multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ -# define YYDPRINTF(Args) -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) +# define YYDPRINTF(Args) ((void) 0) +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !YYDEBUG */ @@ -3725,28 +4030,76 @@ int yydebug; #endif -#if YYERROR_VERBOSE +/* Context of a parse error. 
*/ +typedef struct +{ + yy_state_t *yyssp; + yysymbol_kind_t yytoken; +} yypcontext_t; + +/* Put in YYARG at most YYARGN of the expected tokens given the + current YYCTX, and return the number of tokens stored in YYARG. If + YYARG is null, return the number of expected tokens (guaranteed to + be less than YYNTOKENS). Return YYENOMEM on memory exhaustion. + Return 0 if there are more than YYARGN expected tokens, yet fill + YYARG up to YYARGN. */ +static int +yypcontext_expected_tokens (const yypcontext_t *yyctx, + yysymbol_kind_t yyarg[], int yyargn) +{ + /* Actual size of YYARG. */ + int yycount = 0; + int yyn = yypact[+*yyctx->yyssp]; + if (!yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for + this state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yyx; + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != YYSYMBOL_YYerror + && !yytable_value_is_error (yytable[yyx + yyn])) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = YY_CAST (yysymbol_kind_t, yyx); + } + } + if (yyarg && yycount == 0 && 0 < yyargn) + yyarg[0] = YYSYMBOL_YYEMPTY; + return yycount; +} -# ifndef yystrlen -# if defined __GLIBC__ && defined _STRING_H -# define yystrlen strlen -# else + + + +#ifndef yystrlen +# if defined __GLIBC__ && defined _STRING_H +# define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S))) +# else /* Return the length of YYSTR. */ -static YYSIZE_T +static YYPTRDIFF_T yystrlen (const char *yystr) { - YYSIZE_T yylen; + YYPTRDIFF_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } -# endif # endif +#endif -# ifndef yystpcpy -# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE -# define yystpcpy stpcpy -# else +#ifndef yystpcpy +# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +# define yystpcpy stpcpy +# else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ static char * @@ -3760,10 +4113,10 @@ yystpcpy (char *yydest, const char *yysrc) return yyd - 1; } -# endif # endif +#endif -# ifndef yytnamerr +#ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string @@ -3771,14 +4124,13 @@ yystpcpy (char *yydest, const char *yysrc) backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. */ -static YYSIZE_T +static YYPTRDIFF_T yytnamerr (char *yyres, const char *yystr) { if (*yystr == '"') { - YYSIZE_T yyn = 0; + YYPTRDIFF_T yyn = 0; char const *yyp = yystr; - for (;;) switch (*++yyp) { @@ -3789,7 +4141,10 @@ yytnamerr (char *yyres, const char *yystr) case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; - /* Fall through. */ + else + goto append; + + append: default: if (yyres) yyres[yyn] = *yyp; @@ -3804,36 +4159,20 @@ yytnamerr (char *yyres, const char *yystr) do_not_strip_quotes: ; } - if (! 
yyres) + if (yyres) + return yystpcpy (yyres, yystr) - yyres; + else return yystrlen (yystr); - - return yystpcpy (yyres, yystr) - yyres; } -# endif +#endif -/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message - about the unexpected token YYTOKEN for the state stack whose top is - YYSSP. - Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is - not large enough to hold the message. In that case, also set - *YYMSG_ALLOC to the required number of bytes. Return 2 if the - required number of bytes is too large to store. */ static int -yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, - yytype_int16 *yyssp, int yytoken) +yy_syntax_error_arguments (const yypcontext_t *yyctx, + yysymbol_kind_t yyarg[], int yyargn) { - YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); - YYSIZE_T yysize = yysize0; - enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; - /* Internationalized format string. */ - const char *yyformat = YY_NULLPTR; - /* Arguments of yyformat. */ - char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; - /* Number of reported tokens (one for the "unexpected", one per - "expected"). */ + /* Actual size of YYARG. */ int yycount = 0; - /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action @@ -3857,63 +4196,78 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ - if (yytoken != YYEMPTY) - { - int yyn = yypact[*yyssp]; - yyarg[yycount++] = yytname[yytoken]; - if (!yypact_value_is_default (yyn)) - { - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. In other words, skip the first -YYN actions for - this state because they are default actions. */ - int yyxbegin = yyn < 0 ? -yyn : 0; - /* Stay within bounds of both yycheck and yytname. */ - int yychecklim = YYLAST - yyn + 1; - int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; - int yyx; - - for (yyx = yyxbegin; yyx < yyxend; ++yyx) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR - && !yytable_value_is_error (yytable[yyx + yyn])) - { - if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) - { - yycount = 1; - yysize = yysize0; - break; - } - yyarg[yycount++] = yytname[yyx]; - { - YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); - if (! (yysize <= yysize1 - && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) - return 2; - yysize = yysize1; - } - } - } + if (yyctx->yytoken != YYSYMBOL_YYEMPTY) + { + int yyn; + if (yyarg) + yyarg[yycount] = yyctx->yytoken; + ++yycount; + yyn = yypcontext_expected_tokens (yyctx, + yyarg ? yyarg + 1 : yyarg, yyargn - 1); + if (yyn == YYENOMEM) + return YYENOMEM; + else + yycount += yyn; } + return yycount; +} + +/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message + about the unexpected token YYTOKEN for the state stack whose top is + YYSSP. + + Return 0 if *YYMSG was successfully written. Return -1 if *YYMSG is + not large enough to hold the message. In that case, also set + *YYMSG_ALLOC to the required number of bytes. Return YYENOMEM if the + required number of bytes is too large to store. */ +static int +yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg, + const yypcontext_t *yyctx) +{ + enum { YYARGS_MAX = 5 }; + /* Internationalized format string. */ + const char *yyformat = YY_NULLPTR; + /* Arguments of yyformat: reported tokens (one for the "unexpected", + one per "expected"). 
*/ + yysymbol_kind_t yyarg[YYARGS_MAX]; + /* Cumulated lengths of YYARG. */ + YYPTRDIFF_T yysize = 0; + + /* Actual size of YYARG. */ + int yycount = yy_syntax_error_arguments (yyctx, yyarg, YYARGS_MAX); + if (yycount == YYENOMEM) + return YYENOMEM; switch (yycount) { -# define YYCASE_(N, S) \ +#define YYCASE_(N, S) \ case N: \ yyformat = S; \ - break + break + default: /* Avoid compiler warnings. */ YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); -# undef YYCASE_ +#undef YYCASE_ } + /* Compute error message size. Don't count the "%s"s, but reserve + room for the terminator. */ + yysize = yystrlen (yyformat) - 2 * yycount + 1; { - YYSIZE_T yysize1 = yysize + yystrlen (yyformat); - if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) - return 2; - yysize = yysize1; + int yyi; + for (yyi = 0; yyi < yycount; ++yyi) + { + YYPTRDIFF_T yysize1 + = yysize + yytnamerr (YY_NULLPTR, yytname[yyarg[yyi]]); + if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM) + yysize = yysize1; + else + return YYENOMEM; + } } if (*yymsg_alloc < yysize) @@ -3922,7 +4276,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; - return 1; + return -1; } /* Avoid sprintf, as that infringes on the user's name space. @@ -3934,40 +4288,43 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { - yyp += yytnamerr (yyp, yyarg[yyi++]); + yyp += yytnamerr (yyp, yytname[yyarg[yyi++]]); yyformat += 2; } else { - yyp++; - yyformat++; + ++yyp; + ++yyformat; } } return 0; } -#endif /* YYERROR_VERBOSE */ + /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void -yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext) +yydestruct (const char *yymsg, + yysymbol_kind_t yykind, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext) { YYUSE (yyvaluep); YYUSE (pParseContext); if (!yymsg) yymsg = "Deleting"; - YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + YY_SYMBOL_PRINT (yymsg, yykind, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN - YYUSE (yytype); + YYUSE (yykind); YY_IGNORE_MAYBE_UNINITIALIZED_END } + + /*----------. | yyparse. | `----------*/ @@ -3975,7 +4332,7 @@ yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseCon int yyparse (glslang::TParseContext* pParseContext) { -/* The lookahead symbol. */ +/* Lookahead token kind. */ int yychar; @@ -3986,45 +4343,41 @@ YY_INITIAL_VALUE (static YYSTYPE yyval_default;) YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); /* Number of syntax errors so far. */ - int yynerrs; + int yynerrs = 0; - int yystate; + yy_state_fast_t yystate = 0; /* Number of tokens to shift before error messages enabled. */ - int yyerrstatus; - - /* The stacks and their tools: - 'yyss': related to states. - 'yyvs': related to semantic values. 
+ int yyerrstatus = 0; - Refer to the stacks through separate pointers, to allow yyoverflow + /* Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ - /* The state stack. */ - yytype_int16 yyssa[YYINITDEPTH]; - yytype_int16 *yyss; - yytype_int16 *yyssp; + /* Their size. */ + YYPTRDIFF_T yystacksize = YYINITDEPTH; - /* The semantic value stack. */ - YYSTYPE yyvsa[YYINITDEPTH]; - YYSTYPE *yyvs; - YYSTYPE *yyvsp; + /* The state stack: array, bottom, top. */ + yy_state_t yyssa[YYINITDEPTH]; + yy_state_t *yyss = yyssa; + yy_state_t *yyssp = yyss; - YYSIZE_T yystacksize; + /* The semantic value stack: array, bottom, top. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs = yyvsa; + YYSTYPE *yyvsp = yyvs; int yyn; + /* The return value of yyparse. */ int yyresult; - /* Lookahead token as an internal (translated) token number. */ - int yytoken = 0; + /* Lookahead symbol kind. */ + yysymbol_kind_t yytoken = YYSYMBOL_YYEMPTY; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; -#if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; - YYSIZE_T yymsg_alloc = sizeof yymsgbuf; -#endif + YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf; #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) @@ -4032,58 +4385,60 @@ YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); Keep to zero when no symbol should be popped. */ int yylen = 0; - yyssp = yyss = yyssa; - yyvsp = yyvs = yyvsa; - yystacksize = YYINITDEPTH; - YYDPRINTF ((stderr, "Starting parse\n")); - yystate = 0; - yyerrstatus = 0; - yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; + /*------------------------------------------------------------. -| yynewstate -- Push a new state, which is found in yystate. | +| yynewstate -- push a new state, which is found in yystate. | `------------------------------------------------------------*/ - yynewstate: +yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; - yysetstate: - *yyssp = yystate; + +/*--------------------------------------------------------------------. +| yysetstate -- set current state (the top of the stack) to yystate. | +`--------------------------------------------------------------------*/ +yysetstate: + YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + YY_ASSERT (0 <= yystate && yystate < YYNSTATES); + YY_IGNORE_USELESS_CAST_BEGIN + *yyssp = YY_CAST (yy_state_t, yystate); + YY_IGNORE_USELESS_CAST_END + YY_STACK_PRINT (yyss, yyssp); if (yyss + yystacksize - 1 <= yyssp) +#if !defined yyoverflow && !defined YYSTACK_RELOCATE + goto yyexhaustedlab; +#else { /* Get the current used size of the three stacks, in elements. */ - YYSIZE_T yysize = yyssp - yyss + 1; + YYPTRDIFF_T yysize = yyssp - yyss + 1; -#ifdef yyoverflow +# if defined yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ + yy_state_t *yyss1 = yyss; YYSTYPE *yyvs1 = yyvs; - yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. 
*/ yyoverflow (YY_("memory exhausted"), - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), + &yyss1, yysize * YYSIZEOF (*yyssp), + &yyvs1, yysize * YYSIZEOF (*yyvsp), &yystacksize); - yyss = yyss1; yyvs = yyvs1; } -#else /* no yyoverflow */ -# ifndef YYSTACK_RELOCATE - goto yyexhaustedlab; -# else +# else /* defined YYSTACK_RELOCATE */ /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; @@ -4092,9 +4447,10 @@ YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); yystacksize = YYMAXDEPTH; { - yytype_int16 *yyss1 = yyss; + yy_state_t *yyss1 = yyss; union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); + YY_CAST (union yyalloc *, + YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize)))); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); @@ -4104,30 +4460,30 @@ YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); YYSTACK_FREE (yyss1); } # endif -#endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; - YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); + YY_IGNORE_USELESS_CAST_BEGIN + YYDPRINTF ((stderr, "Stack size increased to %ld\n", + YY_CAST (long, yystacksize))); + YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) YYABORT; } - - YYDPRINTF ((stderr, "Entering state %d\n", yystate)); +#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */ if (yystate == YYFINAL) YYACCEPT; goto yybackup; + /*-----------. | yybackup. | `-----------*/ yybackup: - /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ @@ -4138,18 +4494,29 @@ yybackup: /* Not known => get a lookahead token if don't already have one. */ - /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ + /* YYCHAR is either empty, or end-of-input, or a valid lookahead. */ if (yychar == YYEMPTY) { - YYDPRINTF ((stderr, "Reading a token: ")); + YYDPRINTF ((stderr, "Reading a token\n")); yychar = yylex (&yylval, parseContext); } if (yychar <= YYEOF) { - yychar = yytoken = YYEOF; + yychar = YYEOF; + yytoken = YYSYMBOL_YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } + else if (yychar == YYerror) + { + /* The scanner already issued an error message, process directly + to error recovery. But do not keep the error token as + lookahead, it is too special and may lead us to an endless + loop in error recovery. */ + yychar = YYUNDEF; + yytoken = YYSYMBOL_YYerror; + goto yyerrlab1; + } else { yytoken = YYTRANSLATE (yychar); @@ -4177,15 +4544,13 @@ yybackup: /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); - - /* Discard the shifted token. */ - yychar = YYEMPTY; - yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END + /* Discard the shifted token. */ + yychar = YYEMPTY; goto yynewstate; @@ -4200,7 +4565,7 @@ yydefault: /*-----------------------------. -| yyreduce -- Do a reduction. | +| yyreduce -- do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. 
*/ @@ -4220,304 +4585,304 @@ yyreduce: YY_REDUCE_PRINT (yyn); switch (yyn) { - case 2: -#line 357 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 2: /* variable_identifier: IDENTIFIER */ +#line 371 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleVariable((yyvsp[0].lex).loc, (yyvsp[0].lex).symbol, (yyvsp[0].lex).string); } -#line 4229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4594 "MachineIndependent/glslang_tab.cpp" break; - case 3: -#line 363 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 3: /* primary_expression: variable_identifier */ +#line 377 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4237 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4602 "MachineIndependent/glslang_tab.cpp" break; - case 4: -#line 366 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 4: /* primary_expression: LEFT_PAREN expression RIGHT_PAREN */ +#line 380 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); if ((yyval.interm.intermTypedNode)->getAsConstantUnion()) (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); } -#line 4247 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4612 "MachineIndependent/glslang_tab.cpp" break; - case 5: -#line 371 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 5: /* primary_expression: FLOATCONSTANT */ +#line 385 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat, (yyvsp[0].lex).loc, true); } -#line 4255 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4620 "MachineIndependent/glslang_tab.cpp" break; - case 6: -#line 374 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 6: /* primary_expression: INTCONSTANT */ +#line 388 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); } -#line 4263 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4628 "MachineIndependent/glslang_tab.cpp" break; - case 7: -#line 377 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 7: /* primary_expression: UINTCONSTANT */ +#line 391 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); } -#line 4272 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4637 "MachineIndependent/glslang_tab.cpp" break; - case 8: -#line 381 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 8: /* primary_expression: BOOLCONSTANT */ +#line 395 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).b, (yyvsp[0].lex).loc, true); } -#line 4280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4645 "MachineIndependent/glslang_tab.cpp" break; - case 9: -#line 385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 9: /* primary_expression: STRING_LITERAL */ +#line 399 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).string, (yyvsp[0].lex).loc, true); } -#line 4288 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4653 "MachineIndependent/glslang_tab.cpp" break; - case 10: 
-#line 388 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 10: /* primary_expression: INT32CONSTANT */ +#line 402 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); } -#line 4297 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4662 "MachineIndependent/glslang_tab.cpp" break; - case 11: -#line 392 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 11: /* primary_expression: UINT32CONSTANT */ +#line 406 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); } -#line 4306 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4671 "MachineIndependent/glslang_tab.cpp" break; - case 12: -#line 396 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 12: /* primary_expression: INT64CONSTANT */ +#line 410 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i64, (yyvsp[0].lex).loc, true); } -#line 4315 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4680 "MachineIndependent/glslang_tab.cpp" break; - case 13: -#line 400 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 13: /* primary_expression: UINT64CONSTANT */ +#line 414 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u64, (yyvsp[0].lex).loc, true); } -#line 4324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4689 "MachineIndependent/glslang_tab.cpp" break; - case 14: -#line 404 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 14: /* primary_expression: INT16CONSTANT */ +#line 418 "MachineIndependent/glslang.y" + { parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit integer literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((short)(yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); } -#line 4333 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4698 "MachineIndependent/glslang_tab.cpp" break; - case 15: -#line 408 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 15: /* primary_expression: UINT16CONSTANT */ +#line 422 "MachineIndependent/glslang.y" + { parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit unsigned integer literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((unsigned short)(yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); } -#line 4342 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4707 "MachineIndependent/glslang_tab.cpp" break; - case 16: -#line 412 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 16: /* primary_expression: DOUBLECONSTANT */ +#line 426 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double literal"); if (! 
parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtDouble, (yyvsp[0].lex).loc, true); } -#line 4353 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4718 "MachineIndependent/glslang_tab.cpp" break; - case 17: -#line 418 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 17: /* primary_expression: FLOAT16CONSTANT */ +#line 432 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float literal"); (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat16, (yyvsp[0].lex).loc, true); } -#line 4362 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4727 "MachineIndependent/glslang_tab.cpp" break; - case 18: -#line 426 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 18: /* postfix_expression: primary_expression */ +#line 440 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4370 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4735 "MachineIndependent/glslang_tab.cpp" break; - case 19: -#line 429 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 19: /* postfix_expression: postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET */ +#line 443 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBracketDereference((yyvsp[-2].lex).loc, (yyvsp[-3].interm.intermTypedNode), (yyvsp[-1].interm.intermTypedNode)); } -#line 4378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4743 "MachineIndependent/glslang_tab.cpp" break; - case 20: -#line 432 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 20: /* postfix_expression: function_call */ +#line 446 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4386 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4751 "MachineIndependent/glslang_tab.cpp" break; - case 21: -#line 435 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 21: /* postfix_expression: postfix_expression DOT IDENTIFIER */ +#line 449 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleDotDereference((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode), *(yyvsp[0].lex).string); } -#line 4394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4759 "MachineIndependent/glslang_tab.cpp" break; - case 22: -#line 438 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 22: /* postfix_expression: postfix_expression INC_OP */ +#line 452 "MachineIndependent/glslang.y" + { parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "++", (yyvsp[-1].interm.intermTypedNode)); (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "++", EOpPostIncrement, (yyvsp[-1].interm.intermTypedNode)); } -#line 4404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4769 "MachineIndependent/glslang_tab.cpp" break; - case 23: -#line 443 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 23: /* postfix_expression: postfix_expression DEC_OP */ +#line 457 "MachineIndependent/glslang.y" + { parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "--", (yyvsp[-1].interm.intermTypedNode)); 
(yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "--", EOpPostDecrement, (yyvsp[-1].interm.intermTypedNode)); } -#line 4414 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4779 "MachineIndependent/glslang_tab.cpp" break; - case 24: -#line 451 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 24: /* integer_expression: expression */ +#line 465 "MachineIndependent/glslang.y" + { parseContext.integerCheck((yyvsp[0].interm.intermTypedNode), "[]"); (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4423 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4788 "MachineIndependent/glslang_tab.cpp" break; - case 25: -#line 458 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 25: /* function_call: function_call_or_method */ +#line 472 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleFunctionCall((yyvsp[0].interm).loc, (yyvsp[0].interm).function, (yyvsp[0].interm).intermNode); delete (yyvsp[0].interm).function; } -#line 4432 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4797 "MachineIndependent/glslang_tab.cpp" break; - case 26: -#line 465 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 26: /* function_call_or_method: function_call_generic */ +#line 479 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); } -#line 4440 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4805 "MachineIndependent/glslang_tab.cpp" break; - case 27: -#line 471 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 27: /* function_call_generic: function_call_header_with_parameters RIGHT_PAREN */ +#line 485 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-1].interm); (yyval.interm).loc = (yyvsp[0].lex).loc; } -#line 4449 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4814 "MachineIndependent/glslang_tab.cpp" break; - case 28: -#line 475 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 28: /* function_call_generic: function_call_header_no_parameters RIGHT_PAREN */ +#line 489 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-1].interm); (yyval.interm).loc = (yyvsp[0].lex).loc; } -#line 4458 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4823 "MachineIndependent/glslang_tab.cpp" break; - case 29: -#line 482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 29: /* function_call_header_no_parameters: function_call_header VOID */ +#line 496 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-1].interm); } -#line 4466 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4831 "MachineIndependent/glslang_tab.cpp" break; - case 30: -#line 485 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 30: /* function_call_header_no_parameters: function_call_header */ +#line 499 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); } -#line 4474 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4839 "MachineIndependent/glslang_tab.cpp" break; - case 31: -#line 491 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 31: /* function_call_header_with_parameters: function_call_header assignment_expression */ +#line 505 "MachineIndependent/glslang.y" + { TParameter param = { 0, new TType }; param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); (yyvsp[-1].interm).function->addParameter(param); (yyval.interm).function = (yyvsp[-1].interm).function; (yyval.interm).intermNode = 
(yyvsp[0].interm.intermTypedNode); } -#line 4486 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4851 "MachineIndependent/glslang_tab.cpp" break; - case 32: -#line 498 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 32: /* function_call_header_with_parameters: function_call_header_with_parameters COMMA assignment_expression */ +#line 512 "MachineIndependent/glslang.y" + { TParameter param = { 0, new TType }; param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); (yyvsp[-2].interm).function->addParameter(param); (yyval.interm).function = (yyvsp[-2].interm).function; (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); } -#line 4498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4863 "MachineIndependent/glslang_tab.cpp" break; - case 33: -#line 508 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 33: /* function_call_header: function_identifier LEFT_PAREN */ +#line 522 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-1].interm); } -#line 4506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4871 "MachineIndependent/glslang_tab.cpp" break; - case 34: -#line 516 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 34: /* function_identifier: type_specifier */ +#line 530 "MachineIndependent/glslang.y" + { // Constructor (yyval.interm).intermNode = 0; (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type)); } -#line 4516 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4881 "MachineIndependent/glslang_tab.cpp" break; - case 35: -#line 521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 35: /* function_identifier: postfix_expression */ +#line 535 "MachineIndependent/glslang.y" + { // // Should be a method or subroutine call, but we haven't recognized the arguments yet. 
// @@ -4544,51 +4909,51 @@ yyreduce: (yyval.interm).function = new TFunction(empty, TType(EbtVoid), EOpNull); } } -#line 4548 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4913 "MachineIndependent/glslang_tab.cpp" break; - case 36: -#line 549 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 36: /* function_identifier: non_uniform_qualifier */ +#line 563 "MachineIndependent/glslang.y" + { // Constructor (yyval.interm).intermNode = 0; (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type)); } -#line 4558 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4923 "MachineIndependent/glslang_tab.cpp" break; - case 37: -#line 558 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 37: /* unary_expression: postfix_expression */ +#line 572 "MachineIndependent/glslang.y" + { parseContext.variableCheck((yyvsp[0].interm.intermTypedNode)); (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); if (TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode()) parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); } -#line 4569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4934 "MachineIndependent/glslang_tab.cpp" break; - case 38: -#line 564 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 38: /* unary_expression: INC_OP unary_expression */ +#line 578 "MachineIndependent/glslang.y" + { parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "++", (yyvsp[0].interm.intermTypedNode)); (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "++", EOpPreIncrement, (yyvsp[0].interm.intermTypedNode)); } -#line 4578 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4943 "MachineIndependent/glslang_tab.cpp" break; - case 39: -#line 568 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 39: /* unary_expression: DEC_OP unary_expression */ +#line 582 "MachineIndependent/glslang.y" + { parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "--", (yyvsp[0].interm.intermTypedNode)); (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "--", EOpPreDecrement, (yyvsp[0].interm.intermTypedNode)); } -#line 4587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4952 "MachineIndependent/glslang_tab.cpp" break; - case 40: -#line 572 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 40: /* unary_expression: unary_operator unary_expression */ +#line 586 "MachineIndependent/glslang.y" + { if ((yyvsp[-1].interm).op != EOpNull) { char errorOp[2] = {0, 0}; switch((yyvsp[-1].interm).op) { @@ -4604,180 +4969,180 @@ yyreduce: (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); } } -#line 4608 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4973 "MachineIndependent/glslang_tab.cpp" break; - case 41: -#line 592 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; } -#line 4614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 41: /* unary_operator: PLUS */ +#line 606 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; } +#line 4979 "MachineIndependent/glslang_tab.cpp" break; - case 42: -#line 593 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNegative; } -#line 4620 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 42: /* unary_operator: DASH */ +#line 607 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNegative; } +#line 4985 "MachineIndependent/glslang_tab.cpp" break; - case 43: -#line 594 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; } -#line 4626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 43: /* unary_operator: BANG */ +#line 608 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; } +#line 4991 "MachineIndependent/glslang_tab.cpp" break; - case 44: -#line 595 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot; + case 44: /* unary_operator: TILDE */ +#line 609 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot; parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise not"); } -#line 4633 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 4998 "MachineIndependent/glslang_tab.cpp" break; - case 45: -#line 601 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4639 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 45: /* multiplicative_expression: unary_expression */ +#line 615 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5004 "MachineIndependent/glslang_tab.cpp" break; - case 46: -#line 602 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 46: /* multiplicative_expression: multiplicative_expression STAR unary_expression */ +#line 616 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "*", EOpMul, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4649 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5014 "MachineIndependent/glslang_tab.cpp" break; - case 47: -#line 607 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 47: /* multiplicative_expression: multiplicative_expression SLASH unary_expression */ +#line 621 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "/", EOpDiv, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4659 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5024 "MachineIndependent/glslang_tab.cpp" break; - case 48: -#line 612 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 48: /* multiplicative_expression: multiplicative_expression PERCENT unary_expression */ +#line 626 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "%"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "%", EOpMod, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5035 
"MachineIndependent/glslang_tab.cpp" break; - case 49: -#line 621 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 49: /* additive_expression: multiplicative_expression */ +#line 635 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5041 "MachineIndependent/glslang_tab.cpp" break; - case 50: -#line 622 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 50: /* additive_expression: additive_expression PLUS multiplicative_expression */ +#line 636 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "+", EOpAdd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5051 "MachineIndependent/glslang_tab.cpp" break; - case 51: -#line 627 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 51: /* additive_expression: additive_expression DASH multiplicative_expression */ +#line 641 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "-", EOpSub, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5061 "MachineIndependent/glslang_tab.cpp" break; - case 52: -#line 635 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 52: /* shift_expression: additive_expression */ +#line 649 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5067 "MachineIndependent/glslang_tab.cpp" break; - case 53: -#line 636 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 53: /* shift_expression: shift_expression LEFT_OP additive_expression */ +#line 650 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift left"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<<", EOpLeftShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5078 "MachineIndependent/glslang_tab.cpp" break; - case 54: -#line 642 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 54: /* shift_expression: shift_expression RIGHT_OP additive_expression */ +#line 656 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift right"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">>", EOpRightShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4724 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5089 "MachineIndependent/glslang_tab.cpp" break; - case 55: -#line 651 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4730 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 55: /* relational_expression: shift_expression */ +#line 665 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5095 "MachineIndependent/glslang_tab.cpp" break; - case 56: -#line 652 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 56: /* relational_expression: relational_expression LEFT_ANGLE shift_expression */ +#line 666 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<", EOpLessThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4740 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5105 "MachineIndependent/glslang_tab.cpp" break; - case 57: -#line 657 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 57: /* relational_expression: relational_expression RIGHT_ANGLE shift_expression */ +#line 671 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">", EOpGreaterThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4750 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5115 "MachineIndependent/glslang_tab.cpp" break; - case 58: -#line 662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 58: /* relational_expression: relational_expression LE_OP shift_expression */ +#line 676 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<=", EOpLessThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5125 "MachineIndependent/glslang_tab.cpp" break; - case 59: -#line 667 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 59: /* relational_expression: relational_expression GE_OP shift_expression */ +#line 681 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">=", EOpGreaterThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4770 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5135 "MachineIndependent/glslang_tab.cpp" break; - case 60: -#line 675 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4776 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 60: /* equality_expression: relational_expression */ +#line 689 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5141 "MachineIndependent/glslang_tab.cpp" break; - case 61: -#line 676 "MachineIndependent/glslang.y" /* 
yacc.c:1646 */ - { + case 61: /* equality_expression: equality_expression EQ_OP relational_expression */ +#line 690 "MachineIndependent/glslang.y" + { parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); @@ -4786,12 +5151,12 @@ yyreduce: if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4790 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5155 "MachineIndependent/glslang_tab.cpp" break; - case 62: -#line 685 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 62: /* equality_expression: equality_expression NE_OP relational_expression */ +#line 699 "MachineIndependent/glslang.y" + { parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); @@ -4800,125 +5165,125 @@ yyreduce: if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4804 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5169 "MachineIndependent/glslang_tab.cpp" break; - case 63: -#line 697 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 63: /* and_expression: equality_expression */ +#line 711 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5175 "MachineIndependent/glslang_tab.cpp" break; - case 64: -#line 698 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 64: /* and_expression: and_expression AMPERSAND equality_expression */ +#line 712 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise and"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&", EOpAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4821 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5186 "MachineIndependent/glslang_tab.cpp" break; - case 65: -#line 707 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4827 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 65: /* exclusive_or_expression: and_expression */ +#line 721 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5192 "MachineIndependent/glslang_tab.cpp" break; - case 66: -#line 708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 66: /* exclusive_or_expression: exclusive_or_expression CARET and_expression */ +#line 722 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise exclusive or"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^", 
EOpExclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4838 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5203 "MachineIndependent/glslang_tab.cpp" break; - case 67: -#line 717 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4844 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 67: /* inclusive_or_expression: exclusive_or_expression */ +#line 731 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5209 "MachineIndependent/glslang_tab.cpp" break; - case 68: -#line 718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 68: /* inclusive_or_expression: inclusive_or_expression VERTICAL_BAR exclusive_or_expression */ +#line 732 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise inclusive or"); (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "|", EOpInclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 4855 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5220 "MachineIndependent/glslang_tab.cpp" break; - case 69: -#line 727 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4861 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 69: /* logical_and_expression: inclusive_or_expression */ +#line 741 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5226 "MachineIndependent/glslang_tab.cpp" break; - case 70: -#line 728 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 70: /* logical_and_expression: logical_and_expression AND_OP inclusive_or_expression */ +#line 742 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&&", EOpLogicalAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4871 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5236 "MachineIndependent/glslang_tab.cpp" break; - case 71: -#line 736 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4877 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 71: /* logical_xor_expression: logical_and_expression */ +#line 750 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5242 "MachineIndependent/glslang_tab.cpp" break; - case 72: -#line 737 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 72: /* logical_xor_expression: logical_xor_expression XOR_OP logical_and_expression */ +#line 751 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^^", EOpLogicalXor, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = 
parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4887 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5252 "MachineIndependent/glslang_tab.cpp" break; - case 73: -#line 745 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4893 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 73: /* logical_or_expression: logical_xor_expression */ +#line 759 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5258 "MachineIndependent/glslang_tab.cpp" break; - case 74: -#line 746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 74: /* logical_or_expression: logical_or_expression OR_OP logical_xor_expression */ +#line 760 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "||", EOpLogicalOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); if ((yyval.interm.intermTypedNode) == 0) (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); } -#line 4903 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5268 "MachineIndependent/glslang_tab.cpp" break; - case 75: -#line 754 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4909 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 75: /* conditional_expression: logical_or_expression */ +#line 768 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5274 "MachineIndependent/glslang_tab.cpp" break; - case 76: -#line 755 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 76: /* $@1: %empty */ +#line 769 "MachineIndependent/glslang.y" + { ++parseContext.controlFlowNestingLevel; } -#line 4917 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5282 "MachineIndependent/glslang_tab.cpp" break; - case 77: -#line 758 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 77: /* conditional_expression: logical_or_expression QUESTION $@1 expression COLON assignment_expression */ +#line 772 "MachineIndependent/glslang.y" + { --parseContext.controlFlowNestingLevel; parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-5].interm.intermTypedNode)); parseContext.rValueErrorCheck((yyvsp[-4].lex).loc, "?", (yyvsp[-5].interm.intermTypedNode)); @@ -4930,18 +5295,18 @@ yyreduce: (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } } -#line 4934 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5299 "MachineIndependent/glslang_tab.cpp" break; - case 78: -#line 773 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 4940 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 78: /* assignment_expression: conditional_expression */ +#line 787 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 5305 "MachineIndependent/glslang_tab.cpp" break; - case 79: -#line 774 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 79: /* assignment_expression: unary_expression assignment_operator assignment_expression */ +#line 788 "MachineIndependent/glslang.y" + { parseContext.arrayObjectCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array assignment"); 
parseContext.opaqueCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); parseContext.storage16BitAssignmentCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); @@ -4954,120 +5319,120 @@ yyreduce: (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } } -#line 4958 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5323 "MachineIndependent/glslang_tab.cpp" break; - case 80: -#line 790 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 80: /* assignment_operator: EQUAL */ +#line 804 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAssign; } -#line 4967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5332 "MachineIndependent/glslang_tab.cpp" break; - case 81: -#line 794 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 81: /* assignment_operator: MUL_ASSIGN */ +#line 808 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpMulAssign; } -#line 4976 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5341 "MachineIndependent/glslang_tab.cpp" break; - case 82: -#line 798 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 82: /* assignment_operator: DIV_ASSIGN */ +#line 812 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpDivAssign; } -#line 4985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5350 "MachineIndependent/glslang_tab.cpp" break; - case 83: -#line 802 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 83: /* assignment_operator: MOD_ASSIGN */ +#line 816 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "%="); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpModAssign; } -#line 4995 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5360 "MachineIndependent/glslang_tab.cpp" break; - case 84: -#line 807 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 84: /* assignment_operator: ADD_ASSIGN */ +#line 821 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAddAssign; } -#line 5004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5369 "MachineIndependent/glslang_tab.cpp" break; - case 85: -#line 811 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 85: /* assignment_operator: SUB_ASSIGN */ +#line 825 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpSubAssign; } -#line 5013 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5378 "MachineIndependent/glslang_tab.cpp" break; - case 86: -#line 815 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 86: /* assignment_operator: LEFT_ASSIGN */ +#line 829 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift left assign"); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLeftShiftAssign; } -#line 5022 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5387 "MachineIndependent/glslang_tab.cpp" break; - case 87: -#line 819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 87: /* assignment_operator: RIGHT_ASSIGN */ +#line 833 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift right assign"); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpRightShiftAssign; } -#line 5031 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5396 "MachineIndependent/glslang_tab.cpp" break; - case 88: -#line 823 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 88: /* assignment_operator: AND_ASSIGN */ +#line 837 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-and assign"); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAndAssign; } -#line 5040 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5405 "MachineIndependent/glslang_tab.cpp" break; - case 89: -#line 827 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 89: /* assignment_operator: XOR_ASSIGN */ +#line 841 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-xor assign"); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpExclusiveOrAssign; } -#line 5049 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5414 "MachineIndependent/glslang_tab.cpp" break; - case 90: -#line 831 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 90: /* assignment_operator: OR_ASSIGN */ +#line 845 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-or assign"); (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpInclusiveOrAssign; } -#line 5058 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5423 "MachineIndependent/glslang_tab.cpp" break; - case 91: -#line 838 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 91: /* expression: assignment_expression */ +#line 852 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 5066 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5431 "MachineIndependent/glslang_tab.cpp" break; - case 92: -#line 841 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 92: /* expression: expression COMMA assignment_expression */ +#line 855 "MachineIndependent/glslang.y" + { parseContext.samplerConstructorLocationCheck((yyvsp[-1].lex).loc, ",", (yyvsp[0].interm.intermTypedNode)); (yyval.interm.intermTypedNode) = parseContext.intermediate.addComma((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); if ((yyval.interm.intermTypedNode) == 0) { @@ -5075,118 +5440,118 @@ yyreduce: (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } } -#line 5079 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5444 "MachineIndependent/glslang_tab.cpp" break; - case 93: -#line 852 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 93: /* constant_expression: conditional_expression */ +#line 866 "MachineIndependent/glslang.y" + { parseContext.constantValueCheck((yyvsp[0].interm.intermTypedNode), ""); (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 5088 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5453 "MachineIndependent/glslang_tab.cpp" break; - case 94: -#line 859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 94: /* declaration: function_prototype SEMICOLON */ +#line 873 "MachineIndependent/glslang.y" + { parseContext.handleFunctionDeclarator((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).function, true /* prototype */); (yyval.interm.intermNode) = 0; // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature } -#line 5098 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5463 "MachineIndependent/glslang_tab.cpp" break; - case 95: 
-#line 864 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 95: /* declaration: init_declarator_list SEMICOLON */ +#line 878 "MachineIndependent/glslang.y" + { if ((yyvsp[-1].interm).intermNode && (yyvsp[-1].interm).intermNode->getAsAggregate()) (yyvsp[-1].interm).intermNode->getAsAggregate()->setOperator(EOpSequence); (yyval.interm.intermNode) = (yyvsp[-1].interm).intermNode; } -#line 5108 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5473 "MachineIndependent/glslang_tab.cpp" break; - case 96: -#line 869 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 96: /* declaration: PRECISION precision_qualifier type_specifier SEMICOLON */ +#line 883 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyvsp[-3].lex).loc, ENoProfile, 130, 0, "precision statement"); // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); parseContext.setDefaultPrecision((yyvsp[-3].lex).loc, (yyvsp[-1].interm.type), (yyvsp[-2].interm.type).qualifier.precision); (yyval.interm.intermNode) = 0; } -#line 5120 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5485 "MachineIndependent/glslang_tab.cpp" break; - case 97: -#line 876 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 97: /* declaration: block_structure SEMICOLON */ +#line 890 "MachineIndependent/glslang.y" + { parseContext.declareBlock((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).typeList); (yyval.interm.intermNode) = 0; } -#line 5129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5494 "MachineIndependent/glslang_tab.cpp" break; - case 98: -#line 880 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 98: /* declaration: block_structure IDENTIFIER SEMICOLON */ +#line 894 "MachineIndependent/glslang.y" + { parseContext.declareBlock((yyvsp[-2].interm).loc, *(yyvsp[-2].interm).typeList, (yyvsp[-1].lex).string); (yyval.interm.intermNode) = 0; } -#line 5138 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5503 "MachineIndependent/glslang_tab.cpp" break; - case 99: -#line 884 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 99: /* declaration: block_structure IDENTIFIER array_specifier SEMICOLON */ +#line 898 "MachineIndependent/glslang.y" + { parseContext.declareBlock((yyvsp[-3].interm).loc, *(yyvsp[-3].interm).typeList, (yyvsp[-2].lex).string, (yyvsp[-1].interm).arraySizes); (yyval.interm.intermNode) = 0; } -#line 5147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5512 "MachineIndependent/glslang_tab.cpp" break; - case 100: -#line 888 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 100: /* declaration: type_qualifier SEMICOLON */ +#line 902 "MachineIndependent/glslang.y" + { parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); parseContext.updateStandaloneQualifierDefaults((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type)); (yyval.interm.intermNode) = 0; } -#line 5157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5522 "MachineIndependent/glslang_tab.cpp" break; - case 101: -#line 893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 101: /* declaration: type_qualifier IDENTIFIER SEMICOLON */ +#line 907 "MachineIndependent/glslang.y" + { parseContext.checkNoShaderLayouts((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).shaderQualifiers); 
parseContext.addQualifierToExisting((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, *(yyvsp[-1].lex).string); (yyval.interm.intermNode) = 0; } -#line 5167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5532 "MachineIndependent/glslang_tab.cpp" break; - case 102: -#line 898 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 102: /* declaration: type_qualifier IDENTIFIER identifier_list SEMICOLON */ +#line 912 "MachineIndependent/glslang.y" + { parseContext.checkNoShaderLayouts((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).shaderQualifiers); (yyvsp[-1].interm.identifierList)->push_back((yyvsp[-2].lex).string); parseContext.addQualifierToExisting((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).qualifier, *(yyvsp[-1].interm.identifierList)); (yyval.interm.intermNode) = 0; } -#line 5178 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5543 "MachineIndependent/glslang_tab.cpp" break; - case 103: -#line 907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); } -#line 5184 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 103: /* $@2: %empty */ +#line 921 "MachineIndependent/glslang.y" + { parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); } +#line 5549 "MachineIndependent/glslang_tab.cpp" break; - case 104: -#line 907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { - --parseContext.structNestingLevel; + case 104: /* block_structure: type_qualifier IDENTIFIER LEFT_BRACE $@2 struct_declaration_list RIGHT_BRACE */ +#line 921 "MachineIndependent/glslang.y" + { + --parseContext.blockNestingLevel; parseContext.blockName = (yyvsp[-4].lex).string; parseContext.globalQualifierFixCheck((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).qualifier); parseContext.checkNoShaderLayouts((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).shaderQualifiers); @@ -5194,55 +5559,55 @@ yyreduce: (yyval.interm).loc = (yyvsp[-5].interm.type).loc; (yyval.interm).typeList = (yyvsp[-1].interm.typeList); } -#line 5198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5563 "MachineIndependent/glslang_tab.cpp" break; - case 105: -#line 918 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 105: /* identifier_list: COMMA IDENTIFIER */ +#line 932 "MachineIndependent/glslang.y" + { (yyval.interm.identifierList) = new TIdentifierList; (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); } -#line 5207 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5572 "MachineIndependent/glslang_tab.cpp" break; - case 106: -#line 922 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 106: /* identifier_list: identifier_list COMMA IDENTIFIER */ +#line 936 "MachineIndependent/glslang.y" + { (yyval.interm.identifierList) = (yyvsp[-2].interm.identifierList); (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); } -#line 5216 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5581 "MachineIndependent/glslang_tab.cpp" break; - case 107: -#line 929 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 107: /* function_prototype: function_declarator RIGHT_PAREN */ +#line 943 "MachineIndependent/glslang.y" + { (yyval.interm).function = (yyvsp[-1].interm.function); (yyval.interm).loc = (yyvsp[0].lex).loc; } -#line 5225 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5590 "MachineIndependent/glslang_tab.cpp" break; - case 108: -#line 936 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 108: /* 
function_declarator: function_header */ +#line 950 "MachineIndependent/glslang.y" + { (yyval.interm.function) = (yyvsp[0].interm.function); } -#line 5233 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5598 "MachineIndependent/glslang_tab.cpp" break; - case 109: -#line 939 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 109: /* function_declarator: function_header_with_parameters */ +#line 953 "MachineIndependent/glslang.y" + { (yyval.interm.function) = (yyvsp[0].interm.function); } -#line 5241 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5606 "MachineIndependent/glslang_tab.cpp" break; - case 110: -#line 946 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 110: /* function_header_with_parameters: function_header parameter_declaration */ +#line 960 "MachineIndependent/glslang.y" + { // Add the parameter (yyval.interm.function) = (yyvsp[-1].interm.function); if ((yyvsp[0].interm).param.type->getBasicType() != EbtVoid) @@ -5250,12 +5615,12 @@ yyreduce: else delete (yyvsp[0].interm).param.type; } -#line 5254 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5619 "MachineIndependent/glslang_tab.cpp" break; - case 111: -#line 954 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 111: /* function_header_with_parameters: function_header_with_parameters COMMA parameter_declaration */ +#line 968 "MachineIndependent/glslang.y" + { // // Only first parameter of one-parameter functions can be void // The check for named parameters not being void is done in parameter_declarator @@ -5272,12 +5637,12 @@ yyreduce: (yyvsp[-2].interm.function)->addParameter((yyvsp[0].interm).param); } } -#line 5276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5641 "MachineIndependent/glslang_tab.cpp" break; - case 112: -#line 974 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 112: /* function_header: fully_specified_type IDENTIFIER LEFT_PAREN */ +#line 988 "MachineIndependent/glslang.y" + { if ((yyvsp[-2].interm.type).qualifier.storage != EvqGlobal && (yyvsp[-2].interm.type).qualifier.storage != EvqTemporary) { parseContext.error((yyvsp[-1].lex).loc, "no qualifiers allowed for function return", GetStorageQualifierString((yyvsp[-2].interm.type).qualifier.storage), ""); @@ -5296,12 +5661,12 @@ yyreduce: function = new TFunction((yyvsp[-1].lex).string, type); (yyval.interm.function) = function; } -#line 5300 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5665 "MachineIndependent/glslang_tab.cpp" break; - case 113: -#line 997 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 113: /* parameter_declarator: type_specifier IDENTIFIER */ +#line 1011 "MachineIndependent/glslang.y" + { if ((yyvsp[-1].interm.type).arraySizes) { parseContext.profileRequires((yyvsp[-1].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); parseContext.profileRequires((yyvsp[-1].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); @@ -5316,12 +5681,12 @@ yyreduce: (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).param = param; } -#line 5320 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5685 "MachineIndependent/glslang_tab.cpp" break; - case 114: -#line 1012 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 114: /* parameter_declarator: type_specifier IDENTIFIER array_specifier */ +#line 1026 "MachineIndependent/glslang.y" + { if ((yyvsp[-2].interm.type).arraySizes) { parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, 
"arrayed type"); parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); @@ -5340,12 +5705,12 @@ yyreduce: (yyval.interm).loc = (yyvsp[-1].lex).loc; (yyval.interm).param = param; } -#line 5344 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5709 "MachineIndependent/glslang_tab.cpp" break; - case 115: -#line 1037 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 115: /* parameter_declaration: type_qualifier parameter_declarator */ +#line 1051 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; @@ -5356,24 +5721,24 @@ yyreduce: parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); } -#line 5360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5725 "MachineIndependent/glslang_tab.cpp" break; - case 116: -#line 1048 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 116: /* parameter_declaration: parameter_declarator */ +#line 1062 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); } -#line 5372 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5737 "MachineIndependent/glslang_tab.cpp" break; - case 117: -#line 1058 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 117: /* parameter_declaration: type_qualifier parameter_type_specifier */ +#line 1072 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; @@ -5383,133 +5748,133 @@ yyreduce: parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type); parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); } -#line 5387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5752 "MachineIndependent/glslang_tab.cpp" break; - case 118: -#line 1068 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 118: /* parameter_declaration: parameter_type_specifier */ +#line 1082 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); } -#line 5399 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5764 "MachineIndependent/glslang_tab.cpp" break; - case 119: -#line 1078 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 119: /* parameter_type_specifier: type_specifier */ +#line 1092 "MachineIndependent/glslang.y" + { TParameter param = { 0, new TType((yyvsp[0].interm.type)) }; (yyval.interm).param = param; if ((yyvsp[0].interm.type).arraySizes) 
parseContext.arraySizeRequiredCheck((yyvsp[0].interm.type).loc, *(yyvsp[0].interm.type).arraySizes); } -#line 5410 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5775 "MachineIndependent/glslang_tab.cpp" break; - case 120: -#line 1087 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 120: /* init_declarator_list: single_declaration */ +#line 1101 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[0].interm); } -#line 5418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5783 "MachineIndependent/glslang_tab.cpp" break; - case 121: -#line 1090 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 121: /* init_declarator_list: init_declarator_list COMMA IDENTIFIER */ +#line 1104 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-2].interm); parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-2].interm).type); } -#line 5427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5792 "MachineIndependent/glslang_tab.cpp" break; - case 122: -#line 1094 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 122: /* init_declarator_list: init_declarator_list COMMA IDENTIFIER array_specifier */ +#line 1108 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-3].interm); parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-3].interm).type, (yyvsp[0].interm).arraySizes); } -#line 5436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5801 "MachineIndependent/glslang_tab.cpp" break; - case 123: -#line 1098 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 123: /* init_declarator_list: init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer */ +#line 1112 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-5].interm).type; TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-5].interm).type, (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-5].interm).intermNode, initNode, (yyvsp[-1].lex).loc); } -#line 5446 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5811 "MachineIndependent/glslang_tab.cpp" break; - case 124: -#line 1103 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 124: /* init_declarator_list: init_declarator_list COMMA IDENTIFIER EQUAL initializer */ +#line 1117 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-4].interm).type; TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-4].interm).type, 0, (yyvsp[0].interm.intermTypedNode)); (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-4].interm).intermNode, initNode, (yyvsp[-1].lex).loc); } -#line 5456 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5821 "MachineIndependent/glslang_tab.cpp" break; - case 125: -#line 1111 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 125: /* single_declaration: fully_specified_type */ +#line 1125 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[0].interm.type); (yyval.interm).intermNode = 0; parseContext.declareTypeDefaults((yyval.interm).loc, (yyval.interm).type); } -#line 5468 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5833 "MachineIndependent/glslang_tab.cpp" break; - case 126: -#line 1118 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 126: /* single_declaration: fully_specified_type 
IDENTIFIER */ +#line 1132 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-1].interm.type); (yyval.interm).intermNode = 0; parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-1].interm.type)); } -#line 5478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5843 "MachineIndependent/glslang_tab.cpp" break; - case 127: -#line 1123 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 127: /* single_declaration: fully_specified_type IDENTIFIER array_specifier */ +#line 1137 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-2].interm.type); (yyval.interm).intermNode = 0; parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-2].interm.type), (yyvsp[0].interm).arraySizes); } -#line 5488 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5853 "MachineIndependent/glslang_tab.cpp" break; - case 128: -#line 1128 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 128: /* single_declaration: fully_specified_type IDENTIFIER array_specifier EQUAL initializer */ +#line 1142 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-4].interm.type); TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-4].interm.type), (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); } -#line 5498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5863 "MachineIndependent/glslang_tab.cpp" break; - case 129: -#line 1133 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 129: /* single_declaration: fully_specified_type IDENTIFIER EQUAL initializer */ +#line 1147 "MachineIndependent/glslang.y" + { (yyval.interm).type = (yyvsp[-3].interm.type); TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode)); (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); } -#line 5508 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5873 "MachineIndependent/glslang_tab.cpp" break; - case 130: -#line 1142 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 130: /* fully_specified_type: type_specifier */ +#line 1156 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); parseContext.globalQualifierTypeCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyval.interm.type)); @@ -5519,12 +5884,12 @@ yyreduce: } parseContext.precisionQualifierCheck((yyval.interm.type).loc, (yyval.interm.type).basicType, (yyval.interm.type).qualifier); } -#line 5523 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5888 "MachineIndependent/glslang_tab.cpp" break; - case 131: -#line 1152 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 131: /* fully_specified_type: type_qualifier type_specifier */ +#line 1166 "MachineIndependent/glslang.y" + { parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); parseContext.globalQualifierTypeCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, (yyvsp[0].interm.type)); @@ -5548,71 +5913,71 @@ yyreduce: (parseContext.language == EShLangFragment && (yyval.interm.type).qualifier.storage == EvqVaryingIn))) (yyval.interm.type).qualifier.smooth = true; } -#line 5552 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 
*/ +#line 5917 "MachineIndependent/glslang_tab.cpp" break; - case 132: -#line 1179 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 132: /* invariant_qualifier: INVARIANT */ +#line 1193 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "invariant"); parseContext.profileRequires((yyval.interm.type).loc, ENoProfile, 120, 0, "invariant"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.invariant = true; } -#line 5563 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5928 "MachineIndependent/glslang_tab.cpp" break; - case 133: -#line 1188 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 133: /* interpolation_qualifier: SMOOTH */ +#line 1202 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "smooth"); parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "smooth"); parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "smooth"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.smooth = true; } -#line 5575 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5940 "MachineIndependent/glslang_tab.cpp" break; - case 134: -#line 1195 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 134: /* interpolation_qualifier: FLAT */ +#line 1209 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "flat"); parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "flat"); parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "flat"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.flat = true; } -#line 5587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5952 "MachineIndependent/glslang_tab.cpp" break; - case 135: -#line 1203 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 135: /* interpolation_qualifier: NOPERSPECTIVE */ +#line 1217 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "noperspective"); parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "noperspective"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.nopersp = true; } -#line 5599 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5964 "MachineIndependent/glslang_tab.cpp" break; - case 136: -#line 1210 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 136: /* interpolation_qualifier: EXPLICITINTERPAMD */ +#line 1224 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "__explicitInterpAMD"); parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.explicitInterp = true; } -#line 5611 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5976 "MachineIndependent/glslang_tab.cpp" break; - case 137: -#line 1217 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 137: /* interpolation_qualifier: PERVERTEXNV */ +#line 1231 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "pervertexNV"); parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 0, 
E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); @@ -5620,12 +5985,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.pervertexNV = true; } -#line 5624 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 5989 "MachineIndependent/glslang_tab.cpp" break; - case 138: -#line 1225 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 138: /* interpolation_qualifier: PERPRIMITIVENV */ +#line 1239 "MachineIndependent/glslang.y" + { // No need for profile version or extension check. Shader stage already checks both. parseContext.globalCheck((yyvsp[0].lex).loc, "perprimitiveNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); @@ -5635,109 +6000,109 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.perPrimitiveNV = true; } -#line 5639 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6004 "MachineIndependent/glslang_tab.cpp" break; - case 139: -#line 1235 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 139: /* interpolation_qualifier: PERVIEWNV */ +#line 1249 "MachineIndependent/glslang.y" + { // No need for profile version or extension check. Shader stage already checks both. parseContext.globalCheck((yyvsp[0].lex).loc, "perviewNV"); parseContext.requireStage((yyvsp[0].lex).loc, EShLangMeshNV, "perviewNV"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.perViewNV = true; } -#line 5651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6016 "MachineIndependent/glslang_tab.cpp" break; - case 140: -#line 1242 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 140: /* interpolation_qualifier: PERTASKNV */ +#line 1256 "MachineIndependent/glslang.y" + { // No need for profile version or extension check. Shader stage already checks both. 
parseContext.globalCheck((yyvsp[0].lex).loc, "taskNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.perTaskNV = true; } -#line 5663 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6028 "MachineIndependent/glslang_tab.cpp" break; - case 141: -#line 1253 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 141: /* layout_qualifier: LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN */ +#line 1267 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[-1].interm.type); } -#line 5671 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6036 "MachineIndependent/glslang_tab.cpp" break; - case 142: -#line 1259 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 142: /* layout_qualifier_id_list: layout_qualifier_id */ +#line 1273 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5679 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6044 "MachineIndependent/glslang_tab.cpp" break; - case 143: -#line 1262 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 143: /* layout_qualifier_id_list: layout_qualifier_id_list COMMA layout_qualifier_id */ +#line 1276 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[-2].interm.type); (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); parseContext.mergeObjectLayoutQualifiers((yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); } -#line 5689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6054 "MachineIndependent/glslang_tab.cpp" break; - case 144: -#line 1269 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 144: /* layout_qualifier_id: IDENTIFIER */ +#line 1283 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), *(yyvsp[0].lex).string); } -#line 5698 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6063 "MachineIndependent/glslang_tab.cpp" break; - case 145: -#line 1273 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 145: /* layout_qualifier_id: IDENTIFIER EQUAL constant_expression */ +#line 1287 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[-2].lex).loc); parseContext.setLayoutQualifier((yyvsp[-2].lex).loc, (yyval.interm.type), *(yyvsp[-2].lex).string, (yyvsp[0].interm.intermTypedNode)); } -#line 5707 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6072 "MachineIndependent/glslang_tab.cpp" break; - case 146: -#line 1277 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { // because "shared" is both an identifier and a keyword + case 146: /* layout_qualifier_id: SHARED */ +#line 1291 "MachineIndependent/glslang.y" + { // because "shared" is both an identifier and a keyword (yyval.interm.type).init((yyvsp[0].lex).loc); TString strShared("shared"); parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), strShared); } -#line 5717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6082 "MachineIndependent/glslang_tab.cpp" break; - case 147: -#line 1286 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 147: /* precise_qualifier: PRECISE */ +#line 1300 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyval.interm.type).loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); 
parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.noContraction = true; } -#line 5728 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6093 "MachineIndependent/glslang_tab.cpp" break; - case 148: -#line 1296 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 148: /* type_qualifier: single_type_qualifier */ +#line 1310 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5736 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6101 "MachineIndependent/glslang_tab.cpp" break; - case 149: -#line 1299 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 149: /* type_qualifier: type_qualifier single_type_qualifier */ +#line 1313 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[-1].interm.type); if ((yyval.interm.type).basicType == EbtVoid) (yyval.interm.type).basicType = (yyvsp[0].interm.type).basicType; @@ -5745,135 +6110,135 @@ yyreduce: (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); parseContext.mergeQualifiers((yyval.interm.type).loc, (yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); } -#line 5749 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6114 "MachineIndependent/glslang_tab.cpp" break; - case 150: -#line 1310 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 150: /* single_type_qualifier: storage_qualifier */ +#line 1324 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5757 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6122 "MachineIndependent/glslang_tab.cpp" break; - case 151: -#line 1313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 151: /* single_type_qualifier: layout_qualifier */ +#line 1327 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5765 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6130 "MachineIndependent/glslang_tab.cpp" break; - case 152: -#line 1316 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 152: /* single_type_qualifier: precision_qualifier */ +#line 1330 "MachineIndependent/glslang.y" + { parseContext.checkPrecisionQualifier((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier.precision); (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5774 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6139 "MachineIndependent/glslang_tab.cpp" break; - case 153: -#line 1320 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 153: /* single_type_qualifier: interpolation_qualifier */ +#line 1334 "MachineIndependent/glslang.y" + { // allow inheritance of storage qualifier from block declaration (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5783 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6148 "MachineIndependent/glslang_tab.cpp" break; - case 154: -#line 1324 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 154: /* single_type_qualifier: invariant_qualifier */ +#line 1338 "MachineIndependent/glslang.y" + { // allow inheritance of storage qualifier from block declaration (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5792 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6157 "MachineIndependent/glslang_tab.cpp" break; - case 155: -#line 1329 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 155: /* single_type_qualifier: 
precise_qualifier */ +#line 1343 "MachineIndependent/glslang.y" + { // allow inheritance of storage qualifier from block declaration (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5801 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6166 "MachineIndependent/glslang_tab.cpp" break; - case 156: -#line 1333 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 156: /* single_type_qualifier: non_uniform_qualifier */ +#line 1347 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); } -#line 5809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6174 "MachineIndependent/glslang_tab.cpp" break; - case 157: -#line 1340 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 157: /* storage_qualifier: CONST */ +#line 1354 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant } -#line 5818 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6183 "MachineIndependent/glslang_tab.cpp" break; - case 158: -#line 1344 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 158: /* storage_qualifier: INOUT */ +#line 1358 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "inout"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqInOut; } -#line 5828 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6193 "MachineIndependent/glslang_tab.cpp" break; - case 159: -#line 1349 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 159: /* storage_qualifier: IN */ +#line 1363 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "in"); (yyval.interm.type).init((yyvsp[0].lex).loc); // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later (yyval.interm.type).qualifier.storage = EvqIn; } -#line 5839 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6204 "MachineIndependent/glslang_tab.cpp" break; - case 160: -#line 1355 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 160: /* storage_qualifier: OUT */ +#line 1369 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "out"); (yyval.interm.type).init((yyvsp[0].lex).loc); // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later (yyval.interm.type).qualifier.storage = EvqOut; } -#line 5850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6215 "MachineIndependent/glslang_tab.cpp" break; - case 161: -#line 1361 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 161: /* storage_qualifier: CENTROID */ +#line 1375 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "centroid"); parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "centroid"); parseContext.globalCheck((yyvsp[0].lex).loc, "centroid"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.centroid = true; } -#line 5862 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6227 "MachineIndependent/glslang_tab.cpp" break; - case 162: -#line 1368 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 162: /* storage_qualifier: UNIFORM */ +#line 1382 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "uniform"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqUniform; } 
-#line 5872 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6237 "MachineIndependent/glslang_tab.cpp" break; - case 163: -#line 1373 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 163: /* storage_qualifier: SHARED */ +#line 1387 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "shared"); parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 310, 0, "shared"); @@ -5881,22 +6246,22 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqShared; } -#line 5885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6250 "MachineIndependent/glslang_tab.cpp" break; - case 164: -#line 1381 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 164: /* storage_qualifier: BUFFER */ +#line 1395 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "buffer"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqBuffer; } -#line 5895 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6260 "MachineIndependent/glslang_tab.cpp" break; - case 165: -#line 1387 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 165: /* storage_qualifier: ATTRIBUTE */ +#line 1401 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangVertex, "attribute"); parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "attribute"); parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "attribute"); @@ -5908,12 +6273,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqVaryingIn; } -#line 5912 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6277 "MachineIndependent/glslang_tab.cpp" break; - case 166: -#line 1399 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 166: /* storage_qualifier: VARYING */ +#line 1413 "MachineIndependent/glslang.y" + { parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "varying"); parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "varying"); parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "varying"); @@ -5927,33 +6292,33 @@ yyreduce: else (yyval.interm.type).qualifier.storage = EvqVaryingIn; } -#line 5931 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6296 "MachineIndependent/glslang_tab.cpp" break; - case 167: -#line 1413 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 167: /* storage_qualifier: PATCH */ +#line 1427 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "patch"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.patch = true; } -#line 5942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6307 "MachineIndependent/glslang_tab.cpp" break; - case 168: -#line 1419 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 168: /* storage_qualifier: SAMPLE */ +#line 1433 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "sample"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.sample = true; } -#line 5952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6317 "MachineIndependent/glslang_tab.cpp" break; 
- case 169: -#line 1424 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 169: /* storage_qualifier: HITATTRNV */ +#line 1438 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask | EShLangAnyHitMask), "hitAttributeNV"); @@ -5961,12 +6326,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqHitAttr; } -#line 5965 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6330 "MachineIndependent/glslang_tab.cpp" break; - case 170: -#line 1432 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 170: /* storage_qualifier: HITATTREXT */ +#line 1446 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeEXT"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask | EShLangAnyHitMask), "hitAttributeEXT"); @@ -5974,12 +6339,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqHitAttr; } -#line 5978 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6343 "MachineIndependent/glslang_tab.cpp" break; - case 171: -#line 1440 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 171: /* storage_qualifier: PAYLOADNV */ +#line 1454 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangAnyHitMask | EShLangMissMask), "rayPayloadNV"); @@ -5987,12 +6352,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqPayload; } -#line 5991 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6356 "MachineIndependent/glslang_tab.cpp" break; - case 172: -#line 1448 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 172: /* storage_qualifier: PAYLOADEXT */ +#line 1462 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadEXT"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); @@ -6000,12 +6365,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqPayload; } -#line 6004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6369 "MachineIndependent/glslang_tab.cpp" break; - case 173: -#line 1456 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 173: /* storage_qualifier: PAYLOADINNV */ +#line 1470 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitMask | EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); @@ -6013,12 +6378,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqPayloadIn; } -#line 6017 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6382 "MachineIndependent/glslang_tab.cpp" break; - case 174: -#line 1464 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 174: /* storage_qualifier: PAYLOADINEXT */ +#line 1478 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInEXT"); parseContext.requireStage((yyvsp[0].lex).loc, 
(EShLanguageMask)(EShLangClosestHitMask | EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); @@ -6026,12 +6391,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqPayloadIn; } -#line 6030 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6395 "MachineIndependent/glslang_tab.cpp" break; - case 175: -#line 1472 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 175: /* storage_qualifier: CALLDATANV */ +#line 1486 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); @@ -6039,12 +6404,12 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqCallableData; } -#line 6043 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6408 "MachineIndependent/glslang_tab.cpp" break; - case 176: -#line 1480 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 176: /* storage_qualifier: CALLDATAEXT */ +#line 1494 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataEXT"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); @@ -6052,222 +6417,222 @@ yyreduce: (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqCallableData; } -#line 6056 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6421 "MachineIndependent/glslang_tab.cpp" break; - case 177: -#line 1488 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 177: /* storage_qualifier: CALLDATAINNV */ +#line 1502 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInNV"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqCallableDataIn; } -#line 6068 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6433 "MachineIndependent/glslang_tab.cpp" break; - case 178: -#line 1495 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 178: /* storage_qualifier: CALLDATAINEXT */ +#line 1509 "MachineIndependent/glslang.y" + { parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInEXT"); parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.storage = EvqCallableDataIn; } -#line 6080 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6445 "MachineIndependent/glslang_tab.cpp" break; - case 179: -#line 1502 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 179: /* storage_qualifier: COHERENT */ +#line 1516 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.coherent = true; } -#line 6089 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6454 "MachineIndependent/glslang_tab.cpp" break; - case 180: -#line 1506 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 180: /* 
storage_qualifier: DEVICECOHERENT */ +#line 1520 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); (yyval.interm.type).qualifier.devicecoherent = true; } -#line 6099 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6464 "MachineIndependent/glslang_tab.cpp" break; - case 181: -#line 1511 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 181: /* storage_qualifier: QUEUEFAMILYCOHERENT */ +#line 1525 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); (yyval.interm.type).qualifier.queuefamilycoherent = true; } -#line 6109 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6474 "MachineIndependent/glslang_tab.cpp" break; - case 182: -#line 1516 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 182: /* storage_qualifier: WORKGROUPCOHERENT */ +#line 1530 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); (yyval.interm.type).qualifier.workgroupcoherent = true; } -#line 6119 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6484 "MachineIndependent/glslang_tab.cpp" break; - case 183: -#line 1521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 183: /* storage_qualifier: SUBGROUPCOHERENT */ +#line 1535 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); (yyval.interm.type).qualifier.subgroupcoherent = true; } -#line 6129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6494 "MachineIndependent/glslang_tab.cpp" break; - case 184: -#line 1526 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 184: /* storage_qualifier: NONPRIVATE */ +#line 1540 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate"); (yyval.interm.type).qualifier.nonprivate = true; } -#line 6139 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6504 "MachineIndependent/glslang_tab.cpp" break; - case 185: -#line 1531 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 185: /* storage_qualifier: SHADERCALLCOHERENT */ +#line 1545 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); (yyval.interm.type).qualifier.shadercallcoherent = true; } -#line 6149 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6514 "MachineIndependent/glslang_tab.cpp" break; - case 186: -#line 1536 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 186: /* storage_qualifier: VOLATILE */ +#line 1550 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.volatil = true; } -#line 6158 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6523 "MachineIndependent/glslang_tab.cpp" break; - case 187: -#line 1540 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 187: /* storage_qualifier: RESTRICT */ +#line 1554 "MachineIndependent/glslang.y" + { 
(yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.restrict = true; } -#line 6167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6532 "MachineIndependent/glslang_tab.cpp" break; - case 188: -#line 1544 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 188: /* storage_qualifier: READONLY */ +#line 1558 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.readonly = true; } -#line 6176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6541 "MachineIndependent/glslang_tab.cpp" break; - case 189: -#line 1548 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 189: /* storage_qualifier: WRITEONLY */ +#line 1562 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.writeonly = true; } -#line 6185 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6550 "MachineIndependent/glslang_tab.cpp" break; - case 190: -#line 1552 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 190: /* storage_qualifier: SUBROUTINE */ +#line 1566 "MachineIndependent/glslang.y" + { parseContext.spvRemoved((yyvsp[0].lex).loc, "subroutine"); parseContext.globalCheck((yyvsp[0].lex).loc, "subroutine"); parseContext.unimplemented((yyvsp[0].lex).loc, "subroutine"); (yyval.interm.type).init((yyvsp[0].lex).loc); } -#line 6196 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6561 "MachineIndependent/glslang_tab.cpp" break; - case 191: -#line 1558 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 191: /* storage_qualifier: SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN */ +#line 1572 "MachineIndependent/glslang.y" + { parseContext.spvRemoved((yyvsp[-3].lex).loc, "subroutine"); parseContext.globalCheck((yyvsp[-3].lex).loc, "subroutine"); parseContext.unimplemented((yyvsp[-3].lex).loc, "subroutine"); (yyval.interm.type).init((yyvsp[-3].lex).loc); } -#line 6207 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6572 "MachineIndependent/glslang_tab.cpp" break; - case 192: -#line 1569 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 192: /* non_uniform_qualifier: NONUNIFORM */ +#line 1583 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc); (yyval.interm.type).qualifier.nonUniform = true; } -#line 6216 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6581 "MachineIndependent/glslang_tab.cpp" break; - case 193: -#line 1576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 193: /* type_name_list: IDENTIFIER */ +#line 1590 "MachineIndependent/glslang.y" + { // TODO } -#line 6224 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6589 "MachineIndependent/glslang_tab.cpp" break; - case 194: -#line 1579 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 194: /* type_name_list: type_name_list COMMA IDENTIFIER */ +#line 1593 "MachineIndependent/glslang.y" + { // TODO: 4.0 semantics: subroutines // 1) make sure each identifier is a type declared earlier with SUBROUTINE // 2) save all of the identifiers for future comparison with the declared function } -#line 6234 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6599 "MachineIndependent/glslang_tab.cpp" break; - case 195: -#line 1588 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 195: /* type_specifier: type_specifier_nonarray type_parameter_specifier_opt */ +#line 1602 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[-1].interm.type); 
(yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); (yyval.interm.type).typeParameters = (yyvsp[0].interm.typeParameters); } -#line 6244 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6609 "MachineIndependent/glslang_tab.cpp" break; - case 196: -#line 1593 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 196: /* type_specifier: type_specifier_nonarray type_parameter_specifier_opt array_specifier */ +#line 1607 "MachineIndependent/glslang.y" + { parseContext.arrayOfArrayVersionCheck((yyvsp[0].interm).loc, (yyvsp[0].interm).arraySizes); (yyval.interm.type) = (yyvsp[-2].interm.type); (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); (yyval.interm.type).typeParameters = (yyvsp[-1].interm.typeParameters); (yyval.interm.type).arraySizes = (yyvsp[0].interm).arraySizes; } -#line 6256 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6621 "MachineIndependent/glslang_tab.cpp" break; - case 197: -#line 1603 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 197: /* array_specifier: LEFT_BRACKET RIGHT_BRACKET */ +#line 1617 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[-1].lex).loc; (yyval.interm).arraySizes = new TArraySizes; (yyval.interm).arraySizes->addInnerSize(); } -#line 6266 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6631 "MachineIndependent/glslang_tab.cpp" break; - case 198: -#line 1608 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 198: /* array_specifier: LEFT_BRACKET conditional_expression RIGHT_BRACKET */ +#line 1622 "MachineIndependent/glslang.y" + { (yyval.interm).loc = (yyvsp[-2].lex).loc; (yyval.interm).arraySizes = new TArraySizes; @@ -6275,492 +6640,492 @@ yyreduce: parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); (yyval.interm).arraySizes->addInnerSize(size); } -#line 6279 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6644 "MachineIndependent/glslang_tab.cpp" break; - case 199: -#line 1616 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 199: /* array_specifier: array_specifier LEFT_BRACKET RIGHT_BRACKET */ +#line 1630 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-2].interm); (yyval.interm).arraySizes->addInnerSize(); } -#line 6288 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6653 "MachineIndependent/glslang_tab.cpp" break; - case 200: -#line 1620 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 200: /* array_specifier: array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET */ +#line 1634 "MachineIndependent/glslang.y" + { (yyval.interm) = (yyvsp[-3].interm); TArraySize size; parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); (yyval.interm).arraySizes->addInnerSize(size); } -#line 6300 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6665 "MachineIndependent/glslang_tab.cpp" break; - case 201: -#line 1630 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 201: /* type_parameter_specifier_opt: type_parameter_specifier */ +#line 1644 "MachineIndependent/glslang.y" + { (yyval.interm.typeParameters) = (yyvsp[0].interm.typeParameters); } -#line 6308 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6673 "MachineIndependent/glslang_tab.cpp" break; - case 202: -#line 1633 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 
202: /* type_parameter_specifier_opt: %empty */ +#line 1647 "MachineIndependent/glslang.y" + { (yyval.interm.typeParameters) = 0; } -#line 6316 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6681 "MachineIndependent/glslang_tab.cpp" break; - case 203: -#line 1639 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 203: /* type_parameter_specifier: LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE */ +#line 1653 "MachineIndependent/glslang.y" + { (yyval.interm.typeParameters) = (yyvsp[-1].interm.typeParameters); } -#line 6324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6689 "MachineIndependent/glslang_tab.cpp" break; - case 204: -#line 1645 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 204: /* type_parameter_specifier_list: unary_expression */ +#line 1659 "MachineIndependent/glslang.y" + { (yyval.interm.typeParameters) = new TArraySizes; TArraySize size; parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); (yyval.interm.typeParameters)->addInnerSize(size); } -#line 6336 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6701 "MachineIndependent/glslang_tab.cpp" break; - case 205: -#line 1652 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 205: /* type_parameter_specifier_list: type_parameter_specifier_list COMMA unary_expression */ +#line 1666 "MachineIndependent/glslang.y" + { (yyval.interm.typeParameters) = (yyvsp[-2].interm.typeParameters); TArraySize size; parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); (yyval.interm.typeParameters)->addInnerSize(size); } -#line 6348 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6713 "MachineIndependent/glslang_tab.cpp" break; - case 206: -#line 1662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 206: /* type_specifier_nonarray: VOID */ +#line 1676 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtVoid; } -#line 6357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6722 "MachineIndependent/glslang_tab.cpp" break; - case 207: -#line 1666 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 207: /* type_specifier_nonarray: FLOAT */ +#line 1680 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; } -#line 6366 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6731 "MachineIndependent/glslang_tab.cpp" break; - case 208: -#line 1670 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 208: /* type_specifier_nonarray: INT */ +#line 1684 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; } -#line 6375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6740 "MachineIndependent/glslang_tab.cpp" break; - case 209: -#line 1674 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 209: /* type_specifier_nonarray: UINT */ +#line 1688 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; } -#line 6385 "MachineIndependent/glslang_tab.cpp" 
/* yacc.c:1646 */ +#line 6750 "MachineIndependent/glslang_tab.cpp" break; - case 210: -#line 1679 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 210: /* type_specifier_nonarray: BOOL */ +#line 1693 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtBool; } -#line 6394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6759 "MachineIndependent/glslang_tab.cpp" break; - case 211: -#line 1683 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 211: /* type_specifier_nonarray: VEC2 */ +#line 1697 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(2); } -#line 6404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6769 "MachineIndependent/glslang_tab.cpp" break; - case 212: -#line 1688 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 212: /* type_specifier_nonarray: VEC3 */ +#line 1702 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(3); } -#line 6414 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6779 "MachineIndependent/glslang_tab.cpp" break; - case 213: -#line 1693 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 213: /* type_specifier_nonarray: VEC4 */ +#line 1707 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(4); } -#line 6424 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6789 "MachineIndependent/glslang_tab.cpp" break; - case 214: -#line 1698 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 214: /* type_specifier_nonarray: BVEC2 */ +#line 1712 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtBool; (yyval.interm.type).setVector(2); } -#line 6434 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6799 "MachineIndependent/glslang_tab.cpp" break; - case 215: -#line 1703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 215: /* type_specifier_nonarray: BVEC3 */ +#line 1717 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtBool; (yyval.interm.type).setVector(3); } -#line 6444 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6809 "MachineIndependent/glslang_tab.cpp" break; - case 216: -#line 1708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 216: /* type_specifier_nonarray: BVEC4 */ +#line 1722 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtBool; (yyval.interm.type).setVector(4); } -#line 6454 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6819 "MachineIndependent/glslang_tab.cpp" break; - case 217: -#line 1713 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 217: /* type_specifier_nonarray: IVEC2 */ +#line 1727 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); 
(yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(2); } -#line 6464 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6829 "MachineIndependent/glslang_tab.cpp" break; - case 218: -#line 1718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 218: /* type_specifier_nonarray: IVEC3 */ +#line 1732 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(3); } -#line 6474 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6839 "MachineIndependent/glslang_tab.cpp" break; - case 219: -#line 1723 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 219: /* type_specifier_nonarray: IVEC4 */ +#line 1737 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(4); } -#line 6484 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6849 "MachineIndependent/glslang_tab.cpp" break; - case 220: -#line 1728 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 220: /* type_specifier_nonarray: UVEC2 */ +#line 1742 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(2); } -#line 6495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6860 "MachineIndependent/glslang_tab.cpp" break; - case 221: -#line 1734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 221: /* type_specifier_nonarray: UVEC3 */ +#line 1748 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(3); } -#line 6506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6871 "MachineIndependent/glslang_tab.cpp" break; - case 222: -#line 1740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 222: /* type_specifier_nonarray: UVEC4 */ +#line 1754 "MachineIndependent/glslang.y" + { parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(4); } -#line 6517 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6882 "MachineIndependent/glslang_tab.cpp" break; - case 223: -#line 1746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 223: /* type_specifier_nonarray: MAT2 */ +#line 1760 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 2); } -#line 6527 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6892 "MachineIndependent/glslang_tab.cpp" break; - case 224: -#line 1751 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 224: /* type_specifier_nonarray: MAT3 */ +#line 1765 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 3); } -#line 6537 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6902 "MachineIndependent/glslang_tab.cpp" break; - case 225: -#line 1756 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 225: /* type_specifier_nonarray: MAT4 */ +#line 1770 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 4); } -#line 6547 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6912 "MachineIndependent/glslang_tab.cpp" break; - case 226: -#line 1761 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 226: /* type_specifier_nonarray: MAT2X2 */ +#line 1775 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 2); } -#line 6557 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6922 "MachineIndependent/glslang_tab.cpp" break; - case 227: -#line 1766 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 227: /* type_specifier_nonarray: MAT2X3 */ +#line 1780 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 3); } -#line 6567 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6932 "MachineIndependent/glslang_tab.cpp" break; - case 228: -#line 1771 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 228: /* type_specifier_nonarray: MAT2X4 */ +#line 1785 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 4); } -#line 6577 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6942 "MachineIndependent/glslang_tab.cpp" break; - case 229: -#line 1776 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 229: /* type_specifier_nonarray: MAT3X2 */ +#line 1790 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 2); } -#line 6587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6952 "MachineIndependent/glslang_tab.cpp" break; - case 230: -#line 1781 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 230: /* type_specifier_nonarray: MAT3X3 */ +#line 1795 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 3); } -#line 6597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6962 "MachineIndependent/glslang_tab.cpp" break; - case 231: -#line 1786 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 231: /* type_specifier_nonarray: MAT3X4 */ +#line 1800 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 4); } -#line 6607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6972 "MachineIndependent/glslang_tab.cpp" break; - case 232: -#line 1791 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 232: /* type_specifier_nonarray: MAT4X2 */ +#line 1805 "MachineIndependent/glslang.y" + { 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 2); } -#line 6617 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6982 "MachineIndependent/glslang_tab.cpp" break; - case 233: -#line 1796 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 233: /* type_specifier_nonarray: MAT4X3 */ +#line 1810 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 3); } -#line 6627 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 6992 "MachineIndependent/glslang_tab.cpp" break; - case 234: -#line 1801 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 234: /* type_specifier_nonarray: MAT4X4 */ +#line 1815 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 4); } -#line 6637 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7002 "MachineIndependent/glslang_tab.cpp" break; - case 235: -#line 1807 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 235: /* type_specifier_nonarray: DOUBLE */ +#line 1821 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; } -#line 6649 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7014 "MachineIndependent/glslang_tab.cpp" break; - case 236: -#line 1814 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 236: /* type_specifier_nonarray: FLOAT16_T */ +#line 1828 "MachineIndependent/glslang.y" + { parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; } -#line 6659 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7024 "MachineIndependent/glslang_tab.cpp" break; - case 237: -#line 1819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 237: /* type_specifier_nonarray: FLOAT32_T */ +#line 1833 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; } -#line 6669 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7034 "MachineIndependent/glslang_tab.cpp" break; - case 238: -#line 1824 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 238: /* type_specifier_nonarray: FLOAT64_T */ +#line 1838 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; } -#line 6679 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7044 "MachineIndependent/glslang_tab.cpp" break; - case 239: -#line 1829 "MachineIndependent/glslang.y" /* 
yacc.c:1646 */ - { + case 239: /* type_specifier_nonarray: INT8_T */ +#line 1843 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt8; } -#line 6689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7054 "MachineIndependent/glslang_tab.cpp" break; - case 240: -#line 1834 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 240: /* type_specifier_nonarray: UINT8_T */ +#line 1848 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint8; } -#line 6699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7064 "MachineIndependent/glslang_tab.cpp" break; - case 241: -#line 1839 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 241: /* type_specifier_nonarray: INT16_T */ +#line 1853 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt16; } -#line 6709 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7074 "MachineIndependent/glslang_tab.cpp" break; - case 242: -#line 1844 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 242: /* type_specifier_nonarray: UINT16_T */ +#line 1858 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint16; } -#line 6719 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7084 "MachineIndependent/glslang_tab.cpp" break; - case 243: -#line 1849 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 243: /* type_specifier_nonarray: INT32_T */ +#line 1863 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; } -#line 6729 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7094 "MachineIndependent/glslang_tab.cpp" break; - case 244: -#line 1854 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 244: /* type_specifier_nonarray: UINT32_T */ +#line 1868 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; } -#line 6739 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7104 "MachineIndependent/glslang_tab.cpp" break; - case 245: -#line 1859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 245: /* type_specifier_nonarray: INT64_T */ +#line 1873 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt64; } -#line 6749 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7114 "MachineIndependent/glslang_tab.cpp" break; - case 246: -#line 1864 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 246: /* type_specifier_nonarray: UINT64_T */ +#line 1878 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint64; } -#line 6759 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7124 "MachineIndependent/glslang_tab.cpp" break; - case 247: -#line 1869 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 247: /* type_specifier_nonarray: DVEC2 */ +#line 1883 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); @@ -6768,12 +7133,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(2); } -#line 6772 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7137 "MachineIndependent/glslang_tab.cpp" break; - case 248: -#line 1877 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 248: /* type_specifier_nonarray: DVEC3 */ +#line 1891 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); @@ -6781,12 +7146,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(3); } -#line 6785 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7150 "MachineIndependent/glslang_tab.cpp" break; - case 249: -#line 1885 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 249: /* type_specifier_nonarray: DVEC4 */ +#line 1899 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); if (! 
parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); @@ -6794,375 +7159,375 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(4); } -#line 6798 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7163 "MachineIndependent/glslang_tab.cpp" break; - case 250: -#line 1893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 250: /* type_specifier_nonarray: F16VEC2 */ +#line 1907 "MachineIndependent/glslang.y" + { parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setVector(2); } -#line 6809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7174 "MachineIndependent/glslang_tab.cpp" break; - case 251: -#line 1899 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 251: /* type_specifier_nonarray: F16VEC3 */ +#line 1913 "MachineIndependent/glslang.y" + { parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setVector(3); } -#line 6820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7185 "MachineIndependent/glslang_tab.cpp" break; - case 252: -#line 1905 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 252: /* type_specifier_nonarray: F16VEC4 */ +#line 1919 "MachineIndependent/glslang.y" + { parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setVector(4); } -#line 6831 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7196 "MachineIndependent/glslang_tab.cpp" break; - case 253: -#line 1911 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 253: /* type_specifier_nonarray: F32VEC2 */ +#line 1925 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(2); } -#line 6842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7207 "MachineIndependent/glslang_tab.cpp" break; - case 254: -#line 1917 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 254: /* type_specifier_nonarray: F32VEC3 */ +#line 1931 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(3); } -#line 6853 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7218 "MachineIndependent/glslang_tab.cpp" break; - case 255: -#line 1923 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 255: /* type_specifier_nonarray: F32VEC4 */ +#line 1937 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", 
parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setVector(4); } -#line 6864 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7229 "MachineIndependent/glslang_tab.cpp" break; - case 256: -#line 1929 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 256: /* type_specifier_nonarray: F64VEC2 */ +#line 1943 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(2); } -#line 6875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7240 "MachineIndependent/glslang_tab.cpp" break; - case 257: -#line 1935 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 257: /* type_specifier_nonarray: F64VEC3 */ +#line 1949 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(3); } -#line 6886 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7251 "MachineIndependent/glslang_tab.cpp" break; - case 258: -#line 1941 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 258: /* type_specifier_nonarray: F64VEC4 */ +#line 1955 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setVector(4); } -#line 6897 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7262 "MachineIndependent/glslang_tab.cpp" break; - case 259: -#line 1947 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 259: /* type_specifier_nonarray: I8VEC2 */ +#line 1961 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt8; (yyval.interm.type).setVector(2); } -#line 6908 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7273 "MachineIndependent/glslang_tab.cpp" break; - case 260: -#line 1953 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 260: /* type_specifier_nonarray: I8VEC3 */ +#line 1967 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt8; (yyval.interm.type).setVector(3); } -#line 6919 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7284 "MachineIndependent/glslang_tab.cpp" break; - case 261: -#line 1959 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 261: /* type_specifier_nonarray: I8VEC4 */ +#line 1973 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", 
parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt8; (yyval.interm.type).setVector(4); } -#line 6930 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7295 "MachineIndependent/glslang_tab.cpp" break; - case 262: -#line 1965 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 262: /* type_specifier_nonarray: I16VEC2 */ +#line 1979 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt16; (yyval.interm.type).setVector(2); } -#line 6941 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7306 "MachineIndependent/glslang_tab.cpp" break; - case 263: -#line 1971 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 263: /* type_specifier_nonarray: I16VEC3 */ +#line 1985 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt16; (yyval.interm.type).setVector(3); } -#line 6952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7317 "MachineIndependent/glslang_tab.cpp" break; - case 264: -#line 1977 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 264: /* type_specifier_nonarray: I16VEC4 */ +#line 1991 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt16; (yyval.interm.type).setVector(4); } -#line 6963 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7328 "MachineIndependent/glslang_tab.cpp" break; - case 265: -#line 1983 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 265: /* type_specifier_nonarray: I32VEC2 */ +#line 1997 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(2); } -#line 6974 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7339 "MachineIndependent/glslang_tab.cpp" break; - case 266: -#line 1989 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 266: /* type_specifier_nonarray: I32VEC3 */ +#line 2003 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(3); } -#line 6985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7350 "MachineIndependent/glslang_tab.cpp" break; - case 267: -#line 1995 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 267: /* type_specifier_nonarray: I32VEC4 */ +#line 2009 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer 
vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).setVector(4); } -#line 6996 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7361 "MachineIndependent/glslang_tab.cpp" break; - case 268: -#line 2001 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 268: /* type_specifier_nonarray: I64VEC2 */ +#line 2015 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt64; (yyval.interm.type).setVector(2); } -#line 7007 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7372 "MachineIndependent/glslang_tab.cpp" break; - case 269: -#line 2007 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 269: /* type_specifier_nonarray: I64VEC3 */ +#line 2021 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt64; (yyval.interm.type).setVector(3); } -#line 7018 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7383 "MachineIndependent/glslang_tab.cpp" break; - case 270: -#line 2013 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 270: /* type_specifier_nonarray: I64VEC4 */ +#line 2027 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt64; (yyval.interm.type).setVector(4); } -#line 7029 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7394 "MachineIndependent/glslang_tab.cpp" break; - case 271: -#line 2019 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 271: /* type_specifier_nonarray: U8VEC2 */ +#line 2033 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint8; (yyval.interm.type).setVector(2); } -#line 7040 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7405 "MachineIndependent/glslang_tab.cpp" break; - case 272: -#line 2025 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 272: /* type_specifier_nonarray: U8VEC3 */ +#line 2039 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint8; (yyval.interm.type).setVector(3); } -#line 7051 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7416 "MachineIndependent/glslang_tab.cpp" break; - case 273: -#line 2031 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 273: /* type_specifier_nonarray: U8VEC4 */ +#line 2045 "MachineIndependent/glslang.y" + { parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", 
parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint8; (yyval.interm.type).setVector(4); } -#line 7062 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7427 "MachineIndependent/glslang_tab.cpp" break; - case 274: -#line 2037 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 274: /* type_specifier_nonarray: U16VEC2 */ +#line 2051 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint16; (yyval.interm.type).setVector(2); } -#line 7073 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7438 "MachineIndependent/glslang_tab.cpp" break; - case 275: -#line 2043 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 275: /* type_specifier_nonarray: U16VEC3 */ +#line 2057 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint16; (yyval.interm.type).setVector(3); } -#line 7084 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7449 "MachineIndependent/glslang_tab.cpp" break; - case 276: -#line 2049 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 276: /* type_specifier_nonarray: U16VEC4 */ +#line 2063 "MachineIndependent/glslang.y" + { parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint16; (yyval.interm.type).setVector(4); } -#line 7095 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7460 "MachineIndependent/glslang_tab.cpp" break; - case 277: -#line 2055 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 277: /* type_specifier_nonarray: U32VEC2 */ +#line 2069 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(2); } -#line 7106 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7471 "MachineIndependent/glslang_tab.cpp" break; - case 278: -#line 2061 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 278: /* type_specifier_nonarray: U32VEC3 */ +#line 2075 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(3); } -#line 7117 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7482 "MachineIndependent/glslang_tab.cpp" break; - case 279: -#line 2067 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 279: /* type_specifier_nonarray: U32VEC4 */ +#line 2081 "MachineIndependent/glslang.y" + { parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit 
unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).setVector(4); } -#line 7128 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7493 "MachineIndependent/glslang_tab.cpp" break; - case 280: -#line 2073 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 280: /* type_specifier_nonarray: U64VEC2 */ +#line 2087 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint64; (yyval.interm.type).setVector(2); } -#line 7139 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7504 "MachineIndependent/glslang_tab.cpp" break; - case 281: -#line 2079 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 281: /* type_specifier_nonarray: U64VEC3 */ +#line 2093 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint64; (yyval.interm.type).setVector(3); } -#line 7150 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7515 "MachineIndependent/glslang_tab.cpp" break; - case 282: -#line 2085 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 282: /* type_specifier_nonarray: U64VEC4 */ +#line 2099 "MachineIndependent/glslang.y" + { parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint64; (yyval.interm.type).setVector(4); } -#line 7161 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7526 "MachineIndependent/glslang_tab.cpp" break; - case 283: -#line 2091 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 283: /* type_specifier_nonarray: DMAT2 */ +#line 2105 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7170,12 +7535,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 2); } -#line 7174 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7539 "MachineIndependent/glslang_tab.cpp" break; - case 284: -#line 2099 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 284: /* type_specifier_nonarray: DMAT3 */ +#line 2113 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! 
parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7183,12 +7548,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 3); } -#line 7187 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7552 "MachineIndependent/glslang_tab.cpp" break; - case 285: -#line 2107 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 285: /* type_specifier_nonarray: DMAT4 */ +#line 2121 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7196,12 +7561,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 4); } -#line 7200 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7565 "MachineIndependent/glslang_tab.cpp" break; - case 286: -#line 2115 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 286: /* type_specifier_nonarray: DMAT2X2 */ +#line 2129 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7209,12 +7574,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 2); } -#line 7213 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7578 "MachineIndependent/glslang_tab.cpp" break; - case 287: -#line 2123 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 287: /* type_specifier_nonarray: DMAT2X3 */ +#line 2137 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7222,12 +7587,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 3); } -#line 7226 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7591 "MachineIndependent/glslang_tab.cpp" break; - case 288: -#line 2131 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 288: /* type_specifier_nonarray: DMAT2X4 */ +#line 2145 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7235,12 +7600,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 4); } -#line 7239 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7604 "MachineIndependent/glslang_tab.cpp" break; - case 289: -#line 2139 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 289: /* type_specifier_nonarray: DMAT3X2 */ +#line 2153 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! 
parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7248,12 +7613,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 2); } -#line 7252 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7617 "MachineIndependent/glslang_tab.cpp" break; - case 290: -#line 2147 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 290: /* type_specifier_nonarray: DMAT3X3 */ +#line 2161 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7261,12 +7626,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 3); } -#line 7265 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7630 "MachineIndependent/glslang_tab.cpp" break; - case 291: -#line 2155 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 291: /* type_specifier_nonarray: DMAT3X4 */ +#line 2169 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7274,12 +7639,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 4); } -#line 7278 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7643 "MachineIndependent/glslang_tab.cpp" break; - case 292: -#line 2163 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 292: /* type_specifier_nonarray: DMAT4X2 */ +#line 2177 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7287,12 +7652,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 2); } -#line 7291 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7656 "MachineIndependent/glslang_tab.cpp" break; - case 293: -#line 2171 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 293: /* type_specifier_nonarray: DMAT4X3 */ +#line 2185 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7300,12 +7665,12 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 3); } -#line 7304 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7669 "MachineIndependent/glslang_tab.cpp" break; - case 294: -#line 2179 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 294: /* type_specifier_nonarray: DMAT4X4 */ +#line 2193 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); if (! 
parseContext.symbolTable.atBuiltInLevel()) parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); @@ -7313,2120 +7678,2340 @@ yyreduce: (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 4); } -#line 7317 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7682 "MachineIndependent/glslang_tab.cpp" break; - case 295: -#line 2187 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 295: /* type_specifier_nonarray: F16MAT2 */ +#line 2201 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(2, 2); } -#line 7328 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7693 "MachineIndependent/glslang_tab.cpp" break; - case 296: -#line 2193 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 296: /* type_specifier_nonarray: F16MAT3 */ +#line 2207 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(3, 3); } -#line 7339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7704 "MachineIndependent/glslang_tab.cpp" break; - case 297: -#line 2199 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 297: /* type_specifier_nonarray: F16MAT4 */ +#line 2213 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(4, 4); } -#line 7350 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7715 "MachineIndependent/glslang_tab.cpp" break; - case 298: -#line 2205 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 298: /* type_specifier_nonarray: F16MAT2X2 */ +#line 2219 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(2, 2); } -#line 7361 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7726 "MachineIndependent/glslang_tab.cpp" break; - case 299: -#line 2211 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 299: /* type_specifier_nonarray: F16MAT2X3 */ +#line 2225 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(2, 3); } -#line 7372 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7737 "MachineIndependent/glslang_tab.cpp" break; - case 300: -#line 2217 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 300: /* type_specifier_nonarray: F16MAT2X4 */ +#line 2231 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(2, 4); } -#line 7383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7748 "MachineIndependent/glslang_tab.cpp" break; - case 301: -#line 2223 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 301: /* type_specifier_nonarray: F16MAT3X2 */ +#line 2237 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(3, 2); } -#line 7394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7759 "MachineIndependent/glslang_tab.cpp" break; - case 302: -#line 2229 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 302: /* type_specifier_nonarray: F16MAT3X3 */ +#line 2243 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(3, 3); } -#line 7405 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7770 "MachineIndependent/glslang_tab.cpp" break; - case 303: -#line 2235 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 303: /* type_specifier_nonarray: F16MAT3X4 */ +#line 2249 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(3, 4); } -#line 7416 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7781 "MachineIndependent/glslang_tab.cpp" break; - case 304: -#line 2241 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 304: /* type_specifier_nonarray: F16MAT4X2 */ +#line 2255 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(4, 2); } -#line 7427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7792 "MachineIndependent/glslang_tab.cpp" break; - case 305: -#line 2247 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 305: /* type_specifier_nonarray: F16MAT4X3 */ +#line 2261 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(4, 3); } -#line 7438 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7803 "MachineIndependent/glslang_tab.cpp" break; - case 306: -#line 2253 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 306: /* type_specifier_nonarray: F16MAT4X4 */ +#line 2267 "MachineIndependent/glslang.y" + { parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat16; (yyval.interm.type).setMatrix(4, 4); } -#line 7449 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7814 "MachineIndependent/glslang_tab.cpp" break; - case 307: -#line 2259 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 307: /* type_specifier_nonarray: F32MAT2 */ +#line 2273 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 2); } -#line 7460 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7825 "MachineIndependent/glslang_tab.cpp" break; - case 308: -#line 2265 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 308: /* type_specifier_nonarray: F32MAT3 */ +#line 2279 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 3); } -#line 7471 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7836 "MachineIndependent/glslang_tab.cpp" break; - case 309: -#line 2271 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 309: /* type_specifier_nonarray: F32MAT4 */ +#line 2285 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 4); } -#line 7482 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7847 "MachineIndependent/glslang_tab.cpp" break; - case 310: -#line 2277 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 310: /* type_specifier_nonarray: F32MAT2X2 */ +#line 2291 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 2); } -#line 7493 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7858 "MachineIndependent/glslang_tab.cpp" break; - case 311: -#line 2283 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 311: /* type_specifier_nonarray: F32MAT2X3 */ +#line 2297 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 3); } -#line 7504 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7869 "MachineIndependent/glslang_tab.cpp" break; - case 312: -#line 2289 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 312: /* type_specifier_nonarray: F32MAT2X4 */ +#line 2303 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(2, 4); } -#line 7515 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7880 "MachineIndependent/glslang_tab.cpp" break; - case 313: -#line 2295 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 313: /* type_specifier_nonarray: F32MAT3X2 */ +#line 2309 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 2); } -#line 7526 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7891 "MachineIndependent/glslang_tab.cpp" break; - case 314: -#line 2301 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 314: /* type_specifier_nonarray: F32MAT3X3 */ +#line 2315 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 3); } -#line 7537 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7902 "MachineIndependent/glslang_tab.cpp" break; - case 315: -#line 2307 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 315: /* type_specifier_nonarray: F32MAT3X4 */ +#line 2321 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(3, 4); } -#line 7548 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7913 "MachineIndependent/glslang_tab.cpp" break; - case 316: -#line 2313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 316: /* type_specifier_nonarray: F32MAT4X2 */ +#line 2327 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 2); } -#line 7559 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7924 "MachineIndependent/glslang_tab.cpp" break; - case 317: -#line 2319 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 317: /* type_specifier_nonarray: F32MAT4X3 */ +#line 2333 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 3); } -#line 7570 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7935 "MachineIndependent/glslang_tab.cpp" break; - case 318: -#line 2325 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 318: /* type_specifier_nonarray: F32MAT4X4 */ +#line 2339 "MachineIndependent/glslang.y" + { parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).setMatrix(4, 4); } -#line 7581 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7946 "MachineIndependent/glslang_tab.cpp" break; - case 319: -#line 2331 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 319: /* type_specifier_nonarray: F64MAT2 */ +#line 2345 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 2); } -#line 7592 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7957 "MachineIndependent/glslang_tab.cpp" break; - case 320: -#line 2337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 320: /* type_specifier_nonarray: F64MAT3 */ +#line 2351 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 3); } -#line 7603 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7968 "MachineIndependent/glslang_tab.cpp" break; - case 321: -#line 2343 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 321: /* type_specifier_nonarray: F64MAT4 */ +#line 2357 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 4); } -#line 7614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7979 "MachineIndependent/glslang_tab.cpp" break; - case 322: -#line 2349 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 322: /* type_specifier_nonarray: F64MAT2X2 */ +#line 2363 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 2); } -#line 7625 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 7990 "MachineIndependent/glslang_tab.cpp" break; - case 323: -#line 2355 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 323: /* type_specifier_nonarray: F64MAT2X3 */ +#line 2369 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 3); } -#line 7636 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8001 "MachineIndependent/glslang_tab.cpp" break; - case 324: -#line 2361 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 324: /* type_specifier_nonarray: F64MAT2X4 */ +#line 2375 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(2, 4); } -#line 7647 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8012 "MachineIndependent/glslang_tab.cpp" break; - case 325: -#line 2367 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 325: /* type_specifier_nonarray: F64MAT3X2 */ +#line 2381 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 2); } -#line 7658 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8023 "MachineIndependent/glslang_tab.cpp" break; - case 326: -#line 2373 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 326: /* type_specifier_nonarray: F64MAT3X3 */ +#line 2387 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 3); } -#line 7669 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8034 "MachineIndependent/glslang_tab.cpp" break; - case 327: -#line 2379 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 327: /* type_specifier_nonarray: F64MAT3X4 */ +#line 2393 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(3, 4); } -#line 7680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8045 "MachineIndependent/glslang_tab.cpp" break; - case 328: -#line 2385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 328: /* type_specifier_nonarray: F64MAT4X2 */ +#line 2399 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 2); } -#line 7691 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8056 "MachineIndependent/glslang_tab.cpp" break; - case 329: -#line 2391 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 329: /* type_specifier_nonarray: F64MAT4X3 */ +#line 2405 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 3); } -#line 7702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8067 "MachineIndependent/glslang_tab.cpp" break; - case 330: -#line 2397 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 330: /* type_specifier_nonarray: F64MAT4X4 */ +#line 2411 "MachineIndependent/glslang.y" + { parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtDouble; (yyval.interm.type).setMatrix(4, 4); } -#line 7713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8078 "MachineIndependent/glslang_tab.cpp" break; - case 331: -#line 2403 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 331: /* type_specifier_nonarray: ACCSTRUCTNV */ +#line 2417 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtAccStruct; } -#line 7722 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8087 "MachineIndependent/glslang_tab.cpp" break; - case 332: -#line 2407 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 332: /* type_specifier_nonarray: ACCSTRUCTEXT */ +#line 2421 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtAccStruct; } -#line 7731 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8096 "MachineIndependent/glslang_tab.cpp" break; - case 333: -#line 2411 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 333: /* type_specifier_nonarray: RAYQUERYEXT */ +#line 2425 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtRayQuery; } -#line 7740 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8105 "MachineIndependent/glslang_tab.cpp" break; - case 334: -#line 2415 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 334: /* type_specifier_nonarray: ATOMIC_UINT */ +#line 2429 "MachineIndependent/glslang.y" + { parseContext.vulkanRemoved((yyvsp[0].lex).loc, "atomic counter types"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtAtomicUint; } -#line 7750 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8115 "MachineIndependent/glslang_tab.cpp" break; - case 335: -#line 2420 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 335: /* type_specifier_nonarray: SAMPLER1D */ +#line 2434 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd1D); } -#line 7760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8125 "MachineIndependent/glslang_tab.cpp" break; - case 336: -#line 2426 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 336: /* type_specifier_nonarray: SAMPLER2D */ +#line 2440 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D); } -#line 7770 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8135 "MachineIndependent/glslang_tab.cpp" break; - case 337: -#line 2431 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 337: /* type_specifier_nonarray: SAMPLER3D */ +#line 2445 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd3D); } -#line 7780 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8145 "MachineIndependent/glslang_tab.cpp" break; - case 338: -#line 2436 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 338: /* type_specifier_nonarray: SAMPLERCUBE */ +#line 2450 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdCube); } -#line 7790 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8155 "MachineIndependent/glslang_tab.cpp" break; - case 339: -#line 2441 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 339: /* type_specifier_nonarray: SAMPLER2DSHADOW */ +#line 2455 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, true); } -#line 7800 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8165 "MachineIndependent/glslang_tab.cpp" break; - case 340: -#line 2446 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 340: /* type_specifier_nonarray: SAMPLERCUBESHADOW */ +#line 2460 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdCube, false, true); } -#line 7810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8175 "MachineIndependent/glslang_tab.cpp" break; - case 341: -#line 2451 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 341: /* type_specifier_nonarray: SAMPLER2DARRAY */ +#line 2465 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true); } -#line 7820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8185 "MachineIndependent/glslang_tab.cpp" break; - case 342: -#line 2456 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 342: /* type_specifier_nonarray: SAMPLER2DARRAYSHADOW */ +#line 2470 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, true); } -#line 7830 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8195 "MachineIndependent/glslang_tab.cpp" break; - case 343: -#line 2462 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 343: /* type_specifier_nonarray: SAMPLER1DSHADOW */ +#line 2476 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd1D, false, true); } -#line 7840 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8205 "MachineIndependent/glslang_tab.cpp" break; - case 344: -#line 2467 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 344: /* type_specifier_nonarray: SAMPLER1DARRAY */ +#line 2481 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true); } -#line 7850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8215 "MachineIndependent/glslang_tab.cpp" break; - case 345: -#line 2472 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + 
case 345: /* type_specifier_nonarray: SAMPLER1DARRAYSHADOW */ +#line 2486 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true, true); } -#line 7860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8225 "MachineIndependent/glslang_tab.cpp" break; - case 346: -#line 2477 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 346: /* type_specifier_nonarray: SAMPLERCUBEARRAY */ +#line 2491 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true); } -#line 7870 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8235 "MachineIndependent/glslang_tab.cpp" break; - case 347: -#line 2482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 347: /* type_specifier_nonarray: SAMPLERCUBEARRAYSHADOW */ +#line 2496 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true, true); } -#line 7880 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8245 "MachineIndependent/glslang_tab.cpp" break; - case 348: -#line 2487 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 348: /* type_specifier_nonarray: F16SAMPLER1D */ +#line 2501 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd1D); } -#line 7891 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8256 "MachineIndependent/glslang_tab.cpp" break; - case 349: -#line 2493 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 349: /* type_specifier_nonarray: F16SAMPLER2D */ +#line 2507 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D); } -#line 7902 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8267 "MachineIndependent/glslang_tab.cpp" break; - case 350: -#line 2499 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 350: /* type_specifier_nonarray: F16SAMPLER3D */ +#line 2513 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd3D); } -#line 7913 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8278 "MachineIndependent/glslang_tab.cpp" break; - case 351: -#line 2505 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 351: /* type_specifier_nonarray: F16SAMPLERCUBE */ +#line 2519 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", 
parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdCube); } -#line 7924 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8289 "MachineIndependent/glslang_tab.cpp" break; - case 352: -#line 2511 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 352: /* type_specifier_nonarray: F16SAMPLER1DSHADOW */ +#line 2525 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, false, true); } -#line 7935 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8300 "MachineIndependent/glslang_tab.cpp" break; - case 353: -#line 2517 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 353: /* type_specifier_nonarray: F16SAMPLER2DSHADOW */ +#line 2531 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, true); } -#line 7946 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8311 "MachineIndependent/glslang_tab.cpp" break; - case 354: -#line 2523 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 354: /* type_specifier_nonarray: F16SAMPLERCUBESHADOW */ +#line 2537 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, false, true); } -#line 7957 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8322 "MachineIndependent/glslang_tab.cpp" break; - case 355: -#line 2529 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 355: /* type_specifier_nonarray: F16SAMPLER1DARRAY */ +#line 2543 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true); } -#line 7968 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8333 "MachineIndependent/glslang_tab.cpp" break; - case 356: -#line 2535 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 356: /* type_specifier_nonarray: F16SAMPLER2DARRAY */ +#line 2549 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true); } -#line 7979 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8344 "MachineIndependent/glslang_tab.cpp" break; - case 357: -#line 2541 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + 
case 357: /* type_specifier_nonarray: F16SAMPLER1DARRAYSHADOW */ +#line 2555 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true, true); } -#line 7990 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8355 "MachineIndependent/glslang_tab.cpp" break; - case 358: -#line 2547 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 358: /* type_specifier_nonarray: F16SAMPLER2DARRAYSHADOW */ +#line 2561 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, true); } -#line 8001 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8366 "MachineIndependent/glslang_tab.cpp" break; - case 359: -#line 2553 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 359: /* type_specifier_nonarray: F16SAMPLERCUBEARRAY */ +#line 2567 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true); } -#line 8012 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8377 "MachineIndependent/glslang_tab.cpp" break; - case 360: -#line 2559 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 360: /* type_specifier_nonarray: F16SAMPLERCUBEARRAYSHADOW */ +#line 2573 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true, true); } -#line 8023 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8388 "MachineIndependent/glslang_tab.cpp" break; - case 361: -#line 2565 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 361: /* type_specifier_nonarray: ISAMPLER1D */ +#line 2579 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd1D); } -#line 8033 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8398 "MachineIndependent/glslang_tab.cpp" break; - case 362: -#line 2571 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 362: /* type_specifier_nonarray: ISAMPLER2D */ +#line 2585 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd2D); } -#line 8043 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8408 "MachineIndependent/glslang_tab.cpp" break; - case 363: -#line 2576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 363: /* type_specifier_nonarray: ISAMPLER3D */ +#line 2590 
"MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd3D); } -#line 8053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8418 "MachineIndependent/glslang_tab.cpp" break; - case 364: -#line 2581 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 364: /* type_specifier_nonarray: ISAMPLERCUBE */ +#line 2595 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, EsdCube); } -#line 8063 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8428 "MachineIndependent/glslang_tab.cpp" break; - case 365: -#line 2586 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 365: /* type_specifier_nonarray: ISAMPLER2DARRAY */ +#line 2600 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd2D, true); } -#line 8073 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8438 "MachineIndependent/glslang_tab.cpp" break; - case 366: -#line 2591 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 366: /* type_specifier_nonarray: USAMPLER2D */ +#line 2605 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd2D); } -#line 8083 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8448 "MachineIndependent/glslang_tab.cpp" break; - case 367: -#line 2596 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 367: /* type_specifier_nonarray: USAMPLER3D */ +#line 2610 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd3D); } -#line 8093 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8458 "MachineIndependent/glslang_tab.cpp" break; - case 368: -#line 2601 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 368: /* type_specifier_nonarray: USAMPLERCUBE */ +#line 2615 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, EsdCube); } -#line 8103 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8468 "MachineIndependent/glslang_tab.cpp" break; - case 369: -#line 2607 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 369: /* type_specifier_nonarray: ISAMPLER1DARRAY */ +#line 2621 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd1D, true); } -#line 8113 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8478 "MachineIndependent/glslang_tab.cpp" break; - case 370: -#line 2612 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 370: /* type_specifier_nonarray: ISAMPLERCUBEARRAY */ +#line 2626 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); 
(yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, EsdCube, true); } -#line 8123 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8488 "MachineIndependent/glslang_tab.cpp" break; - case 371: -#line 2617 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 371: /* type_specifier_nonarray: USAMPLER1D */ +#line 2631 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd1D); } -#line 8133 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8498 "MachineIndependent/glslang_tab.cpp" break; - case 372: -#line 2622 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 372: /* type_specifier_nonarray: USAMPLER1DARRAY */ +#line 2636 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd1D, true); } -#line 8143 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8508 "MachineIndependent/glslang_tab.cpp" break; - case 373: -#line 2627 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 373: /* type_specifier_nonarray: USAMPLERCUBEARRAY */ +#line 2641 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, EsdCube, true); } -#line 8153 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8518 "MachineIndependent/glslang_tab.cpp" break; - case 374: -#line 2632 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 374: /* type_specifier_nonarray: TEXTURECUBEARRAY */ +#line 2646 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube, true); } -#line 8163 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8528 "MachineIndependent/glslang_tab.cpp" break; - case 375: -#line 2637 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 375: /* type_specifier_nonarray: ITEXTURECUBEARRAY */ +#line 2651 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube, true); } -#line 8173 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8538 "MachineIndependent/glslang_tab.cpp" break; - case 376: -#line 2642 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 376: /* type_specifier_nonarray: UTEXTURECUBEARRAY */ +#line 2656 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube, true); } -#line 8183 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8548 "MachineIndependent/glslang_tab.cpp" break; - case 377: -#line 2648 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 377: /* type_specifier_nonarray: USAMPLER2DARRAY */ +#line 2662 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; 
(yyval.interm.type).sampler.set(EbtUint, Esd2D, true); } -#line 8193 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8558 "MachineIndependent/glslang_tab.cpp" break; - case 378: -#line 2653 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 378: /* type_specifier_nonarray: TEXTURE2D */ +#line 2667 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D); } -#line 8203 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8568 "MachineIndependent/glslang_tab.cpp" break; - case 379: -#line 2658 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 379: /* type_specifier_nonarray: TEXTURE3D */ +#line 2672 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd3D); } -#line 8213 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8578 "MachineIndependent/glslang_tab.cpp" break; - case 380: -#line 2663 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 380: /* type_specifier_nonarray: TEXTURE2DARRAY */ +#line 2677 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true); } -#line 8223 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8588 "MachineIndependent/glslang_tab.cpp" break; - case 381: -#line 2668 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 381: /* type_specifier_nonarray: TEXTURECUBE */ +#line 2682 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube); } -#line 8233 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8598 "MachineIndependent/glslang_tab.cpp" break; - case 382: -#line 2673 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 382: /* type_specifier_nonarray: ITEXTURE2D */ +#line 2687 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D); } -#line 8243 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8608 "MachineIndependent/glslang_tab.cpp" break; - case 383: -#line 2678 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 383: /* type_specifier_nonarray: ITEXTURE3D */ +#line 2692 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd3D); } -#line 8253 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8618 "MachineIndependent/glslang_tab.cpp" break; - case 384: -#line 2683 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 384: /* type_specifier_nonarray: ITEXTURECUBE */ +#line 2697 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube); } -#line 8263 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8628 "MachineIndependent/glslang_tab.cpp" break; - case 385: -#line 2688 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 385: /* type_specifier_nonarray: ITEXTURE2DARRAY */ +#line 2702 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true); } -#line 8273 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8638 "MachineIndependent/glslang_tab.cpp" break; - case 386: -#line 2693 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 386: /* type_specifier_nonarray: UTEXTURE2D */ +#line 2707 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D); } -#line 8283 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8648 "MachineIndependent/glslang_tab.cpp" break; - case 387: -#line 2698 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 387: /* type_specifier_nonarray: UTEXTURE3D */ +#line 2712 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd3D); } -#line 8293 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8658 "MachineIndependent/glslang_tab.cpp" break; - case 388: -#line 2703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 388: /* type_specifier_nonarray: UTEXTURECUBE */ +#line 2717 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube); } -#line 8303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8668 "MachineIndependent/glslang_tab.cpp" break; - case 389: -#line 2708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 389: /* type_specifier_nonarray: UTEXTURE2DARRAY */ +#line 2722 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true); } -#line 8313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8678 "MachineIndependent/glslang_tab.cpp" break; - case 390: -#line 2713 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 390: /* type_specifier_nonarray: SAMPLER */ +#line 2727 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setPureSampler(false); } -#line 8323 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8688 "MachineIndependent/glslang_tab.cpp" break; - case 391: -#line 2718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 391: /* type_specifier_nonarray: SAMPLERSHADOW */ +#line 2732 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setPureSampler(true); } -#line 8333 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8698 "MachineIndependent/glslang_tab.cpp" 
break; - case 392: -#line 2724 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 392: /* type_specifier_nonarray: SAMPLER2DRECT */ +#line 2738 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdRect); } -#line 8343 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8708 "MachineIndependent/glslang_tab.cpp" break; - case 393: -#line 2729 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 393: /* type_specifier_nonarray: SAMPLER2DRECTSHADOW */ +#line 2743 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, EsdRect, false, true); } -#line 8353 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8718 "MachineIndependent/glslang_tab.cpp" break; - case 394: -#line 2734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 394: /* type_specifier_nonarray: F16SAMPLER2DRECT */ +#line 2748 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdRect); } -#line 8364 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8729 "MachineIndependent/glslang_tab.cpp" break; - case 395: -#line 2740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 395: /* type_specifier_nonarray: F16SAMPLER2DRECTSHADOW */ +#line 2754 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdRect, false, true); } -#line 8375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8740 "MachineIndependent/glslang_tab.cpp" break; - case 396: -#line 2746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 396: /* type_specifier_nonarray: ISAMPLER2DRECT */ +#line 2760 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, EsdRect); } -#line 8385 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8750 "MachineIndependent/glslang_tab.cpp" break; - case 397: -#line 2751 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 397: /* type_specifier_nonarray: USAMPLER2DRECT */ +#line 2765 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, EsdRect); } -#line 8395 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8760 "MachineIndependent/glslang_tab.cpp" break; - case 398: -#line 2756 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 398: /* type_specifier_nonarray: SAMPLERBUFFER */ +#line 2770 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; 
(yyval.interm.type).sampler.set(EbtFloat, EsdBuffer); } -#line 8405 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8770 "MachineIndependent/glslang_tab.cpp" break; - case 399: -#line 2761 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 399: /* type_specifier_nonarray: F16SAMPLERBUFFER */ +#line 2775 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, EsdBuffer); } -#line 8416 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8781 "MachineIndependent/glslang_tab.cpp" break; - case 400: -#line 2767 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 400: /* type_specifier_nonarray: ISAMPLERBUFFER */ +#line 2781 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, EsdBuffer); } -#line 8426 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8791 "MachineIndependent/glslang_tab.cpp" break; - case 401: -#line 2772 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 401: /* type_specifier_nonarray: USAMPLERBUFFER */ +#line 2786 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, EsdBuffer); } -#line 8436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8801 "MachineIndependent/glslang_tab.cpp" break; - case 402: -#line 2777 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 402: /* type_specifier_nonarray: SAMPLER2DMS */ +#line 2791 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, false, true); } -#line 8446 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8811 "MachineIndependent/glslang_tab.cpp" break; - case 403: -#line 2782 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 403: /* type_specifier_nonarray: F16SAMPLER2DMS */ +#line 2796 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, false, true); } -#line 8457 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8822 "MachineIndependent/glslang_tab.cpp" break; - case 404: -#line 2788 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 404: /* type_specifier_nonarray: ISAMPLER2DMS */ +#line 2802 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd2D, false, false, true); } -#line 8467 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8832 "MachineIndependent/glslang_tab.cpp" break; - case 405: -#line 2793 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 405: /* type_specifier_nonarray: USAMPLER2DMS */ 
+#line 2807 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd2D, false, false, true); } -#line 8477 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8842 "MachineIndependent/glslang_tab.cpp" break; - case 406: -#line 2798 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 406: /* type_specifier_nonarray: SAMPLER2DMSARRAY */ +#line 2812 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, false, true); } -#line 8487 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8852 "MachineIndependent/glslang_tab.cpp" break; - case 407: -#line 2803 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 407: /* type_specifier_nonarray: F16SAMPLER2DMSARRAY */ +#line 2817 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, false, true); } -#line 8498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8863 "MachineIndependent/glslang_tab.cpp" break; - case 408: -#line 2809 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 408: /* type_specifier_nonarray: ISAMPLER2DMSARRAY */ +#line 2823 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtInt, Esd2D, true, false, true); } -#line 8508 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8873 "MachineIndependent/glslang_tab.cpp" break; - case 409: -#line 2814 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 409: /* type_specifier_nonarray: USAMPLER2DMSARRAY */ +#line 2828 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtUint, Esd2D, true, false, true); } -#line 8518 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8883 "MachineIndependent/glslang_tab.cpp" break; - case 410: -#line 2819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 410: /* type_specifier_nonarray: TEXTURE1D */ +#line 2833 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D); } -#line 8528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8893 "MachineIndependent/glslang_tab.cpp" break; - case 411: -#line 2824 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 411: /* type_specifier_nonarray: F16TEXTURE1D */ +#line 2838 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D); } -#line 8539 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8904 "MachineIndependent/glslang_tab.cpp" break; - case 412: -#line 2830 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 412: /* type_specifier_nonarray: F16TEXTURE2D */ +#line 2844 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D); } -#line 8550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8915 "MachineIndependent/glslang_tab.cpp" break; - case 413: -#line 2836 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 413: /* type_specifier_nonarray: F16TEXTURE3D */ +#line 2850 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd3D); } -#line 8561 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8926 "MachineIndependent/glslang_tab.cpp" break; - case 414: -#line 2842 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 414: /* type_specifier_nonarray: F16TEXTURECUBE */ +#line 2856 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube); } -#line 8572 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8937 "MachineIndependent/glslang_tab.cpp" break; - case 415: -#line 2848 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 415: /* type_specifier_nonarray: TEXTURE1DARRAY */ +#line 2862 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D, true); } -#line 8582 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8947 "MachineIndependent/glslang_tab.cpp" break; - case 416: -#line 2853 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 416: /* type_specifier_nonarray: F16TEXTURE1DARRAY */ +#line 2867 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D, true); } -#line 8593 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8958 "MachineIndependent/glslang_tab.cpp" break; - case 417: -#line 2859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 417: /* type_specifier_nonarray: F16TEXTURE2DARRAY */ +#line 2873 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; 
(yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true); } -#line 8604 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8969 "MachineIndependent/glslang_tab.cpp" break; - case 418: -#line 2865 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 418: /* type_specifier_nonarray: F16TEXTURECUBEARRAY */ +#line 2879 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube, true); } -#line 8615 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8980 "MachineIndependent/glslang_tab.cpp" break; - case 419: -#line 2871 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 419: /* type_specifier_nonarray: ITEXTURE1D */ +#line 2885 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D); } -#line 8625 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 8990 "MachineIndependent/glslang_tab.cpp" break; - case 420: -#line 2876 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 420: /* type_specifier_nonarray: ITEXTURE1DARRAY */ +#line 2890 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D, true); } -#line 8635 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9000 "MachineIndependent/glslang_tab.cpp" break; - case 421: -#line 2881 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 421: /* type_specifier_nonarray: UTEXTURE1D */ +#line 2895 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D); } -#line 8645 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9010 "MachineIndependent/glslang_tab.cpp" break; - case 422: -#line 2886 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 422: /* type_specifier_nonarray: UTEXTURE1DARRAY */ +#line 2900 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D, true); } -#line 8655 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9020 "MachineIndependent/glslang_tab.cpp" break; - case 423: -#line 2891 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 423: /* type_specifier_nonarray: TEXTURE2DRECT */ +#line 2905 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, EsdRect); } -#line 8665 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9030 "MachineIndependent/glslang_tab.cpp" break; - case 424: -#line 2896 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 424: /* type_specifier_nonarray: F16TEXTURE2DRECT */ +#line 2910 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float 
texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdRect); } -#line 8676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9041 "MachineIndependent/glslang_tab.cpp" break; - case 425: -#line 2902 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 425: /* type_specifier_nonarray: ITEXTURE2DRECT */ +#line 2916 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, EsdRect); } -#line 8686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9051 "MachineIndependent/glslang_tab.cpp" break; - case 426: -#line 2907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 426: /* type_specifier_nonarray: UTEXTURE2DRECT */ +#line 2921 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, EsdRect); } -#line 8696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9061 "MachineIndependent/glslang_tab.cpp" break; - case 427: -#line 2912 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 427: /* type_specifier_nonarray: TEXTUREBUFFER */ +#line 2926 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, EsdBuffer); } -#line 8706 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9071 "MachineIndependent/glslang_tab.cpp" break; - case 428: -#line 2917 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 428: /* type_specifier_nonarray: F16TEXTUREBUFFER */ +#line 2931 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdBuffer); } -#line 8717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9082 "MachineIndependent/glslang_tab.cpp" break; - case 429: -#line 2923 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 429: /* type_specifier_nonarray: ITEXTUREBUFFER */ +#line 2937 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, EsdBuffer); } -#line 8727 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9092 "MachineIndependent/glslang_tab.cpp" break; - case 430: -#line 2928 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 430: /* type_specifier_nonarray: UTEXTUREBUFFER */ +#line 2942 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, EsdBuffer); } -#line 8737 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9102 "MachineIndependent/glslang_tab.cpp" break; - case 431: -#line 2933 "MachineIndependent/glslang.y" /* yacc.c:1646 
*/ - { + case 431: /* type_specifier_nonarray: TEXTURE2DMS */ +#line 2947 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, false, false, true); } -#line 8747 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9112 "MachineIndependent/glslang_tab.cpp" break; - case 432: -#line 2938 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 432: /* type_specifier_nonarray: F16TEXTURE2DMS */ +#line 2952 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, false, false, true); } -#line 8758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9123 "MachineIndependent/glslang_tab.cpp" break; - case 433: -#line 2944 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 433: /* type_specifier_nonarray: ITEXTURE2DMS */ +#line 2958 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, false, false, true); } -#line 8768 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9133 "MachineIndependent/glslang_tab.cpp" break; - case 434: -#line 2949 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 434: /* type_specifier_nonarray: UTEXTURE2DMS */ +#line 2963 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, false, false, true); } -#line 8778 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9143 "MachineIndependent/glslang_tab.cpp" break; - case 435: -#line 2954 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 435: /* type_specifier_nonarray: TEXTURE2DMSARRAY */ +#line 2968 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true, false, true); } -#line 8788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9153 "MachineIndependent/glslang_tab.cpp" break; - case 436: -#line 2959 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 436: /* type_specifier_nonarray: F16TEXTURE2DMSARRAY */ +#line 2973 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true, false, true); } -#line 8799 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9164 "MachineIndependent/glslang_tab.cpp" break; - case 437: -#line 2965 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 437: /* type_specifier_nonarray: ITEXTURE2DMSARRAY */ +#line 2979 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); 
(yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true, false, true); } -#line 8809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9174 "MachineIndependent/glslang_tab.cpp" break; - case 438: -#line 2970 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 438: /* type_specifier_nonarray: UTEXTURE2DMSARRAY */ +#line 2984 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true, false, true); } -#line 8819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9184 "MachineIndependent/glslang_tab.cpp" break; - case 439: -#line 2975 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 439: /* type_specifier_nonarray: IMAGE1D */ +#line 2989 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D); } -#line 8829 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9194 "MachineIndependent/glslang_tab.cpp" break; - case 440: -#line 2980 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 440: /* type_specifier_nonarray: F16IMAGE1D */ +#line 2994 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D); } -#line 8840 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9205 "MachineIndependent/glslang_tab.cpp" break; - case 441: -#line 2986 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 441: /* type_specifier_nonarray: IIMAGE1D */ +#line 3000 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd1D); } -#line 8850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9215 "MachineIndependent/glslang_tab.cpp" break; - case 442: -#line 2991 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 442: /* type_specifier_nonarray: UIMAGE1D */ +#line 3005 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd1D); } -#line 8860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9225 "MachineIndependent/glslang_tab.cpp" break; - case 443: -#line 2996 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 443: /* type_specifier_nonarray: IMAGE2D */ +#line 3010 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D); } -#line 8870 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9235 "MachineIndependent/glslang_tab.cpp" break; - case 444: -#line 3001 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 444: /* type_specifier_nonarray: F16IMAGE2D */ +#line 3015 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half 
float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D); } -#line 8881 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9246 "MachineIndependent/glslang_tab.cpp" break; - case 445: -#line 3007 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 445: /* type_specifier_nonarray: IIMAGE2D */ +#line 3021 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd2D); } -#line 8891 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9256 "MachineIndependent/glslang_tab.cpp" break; - case 446: -#line 3012 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 446: /* type_specifier_nonarray: UIMAGE2D */ +#line 3026 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd2D); } -#line 8901 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9266 "MachineIndependent/glslang_tab.cpp" break; - case 447: -#line 3017 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 447: /* type_specifier_nonarray: IMAGE3D */ +#line 3031 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd3D); } -#line 8911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9276 "MachineIndependent/glslang_tab.cpp" break; - case 448: -#line 3022 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 448: /* type_specifier_nonarray: F16IMAGE3D */ +#line 3036 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd3D); } -#line 8922 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9287 "MachineIndependent/glslang_tab.cpp" break; - case 449: -#line 3028 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 449: /* type_specifier_nonarray: IIMAGE3D */ +#line 3042 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd3D); } -#line 8932 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9297 "MachineIndependent/glslang_tab.cpp" break; - case 450: -#line 3033 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 450: /* type_specifier_nonarray: UIMAGE3D */ +#line 3047 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd3D); } -#line 8942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9307 "MachineIndependent/glslang_tab.cpp" break; - case 451: -#line 3038 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 451: /* type_specifier_nonarray: IMAGE2DRECT */ +#line 
3052 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, EsdRect); } -#line 8952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9317 "MachineIndependent/glslang_tab.cpp" break; - case 452: -#line 3043 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 452: /* type_specifier_nonarray: F16IMAGE2DRECT */ +#line 3057 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, EsdRect); } -#line 8963 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9328 "MachineIndependent/glslang_tab.cpp" break; - case 453: -#line 3049 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 453: /* type_specifier_nonarray: IIMAGE2DRECT */ +#line 3063 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, EsdRect); } -#line 8973 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9338 "MachineIndependent/glslang_tab.cpp" break; - case 454: -#line 3054 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 454: /* type_specifier_nonarray: UIMAGE2DRECT */ +#line 3068 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, EsdRect); } -#line 8983 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9348 "MachineIndependent/glslang_tab.cpp" break; - case 455: -#line 3059 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 455: /* type_specifier_nonarray: IMAGECUBE */ +#line 3073 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube); } -#line 8993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9358 "MachineIndependent/glslang_tab.cpp" break; - case 456: -#line 3064 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 456: /* type_specifier_nonarray: F16IMAGECUBE */ +#line 3078 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube); } -#line 9004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9369 "MachineIndependent/glslang_tab.cpp" break; - case 457: -#line 3070 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 457: /* type_specifier_nonarray: IIMAGECUBE */ +#line 3084 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, EsdCube); } -#line 9014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9379 "MachineIndependent/glslang_tab.cpp" break; - case 
458: -#line 3075 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 458: /* type_specifier_nonarray: UIMAGECUBE */ +#line 3089 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, EsdCube); } -#line 9024 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9389 "MachineIndependent/glslang_tab.cpp" break; - case 459: -#line 3080 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 459: /* type_specifier_nonarray: IMAGEBUFFER */ +#line 3094 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, EsdBuffer); } -#line 9034 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9399 "MachineIndependent/glslang_tab.cpp" break; - case 460: -#line 3085 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 460: /* type_specifier_nonarray: F16IMAGEBUFFER */ +#line 3099 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, EsdBuffer); } -#line 9045 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9410 "MachineIndependent/glslang_tab.cpp" break; - case 461: -#line 3091 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 461: /* type_specifier_nonarray: IIMAGEBUFFER */ +#line 3105 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, EsdBuffer); } -#line 9055 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9420 "MachineIndependent/glslang_tab.cpp" break; - case 462: -#line 3096 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 462: /* type_specifier_nonarray: UIMAGEBUFFER */ +#line 3110 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, EsdBuffer); } -#line 9065 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9430 "MachineIndependent/glslang_tab.cpp" break; - case 463: -#line 3101 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 463: /* type_specifier_nonarray: IMAGE1DARRAY */ +#line 3115 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D, true); } -#line 9075 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9440 "MachineIndependent/glslang_tab.cpp" break; - case 464: -#line 3106 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 464: /* type_specifier_nonarray: F16IMAGE1DARRAY */ +#line 3120 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; 
(yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D, true); } -#line 9086 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9451 "MachineIndependent/glslang_tab.cpp" break; - case 465: -#line 3112 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 465: /* type_specifier_nonarray: IIMAGE1DARRAY */ +#line 3126 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd1D, true); } -#line 9096 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9461 "MachineIndependent/glslang_tab.cpp" break; - case 466: -#line 3117 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 466: /* type_specifier_nonarray: UIMAGE1DARRAY */ +#line 3131 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd1D, true); } -#line 9106 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9471 "MachineIndependent/glslang_tab.cpp" break; - case 467: -#line 3122 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 467: /* type_specifier_nonarray: IMAGE2DARRAY */ +#line 3136 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true); } -#line 9116 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9481 "MachineIndependent/glslang_tab.cpp" break; - case 468: -#line 3127 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 468: /* type_specifier_nonarray: F16IMAGE2DARRAY */ +#line 3141 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true); } -#line 9127 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9492 "MachineIndependent/glslang_tab.cpp" break; - case 469: -#line 3133 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 469: /* type_specifier_nonarray: IIMAGE2DARRAY */ +#line 3147 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true); } -#line 9137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9502 "MachineIndependent/glslang_tab.cpp" break; - case 470: -#line 3138 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 470: /* type_specifier_nonarray: UIMAGE2DARRAY */ +#line 3152 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true); } -#line 9147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9512 "MachineIndependent/glslang_tab.cpp" break; - case 471: -#line 3143 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 471: /* type_specifier_nonarray: IMAGECUBEARRAY */ +#line 3157 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube, true); } -#line 9157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9522 "MachineIndependent/glslang_tab.cpp" break; - case 472: -#line 3148 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 472: /* type_specifier_nonarray: F16IMAGECUBEARRAY */ +#line 3162 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube, true); } -#line 9168 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9533 "MachineIndependent/glslang_tab.cpp" break; - case 473: -#line 3154 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 473: /* type_specifier_nonarray: IIMAGECUBEARRAY */ +#line 3168 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, EsdCube, true); } -#line 9178 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9543 "MachineIndependent/glslang_tab.cpp" break; - case 474: -#line 3159 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 474: /* type_specifier_nonarray: UIMAGECUBEARRAY */ +#line 3173 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, EsdCube, true); } -#line 9188 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9553 "MachineIndependent/glslang_tab.cpp" break; - case 475: -#line 3164 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 475: /* type_specifier_nonarray: IMAGE2DMS */ +#line 3178 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, false, false, true); } -#line 9198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9563 "MachineIndependent/glslang_tab.cpp" break; - case 476: -#line 3169 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 476: /* type_specifier_nonarray: F16IMAGE2DMS */ +#line 3183 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, false, false, true); } -#line 9209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9574 "MachineIndependent/glslang_tab.cpp" break; - case 477: -#line 3175 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 477: /* type_specifier_nonarray: IIMAGE2DMS */ +#line 3189 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, false, false, true); } -#line 9219 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9584 "MachineIndependent/glslang_tab.cpp" break; - case 
478: -#line 3180 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 478: /* type_specifier_nonarray: UIMAGE2DMS */ +#line 3194 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, false, false, true); } -#line 9229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9594 "MachineIndependent/glslang_tab.cpp" break; - case 479: -#line 3185 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 479: /* type_specifier_nonarray: IMAGE2DMSARRAY */ +#line 3199 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true, false, true); } -#line 9239 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9604 "MachineIndependent/glslang_tab.cpp" break; - case 480: -#line 3190 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 480: /* type_specifier_nonarray: F16IMAGE2DMSARRAY */ +#line 3204 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true, false, true); } -#line 9250 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9615 "MachineIndependent/glslang_tab.cpp" break; - case 481: -#line 3196 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 481: /* type_specifier_nonarray: IIMAGE2DMSARRAY */ +#line 3210 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true, false, true); } -#line 9260 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9625 "MachineIndependent/glslang_tab.cpp" break; - case 482: -#line 3201 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 482: /* type_specifier_nonarray: UIMAGE2DMSARRAY */ +#line 3215 "MachineIndependent/glslang.y" + { (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true, false, true); } -#line 9270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9635 "MachineIndependent/glslang_tab.cpp" + break; + + case 483: /* type_specifier_nonarray: I64IMAGE1D */ +#line 3220 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd1D); + } +#line 9645 "MachineIndependent/glslang_tab.cpp" + break; + + case 484: /* type_specifier_nonarray: U64IMAGE1D */ +#line 3225 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd1D); + } +#line 9655 "MachineIndependent/glslang_tab.cpp" + break; + + case 485: /* type_specifier_nonarray: I64IMAGE2D */ +#line 3230 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd2D); + } +#line 9665 "MachineIndependent/glslang_tab.cpp" + break; + + case 486: /* type_specifier_nonarray: U64IMAGE2D */ +#line 3235 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd2D); + } +#line 9675 "MachineIndependent/glslang_tab.cpp" + break; + + case 487: /* type_specifier_nonarray: I64IMAGE3D */ +#line 3240 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd3D); + } +#line 9685 "MachineIndependent/glslang_tab.cpp" + break; + + case 488: /* type_specifier_nonarray: U64IMAGE3D */ +#line 3245 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd3D); + } +#line 9695 "MachineIndependent/glslang_tab.cpp" + break; + + case 489: /* type_specifier_nonarray: I64IMAGE2DRECT */ +#line 3250 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, EsdRect); + } +#line 9705 "MachineIndependent/glslang_tab.cpp" + break; + + case 490: /* type_specifier_nonarray: U64IMAGE2DRECT */ +#line 3255 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, EsdRect); + } +#line 9715 "MachineIndependent/glslang_tab.cpp" + break; + + case 491: /* type_specifier_nonarray: I64IMAGECUBE */ +#line 3260 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, EsdCube); + } +#line 9725 "MachineIndependent/glslang_tab.cpp" + break; + + case 492: /* type_specifier_nonarray: U64IMAGECUBE */ +#line 3265 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, EsdCube); + } +#line 9735 "MachineIndependent/glslang_tab.cpp" + break; + + case 493: /* type_specifier_nonarray: I64IMAGEBUFFER */ +#line 3270 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, EsdBuffer); + } +#line 9745 "MachineIndependent/glslang_tab.cpp" + break; + + case 494: /* type_specifier_nonarray: U64IMAGEBUFFER */ +#line 3275 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, EsdBuffer); + } +#line 9755 "MachineIndependent/glslang_tab.cpp" + break; + + case 495: /* type_specifier_nonarray: I64IMAGE1DARRAY */ +#line 3280 
"MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd1D, true); + } +#line 9765 "MachineIndependent/glslang_tab.cpp" + break; + + case 496: /* type_specifier_nonarray: U64IMAGE1DARRAY */ +#line 3285 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd1D, true); + } +#line 9775 "MachineIndependent/glslang_tab.cpp" + break; + + case 497: /* type_specifier_nonarray: I64IMAGE2DARRAY */ +#line 3290 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd2D, true); + } +#line 9785 "MachineIndependent/glslang_tab.cpp" + break; + + case 498: /* type_specifier_nonarray: U64IMAGE2DARRAY */ +#line 3295 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd2D, true); + } +#line 9795 "MachineIndependent/glslang_tab.cpp" + break; + + case 499: /* type_specifier_nonarray: I64IMAGECUBEARRAY */ +#line 3300 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, EsdCube, true); + } +#line 9805 "MachineIndependent/glslang_tab.cpp" + break; + + case 500: /* type_specifier_nonarray: U64IMAGECUBEARRAY */ +#line 3305 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, EsdCube, true); + } +#line 9815 "MachineIndependent/glslang_tab.cpp" + break; + + case 501: /* type_specifier_nonarray: I64IMAGE2DMS */ +#line 3310 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd2D, false, false, true); + } +#line 9825 "MachineIndependent/glslang_tab.cpp" + break; + + case 502: /* type_specifier_nonarray: U64IMAGE2DMS */ +#line 3315 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd2D, false, false, true); + } +#line 9835 "MachineIndependent/glslang_tab.cpp" + break; + + case 503: /* type_specifier_nonarray: I64IMAGE2DMSARRAY */ +#line 3320 "MachineIndependent/glslang.y" + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt64, Esd2D, true, false, true); + } +#line 9845 "MachineIndependent/glslang_tab.cpp" break; - case 483: -#line 3206 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { // GL_OES_EGL_image_external + case 504: /* type_specifier_nonarray: U64IMAGE2DMSARRAY */ +#line 3325 "MachineIndependent/glslang.y" + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint64, Esd2D, true, false, true); + } +#line 9855 "MachineIndependent/glslang_tab.cpp" + break; + + case 505: /* type_specifier_nonarray: SAMPLEREXTERNALOES */ +#line 3330 "MachineIndependent/glslang.y" + { // GL_OES_EGL_image_external (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D); (yyval.interm.type).sampler.external = true; } -#line 9281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9866 "MachineIndependent/glslang_tab.cpp" break; - case 484: -#line 3212 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { // GL_EXT_YUV_target + case 506: /* type_specifier_nonarray: SAMPLEREXTERNAL2DY2YEXT */ +#line 3336 "MachineIndependent/glslang.y" + { // GL_EXT_YUV_target (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.set(EbtFloat, Esd2D); (yyval.interm.type).sampler.yuv = true; } -#line 9292 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9877 "MachineIndependent/glslang_tab.cpp" break; - case 485: -#line 3218 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 507: /* type_specifier_nonarray: SUBPASSINPUT */ +#line 3342 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtFloat); } -#line 9303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9888 "MachineIndependent/glslang_tab.cpp" break; - case 486: -#line 3224 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 508: /* type_specifier_nonarray: SUBPASSINPUTMS */ +#line 3348 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtFloat, true); } -#line 9314 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9899 "MachineIndependent/glslang_tab.cpp" break; - case 487: -#line 3230 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 509: /* type_specifier_nonarray: F16SUBPASSINPUT */ +#line 3354 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtFloat16); } -#line 9326 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9911 "MachineIndependent/glslang_tab.cpp" break; - case 488: -#line 3237 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 510: /* type_specifier_nonarray: F16SUBPASSINPUTMS */ +#line 3361 "MachineIndependent/glslang.y" + { parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); parseContext.requireStage((yyvsp[0].lex).loc, 
EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtFloat16, true); } -#line 9338 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9923 "MachineIndependent/glslang_tab.cpp" break; - case 489: -#line 3244 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 511: /* type_specifier_nonarray: ISUBPASSINPUT */ +#line 3368 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtInt); } -#line 9349 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9934 "MachineIndependent/glslang_tab.cpp" break; - case 490: -#line 3250 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 512: /* type_specifier_nonarray: ISUBPASSINPUTMS */ +#line 3374 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtInt, true); } -#line 9360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9945 "MachineIndependent/glslang_tab.cpp" break; - case 491: -#line 3256 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 513: /* type_specifier_nonarray: USUBPASSINPUT */ +#line 3380 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtUint); } -#line 9371 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9956 "MachineIndependent/glslang_tab.cpp" break; - case 492: -#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 514: /* type_specifier_nonarray: USUBPASSINPUTMS */ +#line 3386 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtSampler; (yyval.interm.type).sampler.setSubpass(EbtUint, true); } -#line 9382 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9967 "MachineIndependent/glslang_tab.cpp" break; - case 493: -#line 3268 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 515: /* type_specifier_nonarray: FCOOPMATNV */ +#line 3392 "MachineIndependent/glslang.y" + { parseContext.fcoopmatCheck((yyvsp[0].lex).loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtFloat; (yyval.interm.type).coopmat = true; } -#line 9393 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9978 "MachineIndependent/glslang_tab.cpp" break; - case 494: -#line 3274 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 516: /* type_specifier_nonarray: ICOOPMATNV */ +#line 3398 "MachineIndependent/glslang.y" + { parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtInt; (yyval.interm.type).coopmat = true; } -#line 9404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 9989 "MachineIndependent/glslang_tab.cpp" break; - case 495: -#line 3280 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 517: /* type_specifier_nonarray: UCOOPMATNV */ +#line 3404 "MachineIndependent/glslang.y" + { parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); (yyval.interm.type).basicType = EbtUint; (yyval.interm.type).coopmat = true; } -#line 9415 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10000 "MachineIndependent/glslang_tab.cpp" break; - case 496: -#line 3287 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 518: /* type_specifier_nonarray: struct_specifier */ +#line 3411 "MachineIndependent/glslang.y" + { (yyval.interm.type) = (yyvsp[0].interm.type); (yyval.interm.type).qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; parseContext.structTypeCheck((yyval.interm.type).loc, (yyval.interm.type)); } -#line 9425 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10010 "MachineIndependent/glslang_tab.cpp" break; - case 497: -#line 3292 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 519: /* type_specifier_nonarray: TYPE_NAME */ +#line 3416 "MachineIndependent/glslang.y" + { // // This is for user defined type names. The lexical phase looked up the // type. @@ -9439,48 +10024,48 @@ yyreduce: } else parseContext.error((yyvsp[0].lex).loc, "expected type name", (yyvsp[0].lex).string->c_str(), ""); } -#line 9443 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10028 "MachineIndependent/glslang_tab.cpp" break; - case 498: -#line 3308 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 520: /* precision_qualifier: HIGH_PRECISION */ +#line 3432 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "highp precision qualifier"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqHigh); } -#line 9453 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10038 "MachineIndependent/glslang_tab.cpp" break; - case 499: -#line 3313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 521: /* precision_qualifier: MEDIUM_PRECISION */ +#line 3437 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "mediump precision qualifier"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqMedium); } -#line 9463 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10048 "MachineIndependent/glslang_tab.cpp" break; - case 500: -#line 3318 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 522: /* precision_qualifier: LOW_PRECISION */ +#line 3442 "MachineIndependent/glslang.y" + { parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "lowp precision qualifier"); (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqLow); } -#line 9473 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10058 "MachineIndependent/glslang_tab.cpp" break; - case 501: -#line 3326 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { parseContext.nestedStructCheck((yyvsp[-2].lex).loc); } -#line 9479 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 523: /* $@3: %empty */ +#line 3450 "MachineIndependent/glslang.y" + { parseContext.nestedStructCheck((yyvsp[-2].lex).loc); } +#line 10064 "MachineIndependent/glslang_tab.cpp" break; - case 502: -#line 3326 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 524: /* struct_specifier: STRUCT IDENTIFIER LEFT_BRACE $@3 struct_declaration_list RIGHT_BRACE */ +#line 3450 "MachineIndependent/glslang.y" + { TType* structure = new TType((yyvsp[-1].interm.typeList), *(yyvsp[-4].lex).string); parseContext.structArrayCheck((yyvsp[-4].lex).loc, *structure); TVariable* userTypeDef = new TVariable((yyvsp[-4].lex).string, *structure, true); @@ -9491,38 +10076,38 @@ yyreduce: (yyval.interm.type).userDef = structure; --parseContext.structNestingLevel; } -#line 9495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10080 "MachineIndependent/glslang_tab.cpp" break; - case 503: -#line 3337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); } -#line 9501 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 525: /* $@4: %empty */ +#line 3461 "MachineIndependent/glslang.y" + { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); } +#line 10086 "MachineIndependent/glslang_tab.cpp" break; - case 504: -#line 3337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 526: /* struct_specifier: STRUCT LEFT_BRACE $@4 struct_declaration_list RIGHT_BRACE */ +#line 3461 "MachineIndependent/glslang.y" + { TType* structure = new TType((yyvsp[-1].interm.typeList), TString("")); (yyval.interm.type).init((yyvsp[-4].lex).loc); (yyval.interm.type).basicType = EbtStruct; (yyval.interm.type).userDef = structure; --parseContext.structNestingLevel; } -#line 9513 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10098 "MachineIndependent/glslang_tab.cpp" break; - case 505: -#line 3347 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 527: /* struct_declaration_list: struct_declaration */ +#line 3471 "MachineIndependent/glslang.y" + { (yyval.interm.typeList) = (yyvsp[0].interm.typeList); } -#line 9521 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10106 "MachineIndependent/glslang_tab.cpp" break; - case 506: -#line 3350 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 528: /* struct_declaration_list: struct_declaration_list struct_declaration */ +#line 3474 "MachineIndependent/glslang.y" + { (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); for (unsigned int i = 0; i < (yyvsp[0].interm.typeList)->size(); ++i) { for (unsigned int j = 0; j < (yyval.interm.typeList)->size(); ++j) { @@ -9532,12 +10117,12 @@ yyreduce: (yyval.interm.typeList)->push_back((*(yyvsp[0].interm.typeList))[i]); } } -#line 9536 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10121 "MachineIndependent/glslang_tab.cpp" break; - case 507: -#line 3363 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 529: /* struct_declaration: type_specifier struct_declarator_list SEMICOLON */ +#line 3487 "MachineIndependent/glslang.y" + { if ((yyvsp[-2].interm.type).arraySizes) { parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); 
parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); @@ -9559,12 +10144,12 @@ yyreduce: (*(yyval.interm.typeList))[i].type->shallowCopy(type); } } -#line 9563 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10148 "MachineIndependent/glslang_tab.cpp" break; - case 508: -#line 3385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 530: /* struct_declaration: type_qualifier type_specifier struct_declarator_list SEMICOLON */ +#line 3509 "MachineIndependent/glslang.y" + { if ((yyvsp[-2].interm.type).arraySizes) { parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); @@ -9588,39 +10173,39 @@ yyreduce: (*(yyval.interm.typeList))[i].type->shallowCopy(type); } } -#line 9592 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10177 "MachineIndependent/glslang_tab.cpp" break; - case 509: -#line 3412 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 531: /* struct_declarator_list: struct_declarator */ +#line 3536 "MachineIndependent/glslang.y" + { (yyval.interm.typeList) = new TTypeList; (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); } -#line 9601 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10186 "MachineIndependent/glslang_tab.cpp" break; - case 510: -#line 3416 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 532: /* struct_declarator_list: struct_declarator_list COMMA struct_declarator */ +#line 3540 "MachineIndependent/glslang.y" + { (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); } -#line 9609 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10194 "MachineIndependent/glslang_tab.cpp" break; - case 511: -#line 3422 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 533: /* struct_declarator: IDENTIFIER */ +#line 3546 "MachineIndependent/glslang.y" + { (yyval.interm.typeLine).type = new TType(EbtVoid); (yyval.interm.typeLine).loc = (yyvsp[0].lex).loc; (yyval.interm.typeLine).type->setFieldName(*(yyvsp[0].lex).string); } -#line 9619 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10204 "MachineIndependent/glslang_tab.cpp" break; - case 512: -#line 3427 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 534: /* struct_declarator: IDENTIFIER array_specifier */ +#line 3551 "MachineIndependent/glslang.y" + { parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, (yyvsp[0].interm).arraySizes); (yyval.interm.typeLine).type = new TType(EbtVoid); @@ -9628,236 +10213,236 @@ yyreduce: (yyval.interm.typeLine).type->setFieldName(*(yyvsp[-1].lex).string); (yyval.interm.typeLine).type->transferArraySizes((yyvsp[0].interm).arraySizes); } -#line 9632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10217 "MachineIndependent/glslang_tab.cpp" break; - case 513: -#line 3438 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 535: /* initializer: assignment_expression */ +#line 3562 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 9640 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10225 "MachineIndependent/glslang_tab.cpp" break; - case 514: -#line 3442 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 536: /* initializer: LEFT_BRACE initializer_list RIGHT_BRACE */ +#line 3566 "MachineIndependent/glslang.y" + { const char* initFeature = "{ } style initializers"; 
parseContext.requireProfile((yyvsp[-2].lex).loc, ~EEsProfile, initFeature); parseContext.profileRequires((yyvsp[-2].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); } -#line 9651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10236 "MachineIndependent/glslang_tab.cpp" break; - case 515: -#line 3448 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 537: /* initializer: LEFT_BRACE initializer_list COMMA RIGHT_BRACE */ +#line 3572 "MachineIndependent/glslang.y" + { const char* initFeature = "{ } style initializers"; parseContext.requireProfile((yyvsp[-3].lex).loc, ~EEsProfile, initFeature); parseContext.profileRequires((yyvsp[-3].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); } -#line 9662 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10247 "MachineIndependent/glslang_tab.cpp" break; - case 516: -#line 3459 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 538: /* initializer_list: initializer */ +#line 3583 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate(0, (yyvsp[0].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)->getLoc()); } -#line 9670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10255 "MachineIndependent/glslang_tab.cpp" break; - case 517: -#line 3462 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 539: /* initializer_list: initializer_list COMMA initializer */ +#line 3586 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); } -#line 9678 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10263 "MachineIndependent/glslang_tab.cpp" break; - case 518: -#line 3469 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9684 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 540: /* declaration_statement: declaration */ +#line 3593 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10269 "MachineIndependent/glslang_tab.cpp" break; - case 519: -#line 3473 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9690 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 541: /* statement: compound_statement */ +#line 3597 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10275 "MachineIndependent/glslang_tab.cpp" break; - case 520: -#line 3474 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 542: /* statement: simple_statement */ +#line 3598 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10281 "MachineIndependent/glslang_tab.cpp" break; - case 521: -#line 3480 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 543: /* simple_statement: declaration_statement */ +#line 3604 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 
(yyvsp[0].interm.intermNode); } +#line 10287 "MachineIndependent/glslang_tab.cpp" break; - case 522: -#line 3481 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9708 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 544: /* simple_statement: expression_statement */ +#line 3605 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10293 "MachineIndependent/glslang_tab.cpp" break; - case 523: -#line 3482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9714 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 545: /* simple_statement: selection_statement */ +#line 3606 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10299 "MachineIndependent/glslang_tab.cpp" break; - case 524: -#line 3483 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9720 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 546: /* simple_statement: switch_statement */ +#line 3607 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10305 "MachineIndependent/glslang_tab.cpp" break; - case 525: -#line 3484 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 547: /* simple_statement: case_label */ +#line 3608 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10311 "MachineIndependent/glslang_tab.cpp" break; - case 526: -#line 3485 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9732 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 548: /* simple_statement: iteration_statement */ +#line 3609 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10317 "MachineIndependent/glslang_tab.cpp" break; - case 527: -#line 3486 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9738 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 549: /* simple_statement: jump_statement */ +#line 3610 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10323 "MachineIndependent/glslang_tab.cpp" break; - case 528: -#line 3488 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9744 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 550: /* simple_statement: demote_statement */ +#line 3612 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10329 "MachineIndependent/glslang_tab.cpp" break; - case 529: -#line 3494 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 551: /* demote_statement: DEMOTE SEMICOLON */ +#line 3618 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "demote"); parseContext.requireExtensions((yyvsp[-1].lex).loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDemote, (yyvsp[-1].lex).loc); } -#line 9754 "MachineIndependent/glslang_tab.cpp" /* 
yacc.c:1646 */ +#line 10339 "MachineIndependent/glslang_tab.cpp" break; - case 530: -#line 3503 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = 0; } -#line 9760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 552: /* compound_statement: LEFT_BRACE RIGHT_BRACE */ +#line 3627 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; } +#line 10345 "MachineIndependent/glslang_tab.cpp" break; - case 531: -#line 3504 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 553: /* $@5: %empty */ +#line 3628 "MachineIndependent/glslang.y" + { parseContext.symbolTable.push(); ++parseContext.statementNestingLevel; } -#line 9769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10354 "MachineIndependent/glslang_tab.cpp" break; - case 532: -#line 3508 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 554: /* $@6: %empty */ +#line 3632 "MachineIndependent/glslang.y" + { parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); --parseContext.statementNestingLevel; } -#line 9778 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10363 "MachineIndependent/glslang_tab.cpp" break; - case 533: -#line 3512 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 555: /* compound_statement: LEFT_BRACE $@5 statement_list $@6 RIGHT_BRACE */ +#line 3636 "MachineIndependent/glslang.y" + { if ((yyvsp[-2].interm.intermNode) && (yyvsp[-2].interm.intermNode)->getAsAggregate()) (yyvsp[-2].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); (yyval.interm.intermNode) = (yyvsp[-2].interm.intermNode); } -#line 9788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10373 "MachineIndependent/glslang_tab.cpp" break; - case 534: -#line 3520 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9794 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 556: /* statement_no_new_scope: compound_statement_no_new_scope */ +#line 3644 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10379 "MachineIndependent/glslang_tab.cpp" break; - case 535: -#line 3521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9800 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 557: /* statement_no_new_scope: simple_statement */ +#line 3645 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 10385 "MachineIndependent/glslang_tab.cpp" break; - case 536: -#line 3525 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 558: /* $@7: %empty */ +#line 3649 "MachineIndependent/glslang.y" + { ++parseContext.controlFlowNestingLevel; } -#line 9808 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10393 "MachineIndependent/glslang_tab.cpp" break; - case 537: -#line 3528 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 559: /* statement_scoped: $@7 compound_statement */ +#line 3652 "MachineIndependent/glslang.y" + { --parseContext.controlFlowNestingLevel; (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9817 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10402 "MachineIndependent/glslang_tab.cpp" break; - case 538: -#line 3532 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 560: /* $@8: %empty */ +#line 3656 "MachineIndependent/glslang.y" + { parseContext.symbolTable.push(); 
++parseContext.statementNestingLevel; ++parseContext.controlFlowNestingLevel; } -#line 9827 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10412 "MachineIndependent/glslang_tab.cpp" break; - case 539: -#line 3537 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 561: /* statement_scoped: $@8 simple_statement */ +#line 3661 "MachineIndependent/glslang.y" + { parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); --parseContext.statementNestingLevel; --parseContext.controlFlowNestingLevel; (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9838 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10423 "MachineIndependent/glslang_tab.cpp" break; - case 540: -#line 3546 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 562: /* compound_statement_no_new_scope: LEFT_BRACE RIGHT_BRACE */ +#line 3670 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; } -#line 9846 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10431 "MachineIndependent/glslang_tab.cpp" break; - case 541: -#line 3549 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 563: /* compound_statement_no_new_scope: LEFT_BRACE statement_list RIGHT_BRACE */ +#line 3673 "MachineIndependent/glslang.y" + { if ((yyvsp[-1].interm.intermNode) && (yyvsp[-1].interm.intermNode)->getAsAggregate()) (yyvsp[-1].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); (yyval.interm.intermNode) = (yyvsp[-1].interm.intermNode); } -#line 9856 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10441 "MachineIndependent/glslang_tab.cpp" break; - case 542: -#line 3557 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 564: /* statement_list: statement */ +#line 3681 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[0].interm.intermNode)); if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { @@ -9865,12 +10450,12 @@ yyreduce: (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case } } -#line 9869 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10454 "MachineIndependent/glslang_tab.cpp" break; - case 543: -#line 3565 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 565: /* statement_list: statement_list statement */ +#line 3689 "MachineIndependent/glslang.y" + { if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { parseContext.wrapupSwitchSubsequence((yyvsp[-1].interm.intermNode) ? 
(yyvsp[-1].interm.intermNode)->getAsAggregate() : 0, (yyvsp[0].interm.intermNode)); @@ -9878,77 +10463,77 @@ yyreduce: } else (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode)); } -#line 9882 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10467 "MachineIndependent/glslang_tab.cpp" break; - case 544: -#line 3576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = 0; } -#line 9888 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 566: /* expression_statement: SEMICOLON */ +#line 3700 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; } +#line 10473 "MachineIndependent/glslang_tab.cpp" break; - case 545: -#line 3577 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); } -#line 9894 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + case 567: /* expression_statement: expression SEMICOLON */ +#line 3701 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); } +#line 10479 "MachineIndependent/glslang_tab.cpp" break; - case 546: -#line 3581 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 568: /* selection_statement: selection_statement_nonattributed */ +#line 3705 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9902 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10487 "MachineIndependent/glslang_tab.cpp" break; - case 547: -#line 3585 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 569: /* selection_statement: attribute selection_statement_nonattributed */ +#line 3709 "MachineIndependent/glslang.y" + { parseContext.handleSelectionAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10496 "MachineIndependent/glslang_tab.cpp" break; - case 548: -#line 3592 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 570: /* selection_statement_nonattributed: IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement */ +#line 3716 "MachineIndependent/glslang.y" + { parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-2].interm.intermTypedNode)); (yyval.interm.intermNode) = parseContext.intermediate.addSelection((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.nodePair), (yyvsp[-4].lex).loc); } -#line 9920 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10505 "MachineIndependent/glslang_tab.cpp" break; - case 549: -#line 3599 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 571: /* selection_rest_statement: statement_scoped ELSE statement_scoped */ +#line 3723 "MachineIndependent/glslang.y" + { (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermNode); (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermNode); } -#line 9929 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10514 "MachineIndependent/glslang_tab.cpp" break; - case 550: -#line 3603 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 572: /* selection_rest_statement: statement_scoped */ +#line 3727 "MachineIndependent/glslang.y" + { (yyval.interm.nodePair).node1 = (yyvsp[0].interm.intermNode); (yyval.interm.nodePair).node2 = 0; } -#line 9938 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10523 
"MachineIndependent/glslang_tab.cpp" break; - case 551: -#line 3611 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 573: /* condition: expression */ +#line 3735 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); parseContext.boolCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode)); } -#line 9947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10532 "MachineIndependent/glslang_tab.cpp" break; - case 552: -#line 3615 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 574: /* condition: fully_specified_type IDENTIFIER EQUAL initializer */ +#line 3739 "MachineIndependent/glslang.y" + { parseContext.boolCheck((yyvsp[-2].lex).loc, (yyvsp[-3].interm.type)); TType type((yyvsp[-3].interm.type)); @@ -9958,29 +10543,29 @@ yyreduce: else (yyval.interm.intermTypedNode) = 0; } -#line 9962 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10547 "MachineIndependent/glslang_tab.cpp" break; - case 553: -#line 3628 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 575: /* switch_statement: switch_statement_nonattributed */ +#line 3752 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9970 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10555 "MachineIndependent/glslang_tab.cpp" break; - case 554: -#line 3632 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 576: /* switch_statement: attribute switch_statement_nonattributed */ +#line 3756 "MachineIndependent/glslang.y" + { parseContext.handleSwitchAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 9979 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10564 "MachineIndependent/glslang_tab.cpp" break; - case 555: -#line 3639 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 577: /* $@9: %empty */ +#line 3763 "MachineIndependent/glslang.y" + { // start new switch sequence on the switch stack ++parseContext.controlFlowNestingLevel; ++parseContext.statementNestingLevel; @@ -9988,12 +10573,12 @@ yyreduce: parseContext.switchLevel.push_back(parseContext.statementNestingLevel); parseContext.symbolTable.push(); } -#line 9992 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10577 "MachineIndependent/glslang_tab.cpp" break; - case 556: -#line 3647 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 578: /* switch_statement_nonattributed: SWITCH LEFT_PAREN expression RIGHT_PAREN $@9 LEFT_BRACE switch_statement_list RIGHT_BRACE */ +#line 3771 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = parseContext.addSwitch((yyvsp[-7].lex).loc, (yyvsp[-5].interm.intermTypedNode), (yyvsp[-1].interm.intermNode) ? 
(yyvsp[-1].interm.intermNode)->getAsAggregate() : 0); delete parseContext.switchSequenceStack.back(); parseContext.switchSequenceStack.pop_back(); @@ -10002,28 +10587,28 @@ yyreduce: --parseContext.statementNestingLevel; --parseContext.controlFlowNestingLevel; } -#line 10006 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10591 "MachineIndependent/glslang_tab.cpp" break; - case 557: -#line 3659 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 579: /* switch_statement_list: %empty */ +#line 3783 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; } -#line 10014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10599 "MachineIndependent/glslang_tab.cpp" break; - case 558: -#line 3662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 580: /* switch_statement_list: statement_list */ +#line 3786 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10022 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10607 "MachineIndependent/glslang_tab.cpp" break; - case 559: -#line 3668 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 581: /* case_label: CASE expression COLON */ +#line 3792 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; if (parseContext.switchLevel.size() == 0) parseContext.error((yyvsp[-2].lex).loc, "cannot appear outside switch statement", "case", ""); @@ -10035,12 +10620,12 @@ yyreduce: (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpCase, (yyvsp[-1].interm.intermTypedNode), (yyvsp[-2].lex).loc); } } -#line 10039 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10624 "MachineIndependent/glslang_tab.cpp" break; - case 560: -#line 3680 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 582: /* case_label: DEFAULT COLON */ +#line 3804 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 0; if (parseContext.switchLevel.size() == 0) parseContext.error((yyvsp[-1].lex).loc, "cannot appear outside switch statement", "default", ""); @@ -10049,29 +10634,29 @@ yyreduce: else (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDefault, (yyvsp[-1].lex).loc); } -#line 10053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10638 "MachineIndependent/glslang_tab.cpp" break; - case 561: -#line 3692 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 583: /* iteration_statement: iteration_statement_nonattributed */ +#line 3816 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10061 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10646 "MachineIndependent/glslang_tab.cpp" break; - case 562: -#line 3696 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 584: /* iteration_statement: attribute iteration_statement_nonattributed */ +#line 3820 "MachineIndependent/glslang.y" + { parseContext.handleLoopAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10655 "MachineIndependent/glslang_tab.cpp" break; - case 563: -#line 3703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 585: /* $@10: %empty */ +#line 3827 "MachineIndependent/glslang.y" + { if (! 
parseContext.limits.whileLoops) parseContext.error((yyvsp[-1].lex).loc, "while loops not available", "limitation", ""); parseContext.symbolTable.push(); @@ -10079,34 +10664,34 @@ yyreduce: ++parseContext.statementNestingLevel; ++parseContext.controlFlowNestingLevel; } -#line 10083 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10668 "MachineIndependent/glslang_tab.cpp" break; - case 564: -#line 3711 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 586: /* iteration_statement_nonattributed: WHILE LEFT_PAREN $@10 condition RIGHT_PAREN statement_no_new_scope */ +#line 3835 "MachineIndependent/glslang.y" + { parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, true, (yyvsp[-5].lex).loc); --parseContext.loopNestingLevel; --parseContext.statementNestingLevel; --parseContext.controlFlowNestingLevel; } -#line 10095 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10680 "MachineIndependent/glslang_tab.cpp" break; - case 565: -#line 3718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 587: /* $@11: %empty */ +#line 3842 "MachineIndependent/glslang.y" + { ++parseContext.loopNestingLevel; ++parseContext.statementNestingLevel; ++parseContext.controlFlowNestingLevel; } -#line 10105 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10690 "MachineIndependent/glslang_tab.cpp" break; - case 566: -#line 3723 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 588: /* iteration_statement_nonattributed: DO $@11 statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON */ +#line 3847 "MachineIndependent/glslang.y" + { if (! parseContext.limits.whileLoops) parseContext.error((yyvsp[-7].lex).loc, "do-while loops not available", "limitation", ""); @@ -10117,23 +10702,23 @@ yyreduce: --parseContext.statementNestingLevel; --parseContext.controlFlowNestingLevel; } -#line 10121 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10706 "MachineIndependent/glslang_tab.cpp" break; - case 567: -#line 3734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 589: /* $@12: %empty */ +#line 3858 "MachineIndependent/glslang.y" + { parseContext.symbolTable.push(); ++parseContext.loopNestingLevel; ++parseContext.statementNestingLevel; ++parseContext.controlFlowNestingLevel; } -#line 10132 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10717 "MachineIndependent/glslang_tab.cpp" break; - case 568: -#line 3740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 590: /* iteration_statement_nonattributed: FOR LEFT_PAREN $@12 for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope */ +#line 3864 "MachineIndependent/glslang.y" + { parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[-3].interm.intermNode), (yyvsp[-5].lex).loc); TIntermLoop* forLoop = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node1), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node2), true, (yyvsp[-6].lex).loc); @@ -10145,157 +10730,184 @@ yyreduce: --parseContext.statementNestingLevel; --parseContext.controlFlowNestingLevel; } -#line 10149 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10734 "MachineIndependent/glslang_tab.cpp" break; - case 569: -#line 3755 "MachineIndependent/glslang.y" /* yacc.c:1646 
*/ - { + case 591: /* for_init_statement: expression_statement */ +#line 3879 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10742 "MachineIndependent/glslang_tab.cpp" break; - case 570: -#line 3758 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 592: /* for_init_statement: declaration_statement */ +#line 3882 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10165 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10750 "MachineIndependent/glslang_tab.cpp" break; - case 571: -#line 3764 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 593: /* conditionopt: condition */ +#line 3888 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } -#line 10173 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10758 "MachineIndependent/glslang_tab.cpp" break; - case 572: -#line 3767 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 594: /* conditionopt: %empty */ +#line 3891 "MachineIndependent/glslang.y" + { (yyval.interm.intermTypedNode) = 0; } -#line 10181 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10766 "MachineIndependent/glslang_tab.cpp" break; - case 573: -#line 3773 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 595: /* for_rest_statement: conditionopt SEMICOLON */ +#line 3897 "MachineIndependent/glslang.y" + { (yyval.interm.nodePair).node1 = (yyvsp[-1].interm.intermTypedNode); (yyval.interm.nodePair).node2 = 0; } -#line 10190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10775 "MachineIndependent/glslang_tab.cpp" break; - case 574: -#line 3777 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 596: /* for_rest_statement: conditionopt SEMICOLON expression */ +#line 3901 "MachineIndependent/glslang.y" + { (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermTypedNode); (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermTypedNode); } -#line 10199 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10784 "MachineIndependent/glslang_tab.cpp" break; - case 575: -#line 3784 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 597: /* jump_statement: CONTINUE SEMICOLON */ +#line 3908 "MachineIndependent/glslang.y" + { if (parseContext.loopNestingLevel <= 0) parseContext.error((yyvsp[-1].lex).loc, "continue statement only allowed in loops", "", ""); (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpContinue, (yyvsp[-1].lex).loc); } -#line 10209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10794 "MachineIndependent/glslang_tab.cpp" break; - case 576: -#line 3789 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 598: /* jump_statement: BREAK SEMICOLON */ +#line 3913 "MachineIndependent/glslang.y" + { if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) parseContext.error((yyvsp[-1].lex).loc, "break statement only allowed in switch and loops", "", ""); (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpBreak, (yyvsp[-1].lex).loc); } -#line 10219 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10804 "MachineIndependent/glslang_tab.cpp" break; - case 577: -#line 3794 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 599: /* jump_statement: RETURN SEMICOLON */ +#line 3918 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = 
parseContext.intermediate.addBranch(EOpReturn, (yyvsp[-1].lex).loc); if (parseContext.currentFunctionType->getBasicType() != EbtVoid) parseContext.error((yyvsp[-1].lex).loc, "non-void function must return a value", "return", ""); if (parseContext.inMain) parseContext.postEntryPointReturn = true; } -#line 10231 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10816 "MachineIndependent/glslang_tab.cpp" break; - case 578: -#line 3801 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 600: /* jump_statement: RETURN expression SEMICOLON */ +#line 3925 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = parseContext.handleReturnValue((yyvsp[-2].lex).loc, (yyvsp[-1].interm.intermTypedNode)); } -#line 10239 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10824 "MachineIndependent/glslang_tab.cpp" break; - case 579: -#line 3804 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 601: /* jump_statement: DISCARD SEMICOLON */ +#line 3928 "MachineIndependent/glslang.y" + { parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "discard"); (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpKill, (yyvsp[-1].lex).loc); } -#line 10248 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10833 "MachineIndependent/glslang_tab.cpp" break; - case 580: -#line 3813 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 602: /* jump_statement: TERMINATE_INVOCATION SEMICOLON */ +#line 3932 "MachineIndependent/glslang.y" + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "terminateInvocation"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpTerminateInvocation, (yyvsp[-1].lex).loc); + } +#line 10842 "MachineIndependent/glslang_tab.cpp" + break; + + case 603: /* jump_statement: TERMINATE_RAY SEMICOLON */ +#line 3937 "MachineIndependent/glslang.y" + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangAnyHit, "terminateRayEXT"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpTerminateRayKHR, (yyvsp[-1].lex).loc); + } +#line 10851 "MachineIndependent/glslang_tab.cpp" + break; + + case 604: /* jump_statement: IGNORE_INTERSECTION SEMICOLON */ +#line 3941 "MachineIndependent/glslang.y" + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangAnyHit, "ignoreIntersectionEXT"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpIgnoreIntersectionKHR, (yyvsp[-1].lex).loc); + } +#line 10860 "MachineIndependent/glslang_tab.cpp" + break; + + case 605: /* translation_unit: external_declaration */ +#line 3951 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); } -#line 10257 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10869 "MachineIndependent/glslang_tab.cpp" break; - case 581: -#line 3817 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 606: /* translation_unit: translation_unit external_declaration */ +#line 3955 "MachineIndependent/glslang.y" + { if ((yyvsp[0].interm.intermNode) != nullptr) { (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode)); parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); } } -#line 10268 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10880 "MachineIndependent/glslang_tab.cpp" break; - case 582: -#line 3826 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 607: /* 
external_declaration: function_definition */ +#line 3964 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10888 "MachineIndependent/glslang_tab.cpp" break; - case 583: -#line 3829 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 608: /* external_declaration: declaration */ +#line 3967 "MachineIndependent/glslang.y" + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } -#line 10284 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10896 "MachineIndependent/glslang_tab.cpp" break; - case 584: -#line 3833 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 609: /* external_declaration: SEMICOLON */ +#line 3971 "MachineIndependent/glslang.y" + { parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "extraneous semicolon"); parseContext.profileRequires((yyvsp[0].lex).loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); (yyval.interm.intermNode) = nullptr; } -#line 10294 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10906 "MachineIndependent/glslang_tab.cpp" break; - case 585: -#line 3842 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 610: /* $@13: %empty */ +#line 3980 "MachineIndependent/glslang.y" + { (yyvsp[0].interm).function = parseContext.handleFunctionDeclarator((yyvsp[0].interm).loc, *(yyvsp[0].interm).function, false /* not prototype */); (yyvsp[0].interm).intermNode = parseContext.handleFunctionDefinition((yyvsp[0].interm).loc, *(yyvsp[0].interm).function); @@ -10307,12 +10919,12 @@ yyreduce: ++parseContext.statementNestingLevel; } } -#line 10311 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10923 "MachineIndependent/glslang_tab.cpp" break; - case 586: -#line 3854 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 611: /* function_definition: function_prototype $@13 compound_statement_no_new_scope */ +#line 3992 "MachineIndependent/glslang.y" + { // May be best done as post process phase on intermediate code if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! 
parseContext.functionReturnsValue) parseContext.error((yyvsp[-2].interm).loc, "function does not return a value:", "", (yyvsp[-2].interm).function->getName().c_str()); @@ -10338,52 +10950,53 @@ yyreduce: --parseContext.statementNestingLevel; } } -#line 10342 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10954 "MachineIndependent/glslang_tab.cpp" break; - case 587: -#line 3884 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 612: /* attribute: LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET */ +#line 4022 "MachineIndependent/glslang.y" + { (yyval.interm.attributes) = (yyvsp[-2].interm.attributes); parseContext.requireExtensions((yyvsp[-4].lex).loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); } -#line 10351 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10963 "MachineIndependent/glslang_tab.cpp" break; - case 588: -#line 3890 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 613: /* attribute_list: single_attribute */ +#line 4028 "MachineIndependent/glslang.y" + { (yyval.interm.attributes) = (yyvsp[0].interm.attributes); } -#line 10359 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10971 "MachineIndependent/glslang_tab.cpp" break; - case 589: -#line 3893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 614: /* attribute_list: attribute_list COMMA single_attribute */ +#line 4031 "MachineIndependent/glslang.y" + { (yyval.interm.attributes) = parseContext.mergeAttributes((yyvsp[-2].interm.attributes), (yyvsp[0].interm.attributes)); } -#line 10367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10979 "MachineIndependent/glslang_tab.cpp" break; - case 590: -#line 3898 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 615: /* single_attribute: IDENTIFIER */ +#line 4036 "MachineIndependent/glslang.y" + { (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[0].lex).string); } -#line 10375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10987 "MachineIndependent/glslang_tab.cpp" break; - case 591: -#line 3901 "MachineIndependent/glslang.y" /* yacc.c:1646 */ - { + case 616: /* single_attribute: IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN */ +#line 4039 "MachineIndependent/glslang.y" + { (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[-3].lex).string, (yyvsp[-1].interm.intermTypedNode)); } -#line 10383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10995 "MachineIndependent/glslang_tab.cpp" break; -#line 10387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ +#line 10999 "MachineIndependent/glslang_tab.cpp" + default: break; } /* User semantic actions sometimes alter yychar, and that requires @@ -10397,25 +11010,23 @@ yyreduce: case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ - YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); + YY_SYMBOL_PRINT ("-> $$ =", YY_CAST (yysymbol_kind_t, yyr1[yyn]), &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; - YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. 
*/ - - yyn = yyr1[yyn]; - - yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; - if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) - yystate = yytable[yystate]; - else - yystate = yydefgoto[yyn - YYNTOKENS]; + { + const int yylhs = yyr1[yyn] - YYNTOKENS; + const int yyi = yypgoto[yylhs] + *yyssp; + yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp + ? yytable[yyi] + : yydefgoto[yylhs]); + } goto yynewstate; @@ -10426,50 +11037,44 @@ yyreduce: yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ - yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); - + yytoken = yychar == YYEMPTY ? YYSYMBOL_YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; -#if ! YYERROR_VERBOSE - yyerror (pParseContext, YY_("syntax error")); -#else -# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ - yyssp, yytoken) { + yypcontext_t yyctx + = {yyssp, yytoken}; char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; - yysyntax_error_status = YYSYNTAX_ERROR; + yysyntax_error_status = yysyntax_error (&yymsg_alloc, &yymsg, &yyctx); if (yysyntax_error_status == 0) yymsgp = yymsg; - else if (yysyntax_error_status == 1) + else if (yysyntax_error_status == -1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); - yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); - if (!yymsg) + yymsg = YY_CAST (char *, + YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc))); + if (yymsg) { - yymsg = yymsgbuf; - yymsg_alloc = sizeof yymsgbuf; - yysyntax_error_status = 2; + yysyntax_error_status + = yysyntax_error (&yymsg_alloc, &yymsg, &yyctx); + yymsgp = yymsg; } else { - yysyntax_error_status = YYSYNTAX_ERROR; - yymsgp = yymsg; + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + yysyntax_error_status = YYENOMEM; } } yyerror (pParseContext, yymsgp); - if (yysyntax_error_status == 2) + if (yysyntax_error_status == YYENOMEM) goto yyexhaustedlab; } -# undef YYSYNTAX_ERROR -#endif } - - if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an @@ -10498,12 +11103,10 @@ yyerrlab: | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: - - /* Pacify compilers like GCC when the user code never invokes - YYERROR and the label yyerrorlab therefore never appears in user - code. */ - if (/*CONSTCOND*/ 0) - goto yyerrorlab; + /* Pacify compilers when the user code never invokes YYERROR and the + label yyerrorlab therefore never appears in user code. */ + if (0) + YYERROR; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ @@ -10520,13 +11123,14 @@ yyerrorlab: yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ + /* Pop stack until we find a state that shifts the error token. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) + yyn += YYSYMBOL_YYerror; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYSYMBOL_YYerror) { yyn = yytable[yyn]; if (0 < yyn) @@ -10540,7 +11144,7 @@ yyerrlab1: yydestruct ("Error: popping", - yystos[yystate], yyvsp, pParseContext); + YY_ACCESSING_SYMBOL (yystate), yyvsp, pParseContext); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); @@ -10552,7 +11156,7 @@ yyerrlab1: /* Shift the error token. 
*/ - YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); + YY_SYMBOL_PRINT ("Shifting", YY_ACCESSING_SYMBOL (yyn), yyvsp, yylsp); yystate = yyn; goto yynewstate; @@ -10565,6 +11169,7 @@ yyacceptlab: yyresult = 0; goto yyreturn; + /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ @@ -10572,16 +11177,21 @@ yyabortlab: yyresult = 1; goto yyreturn; -#if !defined yyoverflow || YYERROR_VERBOSE + +#if 1 /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (pParseContext, YY_("memory exhausted")); yyresult = 2; - /* Fall through. */ + goto yyreturn; #endif + +/*-------------------------------------------------------. +| yyreturn -- parsing is finished, clean up and return. | +`-------------------------------------------------------*/ yyreturn: if (yychar != YYEMPTY) { @@ -10598,18 +11208,17 @@ yyreturn: while (yyssp != yyss) { yydestruct ("Cleanup: popping", - yystos[*yyssp], yyvsp, pParseContext); + YY_ACCESSING_SYMBOL (+*yyssp), yyvsp, pParseContext); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif -#if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); -#endif return yyresult; } -#line 3906 "MachineIndependent/glslang.y" /* yacc.c:1906 */ + +#line 4044 "MachineIndependent/glslang.y" diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h index 78b72a246b..d6bc00d9e8 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h +++ b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h @@ -1,8 +1,9 @@ -/* A Bison parser, made by GNU Bison 3.0.4. */ +/* A Bison parser, made by GNU Bison 3.7.4. */ /* Bison interface for Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation, + Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -30,6 +31,10 @@ This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ +/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual, + especially those whose name start with YY_ or yy_. They are + private implementation details that can be changed or removed. */ + #ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED # define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED /* Debug traces. */ @@ -40,437 +45,466 @@ extern int yydebug; #endif -/* Token type. */ +/* Token kinds. 
*/ #ifndef YYTOKENTYPE # define YYTOKENTYPE enum yytokentype { - CONST = 258, - BOOL = 259, - INT = 260, - UINT = 261, - FLOAT = 262, - BVEC2 = 263, - BVEC3 = 264, - BVEC4 = 265, - IVEC2 = 266, - IVEC3 = 267, - IVEC4 = 268, - UVEC2 = 269, - UVEC3 = 270, - UVEC4 = 271, - VEC2 = 272, - VEC3 = 273, - VEC4 = 274, - MAT2 = 275, - MAT3 = 276, - MAT4 = 277, - MAT2X2 = 278, - MAT2X3 = 279, - MAT2X4 = 280, - MAT3X2 = 281, - MAT3X3 = 282, - MAT3X4 = 283, - MAT4X2 = 284, - MAT4X3 = 285, - MAT4X4 = 286, - SAMPLER2D = 287, - SAMPLER3D = 288, - SAMPLERCUBE = 289, - SAMPLER2DSHADOW = 290, - SAMPLERCUBESHADOW = 291, - SAMPLER2DARRAY = 292, - SAMPLER2DARRAYSHADOW = 293, - ISAMPLER2D = 294, - ISAMPLER3D = 295, - ISAMPLERCUBE = 296, - ISAMPLER2DARRAY = 297, - USAMPLER2D = 298, - USAMPLER3D = 299, - USAMPLERCUBE = 300, - USAMPLER2DARRAY = 301, - SAMPLER = 302, - SAMPLERSHADOW = 303, - TEXTURE2D = 304, - TEXTURE3D = 305, - TEXTURECUBE = 306, - TEXTURE2DARRAY = 307, - ITEXTURE2D = 308, - ITEXTURE3D = 309, - ITEXTURECUBE = 310, - ITEXTURE2DARRAY = 311, - UTEXTURE2D = 312, - UTEXTURE3D = 313, - UTEXTURECUBE = 314, - UTEXTURE2DARRAY = 315, - ATTRIBUTE = 316, - VARYING = 317, - FLOAT16_T = 318, - FLOAT32_T = 319, - DOUBLE = 320, - FLOAT64_T = 321, - INT64_T = 322, - UINT64_T = 323, - INT32_T = 324, - UINT32_T = 325, - INT16_T = 326, - UINT16_T = 327, - INT8_T = 328, - UINT8_T = 329, - I64VEC2 = 330, - I64VEC3 = 331, - I64VEC4 = 332, - U64VEC2 = 333, - U64VEC3 = 334, - U64VEC4 = 335, - I32VEC2 = 336, - I32VEC3 = 337, - I32VEC4 = 338, - U32VEC2 = 339, - U32VEC3 = 340, - U32VEC4 = 341, - I16VEC2 = 342, - I16VEC3 = 343, - I16VEC4 = 344, - U16VEC2 = 345, - U16VEC3 = 346, - U16VEC4 = 347, - I8VEC2 = 348, - I8VEC3 = 349, - I8VEC4 = 350, - U8VEC2 = 351, - U8VEC3 = 352, - U8VEC4 = 353, - DVEC2 = 354, - DVEC3 = 355, - DVEC4 = 356, - DMAT2 = 357, - DMAT3 = 358, - DMAT4 = 359, - F16VEC2 = 360, - F16VEC3 = 361, - F16VEC4 = 362, - F16MAT2 = 363, - F16MAT3 = 364, - F16MAT4 = 365, - F32VEC2 = 366, - F32VEC3 = 367, - F32VEC4 = 368, - F32MAT2 = 369, - F32MAT3 = 370, - F32MAT4 = 371, - F64VEC2 = 372, - F64VEC3 = 373, - F64VEC4 = 374, - F64MAT2 = 375, - F64MAT3 = 376, - F64MAT4 = 377, - DMAT2X2 = 378, - DMAT2X3 = 379, - DMAT2X4 = 380, - DMAT3X2 = 381, - DMAT3X3 = 382, - DMAT3X4 = 383, - DMAT4X2 = 384, - DMAT4X3 = 385, - DMAT4X4 = 386, - F16MAT2X2 = 387, - F16MAT2X3 = 388, - F16MAT2X4 = 389, - F16MAT3X2 = 390, - F16MAT3X3 = 391, - F16MAT3X4 = 392, - F16MAT4X2 = 393, - F16MAT4X3 = 394, - F16MAT4X4 = 395, - F32MAT2X2 = 396, - F32MAT2X3 = 397, - F32MAT2X4 = 398, - F32MAT3X2 = 399, - F32MAT3X3 = 400, - F32MAT3X4 = 401, - F32MAT4X2 = 402, - F32MAT4X3 = 403, - F32MAT4X4 = 404, - F64MAT2X2 = 405, - F64MAT2X3 = 406, - F64MAT2X4 = 407, - F64MAT3X2 = 408, - F64MAT3X3 = 409, - F64MAT3X4 = 410, - F64MAT4X2 = 411, - F64MAT4X3 = 412, - F64MAT4X4 = 413, - ATOMIC_UINT = 414, - ACCSTRUCTNV = 415, - ACCSTRUCTEXT = 416, - RAYQUERYEXT = 417, - FCOOPMATNV = 418, - ICOOPMATNV = 419, - UCOOPMATNV = 420, - SAMPLERCUBEARRAY = 421, - SAMPLERCUBEARRAYSHADOW = 422, - ISAMPLERCUBEARRAY = 423, - USAMPLERCUBEARRAY = 424, - SAMPLER1D = 425, - SAMPLER1DARRAY = 426, - SAMPLER1DARRAYSHADOW = 427, - ISAMPLER1D = 428, - SAMPLER1DSHADOW = 429, - SAMPLER2DRECT = 430, - SAMPLER2DRECTSHADOW = 431, - ISAMPLER2DRECT = 432, - USAMPLER2DRECT = 433, - SAMPLERBUFFER = 434, - ISAMPLERBUFFER = 435, - USAMPLERBUFFER = 436, - SAMPLER2DMS = 437, - ISAMPLER2DMS = 438, - USAMPLER2DMS = 439, - SAMPLER2DMSARRAY = 440, - ISAMPLER2DMSARRAY = 441, - USAMPLER2DMSARRAY = 442, - 
SAMPLEREXTERNALOES = 443, - SAMPLEREXTERNAL2DY2YEXT = 444, - ISAMPLER1DARRAY = 445, - USAMPLER1D = 446, - USAMPLER1DARRAY = 447, - F16SAMPLER1D = 448, - F16SAMPLER2D = 449, - F16SAMPLER3D = 450, - F16SAMPLER2DRECT = 451, - F16SAMPLERCUBE = 452, - F16SAMPLER1DARRAY = 453, - F16SAMPLER2DARRAY = 454, - F16SAMPLERCUBEARRAY = 455, - F16SAMPLERBUFFER = 456, - F16SAMPLER2DMS = 457, - F16SAMPLER2DMSARRAY = 458, - F16SAMPLER1DSHADOW = 459, - F16SAMPLER2DSHADOW = 460, - F16SAMPLER1DARRAYSHADOW = 461, - F16SAMPLER2DARRAYSHADOW = 462, - F16SAMPLER2DRECTSHADOW = 463, - F16SAMPLERCUBESHADOW = 464, - F16SAMPLERCUBEARRAYSHADOW = 465, - IMAGE1D = 466, - IIMAGE1D = 467, - UIMAGE1D = 468, - IMAGE2D = 469, - IIMAGE2D = 470, - UIMAGE2D = 471, - IMAGE3D = 472, - IIMAGE3D = 473, - UIMAGE3D = 474, - IMAGE2DRECT = 475, - IIMAGE2DRECT = 476, - UIMAGE2DRECT = 477, - IMAGECUBE = 478, - IIMAGECUBE = 479, - UIMAGECUBE = 480, - IMAGEBUFFER = 481, - IIMAGEBUFFER = 482, - UIMAGEBUFFER = 483, - IMAGE1DARRAY = 484, - IIMAGE1DARRAY = 485, - UIMAGE1DARRAY = 486, - IMAGE2DARRAY = 487, - IIMAGE2DARRAY = 488, - UIMAGE2DARRAY = 489, - IMAGECUBEARRAY = 490, - IIMAGECUBEARRAY = 491, - UIMAGECUBEARRAY = 492, - IMAGE2DMS = 493, - IIMAGE2DMS = 494, - UIMAGE2DMS = 495, - IMAGE2DMSARRAY = 496, - IIMAGE2DMSARRAY = 497, - UIMAGE2DMSARRAY = 498, - F16IMAGE1D = 499, - F16IMAGE2D = 500, - F16IMAGE3D = 501, - F16IMAGE2DRECT = 502, - F16IMAGECUBE = 503, - F16IMAGE1DARRAY = 504, - F16IMAGE2DARRAY = 505, - F16IMAGECUBEARRAY = 506, - F16IMAGEBUFFER = 507, - F16IMAGE2DMS = 508, - F16IMAGE2DMSARRAY = 509, - TEXTURECUBEARRAY = 510, - ITEXTURECUBEARRAY = 511, - UTEXTURECUBEARRAY = 512, - TEXTURE1D = 513, - ITEXTURE1D = 514, - UTEXTURE1D = 515, - TEXTURE1DARRAY = 516, - ITEXTURE1DARRAY = 517, - UTEXTURE1DARRAY = 518, - TEXTURE2DRECT = 519, - ITEXTURE2DRECT = 520, - UTEXTURE2DRECT = 521, - TEXTUREBUFFER = 522, - ITEXTUREBUFFER = 523, - UTEXTUREBUFFER = 524, - TEXTURE2DMS = 525, - ITEXTURE2DMS = 526, - UTEXTURE2DMS = 527, - TEXTURE2DMSARRAY = 528, - ITEXTURE2DMSARRAY = 529, - UTEXTURE2DMSARRAY = 530, - F16TEXTURE1D = 531, - F16TEXTURE2D = 532, - F16TEXTURE3D = 533, - F16TEXTURE2DRECT = 534, - F16TEXTURECUBE = 535, - F16TEXTURE1DARRAY = 536, - F16TEXTURE2DARRAY = 537, - F16TEXTURECUBEARRAY = 538, - F16TEXTUREBUFFER = 539, - F16TEXTURE2DMS = 540, - F16TEXTURE2DMSARRAY = 541, - SUBPASSINPUT = 542, - SUBPASSINPUTMS = 543, - ISUBPASSINPUT = 544, - ISUBPASSINPUTMS = 545, - USUBPASSINPUT = 546, - USUBPASSINPUTMS = 547, - F16SUBPASSINPUT = 548, - F16SUBPASSINPUTMS = 549, - LEFT_OP = 550, - RIGHT_OP = 551, - INC_OP = 552, - DEC_OP = 553, - LE_OP = 554, - GE_OP = 555, - EQ_OP = 556, - NE_OP = 557, - AND_OP = 558, - OR_OP = 559, - XOR_OP = 560, - MUL_ASSIGN = 561, - DIV_ASSIGN = 562, - ADD_ASSIGN = 563, - MOD_ASSIGN = 564, - LEFT_ASSIGN = 565, - RIGHT_ASSIGN = 566, - AND_ASSIGN = 567, - XOR_ASSIGN = 568, - OR_ASSIGN = 569, - SUB_ASSIGN = 570, - STRING_LITERAL = 571, - LEFT_PAREN = 572, - RIGHT_PAREN = 573, - LEFT_BRACKET = 574, - RIGHT_BRACKET = 575, - LEFT_BRACE = 576, - RIGHT_BRACE = 577, - DOT = 578, - COMMA = 579, - COLON = 580, - EQUAL = 581, - SEMICOLON = 582, - BANG = 583, - DASH = 584, - TILDE = 585, - PLUS = 586, - STAR = 587, - SLASH = 588, - PERCENT = 589, - LEFT_ANGLE = 590, - RIGHT_ANGLE = 591, - VERTICAL_BAR = 592, - CARET = 593, - AMPERSAND = 594, - QUESTION = 595, - INVARIANT = 596, - HIGH_PRECISION = 597, - MEDIUM_PRECISION = 598, - LOW_PRECISION = 599, - PRECISION = 600, - PACKED = 601, - RESOURCE = 602, - SUPERP = 603, - FLOATCONSTANT = 604, - 
INTCONSTANT = 605, - UINTCONSTANT = 606, - BOOLCONSTANT = 607, - IDENTIFIER = 608, - TYPE_NAME = 609, - CENTROID = 610, - IN = 611, - OUT = 612, - INOUT = 613, - STRUCT = 614, - VOID = 615, - WHILE = 616, - BREAK = 617, - CONTINUE = 618, - DO = 619, - ELSE = 620, - FOR = 621, - IF = 622, - DISCARD = 623, - RETURN = 624, - SWITCH = 625, - CASE = 626, - DEFAULT = 627, - UNIFORM = 628, - SHARED = 629, - BUFFER = 630, - FLAT = 631, - SMOOTH = 632, - LAYOUT = 633, - DOUBLECONSTANT = 634, - INT16CONSTANT = 635, - UINT16CONSTANT = 636, - FLOAT16CONSTANT = 637, - INT32CONSTANT = 638, - UINT32CONSTANT = 639, - INT64CONSTANT = 640, - UINT64CONSTANT = 641, - SUBROUTINE = 642, - DEMOTE = 643, - PAYLOADNV = 644, - PAYLOADINNV = 645, - HITATTRNV = 646, - CALLDATANV = 647, - CALLDATAINNV = 648, - PAYLOADEXT = 649, - PAYLOADINEXT = 650, - HITATTREXT = 651, - CALLDATAEXT = 652, - CALLDATAINEXT = 653, - PATCH = 654, - SAMPLE = 655, - NONUNIFORM = 656, - COHERENT = 657, - VOLATILE = 658, - RESTRICT = 659, - READONLY = 660, - WRITEONLY = 661, - DEVICECOHERENT = 662, - QUEUEFAMILYCOHERENT = 663, - WORKGROUPCOHERENT = 664, - SUBGROUPCOHERENT = 665, - NONPRIVATE = 666, - SHADERCALLCOHERENT = 667, - NOPERSPECTIVE = 668, - EXPLICITINTERPAMD = 669, - PERVERTEXNV = 670, - PERPRIMITIVENV = 671, - PERVIEWNV = 672, - PERTASKNV = 673, - PRECISE = 674 + YYEMPTY = -2, + YYEOF = 0, /* "end of file" */ + YYerror = 256, /* error */ + YYUNDEF = 257, /* "invalid token" */ + CONST = 258, /* CONST */ + BOOL = 259, /* BOOL */ + INT = 260, /* INT */ + UINT = 261, /* UINT */ + FLOAT = 262, /* FLOAT */ + BVEC2 = 263, /* BVEC2 */ + BVEC3 = 264, /* BVEC3 */ + BVEC4 = 265, /* BVEC4 */ + IVEC2 = 266, /* IVEC2 */ + IVEC3 = 267, /* IVEC3 */ + IVEC4 = 268, /* IVEC4 */ + UVEC2 = 269, /* UVEC2 */ + UVEC3 = 270, /* UVEC3 */ + UVEC4 = 271, /* UVEC4 */ + VEC2 = 272, /* VEC2 */ + VEC3 = 273, /* VEC3 */ + VEC4 = 274, /* VEC4 */ + MAT2 = 275, /* MAT2 */ + MAT3 = 276, /* MAT3 */ + MAT4 = 277, /* MAT4 */ + MAT2X2 = 278, /* MAT2X2 */ + MAT2X3 = 279, /* MAT2X3 */ + MAT2X4 = 280, /* MAT2X4 */ + MAT3X2 = 281, /* MAT3X2 */ + MAT3X3 = 282, /* MAT3X3 */ + MAT3X4 = 283, /* MAT3X4 */ + MAT4X2 = 284, /* MAT4X2 */ + MAT4X3 = 285, /* MAT4X3 */ + MAT4X4 = 286, /* MAT4X4 */ + SAMPLER2D = 287, /* SAMPLER2D */ + SAMPLER3D = 288, /* SAMPLER3D */ + SAMPLERCUBE = 289, /* SAMPLERCUBE */ + SAMPLER2DSHADOW = 290, /* SAMPLER2DSHADOW */ + SAMPLERCUBESHADOW = 291, /* SAMPLERCUBESHADOW */ + SAMPLER2DARRAY = 292, /* SAMPLER2DARRAY */ + SAMPLER2DARRAYSHADOW = 293, /* SAMPLER2DARRAYSHADOW */ + ISAMPLER2D = 294, /* ISAMPLER2D */ + ISAMPLER3D = 295, /* ISAMPLER3D */ + ISAMPLERCUBE = 296, /* ISAMPLERCUBE */ + ISAMPLER2DARRAY = 297, /* ISAMPLER2DARRAY */ + USAMPLER2D = 298, /* USAMPLER2D */ + USAMPLER3D = 299, /* USAMPLER3D */ + USAMPLERCUBE = 300, /* USAMPLERCUBE */ + USAMPLER2DARRAY = 301, /* USAMPLER2DARRAY */ + SAMPLER = 302, /* SAMPLER */ + SAMPLERSHADOW = 303, /* SAMPLERSHADOW */ + TEXTURE2D = 304, /* TEXTURE2D */ + TEXTURE3D = 305, /* TEXTURE3D */ + TEXTURECUBE = 306, /* TEXTURECUBE */ + TEXTURE2DARRAY = 307, /* TEXTURE2DARRAY */ + ITEXTURE2D = 308, /* ITEXTURE2D */ + ITEXTURE3D = 309, /* ITEXTURE3D */ + ITEXTURECUBE = 310, /* ITEXTURECUBE */ + ITEXTURE2DARRAY = 311, /* ITEXTURE2DARRAY */ + UTEXTURE2D = 312, /* UTEXTURE2D */ + UTEXTURE3D = 313, /* UTEXTURE3D */ + UTEXTURECUBE = 314, /* UTEXTURECUBE */ + UTEXTURE2DARRAY = 315, /* UTEXTURE2DARRAY */ + ATTRIBUTE = 316, /* ATTRIBUTE */ + VARYING = 317, /* VARYING */ + FLOAT16_T = 318, /* FLOAT16_T */ + FLOAT32_T = 319, /* 
FLOAT32_T */ + DOUBLE = 320, /* DOUBLE */ + FLOAT64_T = 321, /* FLOAT64_T */ + INT64_T = 322, /* INT64_T */ + UINT64_T = 323, /* UINT64_T */ + INT32_T = 324, /* INT32_T */ + UINT32_T = 325, /* UINT32_T */ + INT16_T = 326, /* INT16_T */ + UINT16_T = 327, /* UINT16_T */ + INT8_T = 328, /* INT8_T */ + UINT8_T = 329, /* UINT8_T */ + I64VEC2 = 330, /* I64VEC2 */ + I64VEC3 = 331, /* I64VEC3 */ + I64VEC4 = 332, /* I64VEC4 */ + U64VEC2 = 333, /* U64VEC2 */ + U64VEC3 = 334, /* U64VEC3 */ + U64VEC4 = 335, /* U64VEC4 */ + I32VEC2 = 336, /* I32VEC2 */ + I32VEC3 = 337, /* I32VEC3 */ + I32VEC4 = 338, /* I32VEC4 */ + U32VEC2 = 339, /* U32VEC2 */ + U32VEC3 = 340, /* U32VEC3 */ + U32VEC4 = 341, /* U32VEC4 */ + I16VEC2 = 342, /* I16VEC2 */ + I16VEC3 = 343, /* I16VEC3 */ + I16VEC4 = 344, /* I16VEC4 */ + U16VEC2 = 345, /* U16VEC2 */ + U16VEC3 = 346, /* U16VEC3 */ + U16VEC4 = 347, /* U16VEC4 */ + I8VEC2 = 348, /* I8VEC2 */ + I8VEC3 = 349, /* I8VEC3 */ + I8VEC4 = 350, /* I8VEC4 */ + U8VEC2 = 351, /* U8VEC2 */ + U8VEC3 = 352, /* U8VEC3 */ + U8VEC4 = 353, /* U8VEC4 */ + DVEC2 = 354, /* DVEC2 */ + DVEC3 = 355, /* DVEC3 */ + DVEC4 = 356, /* DVEC4 */ + DMAT2 = 357, /* DMAT2 */ + DMAT3 = 358, /* DMAT3 */ + DMAT4 = 359, /* DMAT4 */ + F16VEC2 = 360, /* F16VEC2 */ + F16VEC3 = 361, /* F16VEC3 */ + F16VEC4 = 362, /* F16VEC4 */ + F16MAT2 = 363, /* F16MAT2 */ + F16MAT3 = 364, /* F16MAT3 */ + F16MAT4 = 365, /* F16MAT4 */ + F32VEC2 = 366, /* F32VEC2 */ + F32VEC3 = 367, /* F32VEC3 */ + F32VEC4 = 368, /* F32VEC4 */ + F32MAT2 = 369, /* F32MAT2 */ + F32MAT3 = 370, /* F32MAT3 */ + F32MAT4 = 371, /* F32MAT4 */ + F64VEC2 = 372, /* F64VEC2 */ + F64VEC3 = 373, /* F64VEC3 */ + F64VEC4 = 374, /* F64VEC4 */ + F64MAT2 = 375, /* F64MAT2 */ + F64MAT3 = 376, /* F64MAT3 */ + F64MAT4 = 377, /* F64MAT4 */ + DMAT2X2 = 378, /* DMAT2X2 */ + DMAT2X3 = 379, /* DMAT2X3 */ + DMAT2X4 = 380, /* DMAT2X4 */ + DMAT3X2 = 381, /* DMAT3X2 */ + DMAT3X3 = 382, /* DMAT3X3 */ + DMAT3X4 = 383, /* DMAT3X4 */ + DMAT4X2 = 384, /* DMAT4X2 */ + DMAT4X3 = 385, /* DMAT4X3 */ + DMAT4X4 = 386, /* DMAT4X4 */ + F16MAT2X2 = 387, /* F16MAT2X2 */ + F16MAT2X3 = 388, /* F16MAT2X3 */ + F16MAT2X4 = 389, /* F16MAT2X4 */ + F16MAT3X2 = 390, /* F16MAT3X2 */ + F16MAT3X3 = 391, /* F16MAT3X3 */ + F16MAT3X4 = 392, /* F16MAT3X4 */ + F16MAT4X2 = 393, /* F16MAT4X2 */ + F16MAT4X3 = 394, /* F16MAT4X3 */ + F16MAT4X4 = 395, /* F16MAT4X4 */ + F32MAT2X2 = 396, /* F32MAT2X2 */ + F32MAT2X3 = 397, /* F32MAT2X3 */ + F32MAT2X4 = 398, /* F32MAT2X4 */ + F32MAT3X2 = 399, /* F32MAT3X2 */ + F32MAT3X3 = 400, /* F32MAT3X3 */ + F32MAT3X4 = 401, /* F32MAT3X4 */ + F32MAT4X2 = 402, /* F32MAT4X2 */ + F32MAT4X3 = 403, /* F32MAT4X3 */ + F32MAT4X4 = 404, /* F32MAT4X4 */ + F64MAT2X2 = 405, /* F64MAT2X2 */ + F64MAT2X3 = 406, /* F64MAT2X3 */ + F64MAT2X4 = 407, /* F64MAT2X4 */ + F64MAT3X2 = 408, /* F64MAT3X2 */ + F64MAT3X3 = 409, /* F64MAT3X3 */ + F64MAT3X4 = 410, /* F64MAT3X4 */ + F64MAT4X2 = 411, /* F64MAT4X2 */ + F64MAT4X3 = 412, /* F64MAT4X3 */ + F64MAT4X4 = 413, /* F64MAT4X4 */ + ATOMIC_UINT = 414, /* ATOMIC_UINT */ + ACCSTRUCTNV = 415, /* ACCSTRUCTNV */ + ACCSTRUCTEXT = 416, /* ACCSTRUCTEXT */ + RAYQUERYEXT = 417, /* RAYQUERYEXT */ + FCOOPMATNV = 418, /* FCOOPMATNV */ + ICOOPMATNV = 419, /* ICOOPMATNV */ + UCOOPMATNV = 420, /* UCOOPMATNV */ + SAMPLERCUBEARRAY = 421, /* SAMPLERCUBEARRAY */ + SAMPLERCUBEARRAYSHADOW = 422, /* SAMPLERCUBEARRAYSHADOW */ + ISAMPLERCUBEARRAY = 423, /* ISAMPLERCUBEARRAY */ + USAMPLERCUBEARRAY = 424, /* USAMPLERCUBEARRAY */ + SAMPLER1D = 425, /* SAMPLER1D */ + SAMPLER1DARRAY = 426, /* 
SAMPLER1DARRAY */ + SAMPLER1DARRAYSHADOW = 427, /* SAMPLER1DARRAYSHADOW */ + ISAMPLER1D = 428, /* ISAMPLER1D */ + SAMPLER1DSHADOW = 429, /* SAMPLER1DSHADOW */ + SAMPLER2DRECT = 430, /* SAMPLER2DRECT */ + SAMPLER2DRECTSHADOW = 431, /* SAMPLER2DRECTSHADOW */ + ISAMPLER2DRECT = 432, /* ISAMPLER2DRECT */ + USAMPLER2DRECT = 433, /* USAMPLER2DRECT */ + SAMPLERBUFFER = 434, /* SAMPLERBUFFER */ + ISAMPLERBUFFER = 435, /* ISAMPLERBUFFER */ + USAMPLERBUFFER = 436, /* USAMPLERBUFFER */ + SAMPLER2DMS = 437, /* SAMPLER2DMS */ + ISAMPLER2DMS = 438, /* ISAMPLER2DMS */ + USAMPLER2DMS = 439, /* USAMPLER2DMS */ + SAMPLER2DMSARRAY = 440, /* SAMPLER2DMSARRAY */ + ISAMPLER2DMSARRAY = 441, /* ISAMPLER2DMSARRAY */ + USAMPLER2DMSARRAY = 442, /* USAMPLER2DMSARRAY */ + SAMPLEREXTERNALOES = 443, /* SAMPLEREXTERNALOES */ + SAMPLEREXTERNAL2DY2YEXT = 444, /* SAMPLEREXTERNAL2DY2YEXT */ + ISAMPLER1DARRAY = 445, /* ISAMPLER1DARRAY */ + USAMPLER1D = 446, /* USAMPLER1D */ + USAMPLER1DARRAY = 447, /* USAMPLER1DARRAY */ + F16SAMPLER1D = 448, /* F16SAMPLER1D */ + F16SAMPLER2D = 449, /* F16SAMPLER2D */ + F16SAMPLER3D = 450, /* F16SAMPLER3D */ + F16SAMPLER2DRECT = 451, /* F16SAMPLER2DRECT */ + F16SAMPLERCUBE = 452, /* F16SAMPLERCUBE */ + F16SAMPLER1DARRAY = 453, /* F16SAMPLER1DARRAY */ + F16SAMPLER2DARRAY = 454, /* F16SAMPLER2DARRAY */ + F16SAMPLERCUBEARRAY = 455, /* F16SAMPLERCUBEARRAY */ + F16SAMPLERBUFFER = 456, /* F16SAMPLERBUFFER */ + F16SAMPLER2DMS = 457, /* F16SAMPLER2DMS */ + F16SAMPLER2DMSARRAY = 458, /* F16SAMPLER2DMSARRAY */ + F16SAMPLER1DSHADOW = 459, /* F16SAMPLER1DSHADOW */ + F16SAMPLER2DSHADOW = 460, /* F16SAMPLER2DSHADOW */ + F16SAMPLER1DARRAYSHADOW = 461, /* F16SAMPLER1DARRAYSHADOW */ + F16SAMPLER2DARRAYSHADOW = 462, /* F16SAMPLER2DARRAYSHADOW */ + F16SAMPLER2DRECTSHADOW = 463, /* F16SAMPLER2DRECTSHADOW */ + F16SAMPLERCUBESHADOW = 464, /* F16SAMPLERCUBESHADOW */ + F16SAMPLERCUBEARRAYSHADOW = 465, /* F16SAMPLERCUBEARRAYSHADOW */ + IMAGE1D = 466, /* IMAGE1D */ + IIMAGE1D = 467, /* IIMAGE1D */ + UIMAGE1D = 468, /* UIMAGE1D */ + IMAGE2D = 469, /* IMAGE2D */ + IIMAGE2D = 470, /* IIMAGE2D */ + UIMAGE2D = 471, /* UIMAGE2D */ + IMAGE3D = 472, /* IMAGE3D */ + IIMAGE3D = 473, /* IIMAGE3D */ + UIMAGE3D = 474, /* UIMAGE3D */ + IMAGE2DRECT = 475, /* IMAGE2DRECT */ + IIMAGE2DRECT = 476, /* IIMAGE2DRECT */ + UIMAGE2DRECT = 477, /* UIMAGE2DRECT */ + IMAGECUBE = 478, /* IMAGECUBE */ + IIMAGECUBE = 479, /* IIMAGECUBE */ + UIMAGECUBE = 480, /* UIMAGECUBE */ + IMAGEBUFFER = 481, /* IMAGEBUFFER */ + IIMAGEBUFFER = 482, /* IIMAGEBUFFER */ + UIMAGEBUFFER = 483, /* UIMAGEBUFFER */ + IMAGE1DARRAY = 484, /* IMAGE1DARRAY */ + IIMAGE1DARRAY = 485, /* IIMAGE1DARRAY */ + UIMAGE1DARRAY = 486, /* UIMAGE1DARRAY */ + IMAGE2DARRAY = 487, /* IMAGE2DARRAY */ + IIMAGE2DARRAY = 488, /* IIMAGE2DARRAY */ + UIMAGE2DARRAY = 489, /* UIMAGE2DARRAY */ + IMAGECUBEARRAY = 490, /* IMAGECUBEARRAY */ + IIMAGECUBEARRAY = 491, /* IIMAGECUBEARRAY */ + UIMAGECUBEARRAY = 492, /* UIMAGECUBEARRAY */ + IMAGE2DMS = 493, /* IMAGE2DMS */ + IIMAGE2DMS = 494, /* IIMAGE2DMS */ + UIMAGE2DMS = 495, /* UIMAGE2DMS */ + IMAGE2DMSARRAY = 496, /* IMAGE2DMSARRAY */ + IIMAGE2DMSARRAY = 497, /* IIMAGE2DMSARRAY */ + UIMAGE2DMSARRAY = 498, /* UIMAGE2DMSARRAY */ + F16IMAGE1D = 499, /* F16IMAGE1D */ + F16IMAGE2D = 500, /* F16IMAGE2D */ + F16IMAGE3D = 501, /* F16IMAGE3D */ + F16IMAGE2DRECT = 502, /* F16IMAGE2DRECT */ + F16IMAGECUBE = 503, /* F16IMAGECUBE */ + F16IMAGE1DARRAY = 504, /* F16IMAGE1DARRAY */ + F16IMAGE2DARRAY = 505, /* F16IMAGE2DARRAY */ + F16IMAGECUBEARRAY = 506, /* 
F16IMAGECUBEARRAY */ + F16IMAGEBUFFER = 507, /* F16IMAGEBUFFER */ + F16IMAGE2DMS = 508, /* F16IMAGE2DMS */ + F16IMAGE2DMSARRAY = 509, /* F16IMAGE2DMSARRAY */ + I64IMAGE1D = 510, /* I64IMAGE1D */ + U64IMAGE1D = 511, /* U64IMAGE1D */ + I64IMAGE2D = 512, /* I64IMAGE2D */ + U64IMAGE2D = 513, /* U64IMAGE2D */ + I64IMAGE3D = 514, /* I64IMAGE3D */ + U64IMAGE3D = 515, /* U64IMAGE3D */ + I64IMAGE2DRECT = 516, /* I64IMAGE2DRECT */ + U64IMAGE2DRECT = 517, /* U64IMAGE2DRECT */ + I64IMAGECUBE = 518, /* I64IMAGECUBE */ + U64IMAGECUBE = 519, /* U64IMAGECUBE */ + I64IMAGEBUFFER = 520, /* I64IMAGEBUFFER */ + U64IMAGEBUFFER = 521, /* U64IMAGEBUFFER */ + I64IMAGE1DARRAY = 522, /* I64IMAGE1DARRAY */ + U64IMAGE1DARRAY = 523, /* U64IMAGE1DARRAY */ + I64IMAGE2DARRAY = 524, /* I64IMAGE2DARRAY */ + U64IMAGE2DARRAY = 525, /* U64IMAGE2DARRAY */ + I64IMAGECUBEARRAY = 526, /* I64IMAGECUBEARRAY */ + U64IMAGECUBEARRAY = 527, /* U64IMAGECUBEARRAY */ + I64IMAGE2DMS = 528, /* I64IMAGE2DMS */ + U64IMAGE2DMS = 529, /* U64IMAGE2DMS */ + I64IMAGE2DMSARRAY = 530, /* I64IMAGE2DMSARRAY */ + U64IMAGE2DMSARRAY = 531, /* U64IMAGE2DMSARRAY */ + TEXTURECUBEARRAY = 532, /* TEXTURECUBEARRAY */ + ITEXTURECUBEARRAY = 533, /* ITEXTURECUBEARRAY */ + UTEXTURECUBEARRAY = 534, /* UTEXTURECUBEARRAY */ + TEXTURE1D = 535, /* TEXTURE1D */ + ITEXTURE1D = 536, /* ITEXTURE1D */ + UTEXTURE1D = 537, /* UTEXTURE1D */ + TEXTURE1DARRAY = 538, /* TEXTURE1DARRAY */ + ITEXTURE1DARRAY = 539, /* ITEXTURE1DARRAY */ + UTEXTURE1DARRAY = 540, /* UTEXTURE1DARRAY */ + TEXTURE2DRECT = 541, /* TEXTURE2DRECT */ + ITEXTURE2DRECT = 542, /* ITEXTURE2DRECT */ + UTEXTURE2DRECT = 543, /* UTEXTURE2DRECT */ + TEXTUREBUFFER = 544, /* TEXTUREBUFFER */ + ITEXTUREBUFFER = 545, /* ITEXTUREBUFFER */ + UTEXTUREBUFFER = 546, /* UTEXTUREBUFFER */ + TEXTURE2DMS = 547, /* TEXTURE2DMS */ + ITEXTURE2DMS = 548, /* ITEXTURE2DMS */ + UTEXTURE2DMS = 549, /* UTEXTURE2DMS */ + TEXTURE2DMSARRAY = 550, /* TEXTURE2DMSARRAY */ + ITEXTURE2DMSARRAY = 551, /* ITEXTURE2DMSARRAY */ + UTEXTURE2DMSARRAY = 552, /* UTEXTURE2DMSARRAY */ + F16TEXTURE1D = 553, /* F16TEXTURE1D */ + F16TEXTURE2D = 554, /* F16TEXTURE2D */ + F16TEXTURE3D = 555, /* F16TEXTURE3D */ + F16TEXTURE2DRECT = 556, /* F16TEXTURE2DRECT */ + F16TEXTURECUBE = 557, /* F16TEXTURECUBE */ + F16TEXTURE1DARRAY = 558, /* F16TEXTURE1DARRAY */ + F16TEXTURE2DARRAY = 559, /* F16TEXTURE2DARRAY */ + F16TEXTURECUBEARRAY = 560, /* F16TEXTURECUBEARRAY */ + F16TEXTUREBUFFER = 561, /* F16TEXTUREBUFFER */ + F16TEXTURE2DMS = 562, /* F16TEXTURE2DMS */ + F16TEXTURE2DMSARRAY = 563, /* F16TEXTURE2DMSARRAY */ + SUBPASSINPUT = 564, /* SUBPASSINPUT */ + SUBPASSINPUTMS = 565, /* SUBPASSINPUTMS */ + ISUBPASSINPUT = 566, /* ISUBPASSINPUT */ + ISUBPASSINPUTMS = 567, /* ISUBPASSINPUTMS */ + USUBPASSINPUT = 568, /* USUBPASSINPUT */ + USUBPASSINPUTMS = 569, /* USUBPASSINPUTMS */ + F16SUBPASSINPUT = 570, /* F16SUBPASSINPUT */ + F16SUBPASSINPUTMS = 571, /* F16SUBPASSINPUTMS */ + LEFT_OP = 572, /* LEFT_OP */ + RIGHT_OP = 573, /* RIGHT_OP */ + INC_OP = 574, /* INC_OP */ + DEC_OP = 575, /* DEC_OP */ + LE_OP = 576, /* LE_OP */ + GE_OP = 577, /* GE_OP */ + EQ_OP = 578, /* EQ_OP */ + NE_OP = 579, /* NE_OP */ + AND_OP = 580, /* AND_OP */ + OR_OP = 581, /* OR_OP */ + XOR_OP = 582, /* XOR_OP */ + MUL_ASSIGN = 583, /* MUL_ASSIGN */ + DIV_ASSIGN = 584, /* DIV_ASSIGN */ + ADD_ASSIGN = 585, /* ADD_ASSIGN */ + MOD_ASSIGN = 586, /* MOD_ASSIGN */ + LEFT_ASSIGN = 587, /* LEFT_ASSIGN */ + RIGHT_ASSIGN = 588, /* RIGHT_ASSIGN */ + AND_ASSIGN = 589, /* AND_ASSIGN */ + XOR_ASSIGN = 590, /* XOR_ASSIGN 
*/ + OR_ASSIGN = 591, /* OR_ASSIGN */ + SUB_ASSIGN = 592, /* SUB_ASSIGN */ + STRING_LITERAL = 593, /* STRING_LITERAL */ + LEFT_PAREN = 594, /* LEFT_PAREN */ + RIGHT_PAREN = 595, /* RIGHT_PAREN */ + LEFT_BRACKET = 596, /* LEFT_BRACKET */ + RIGHT_BRACKET = 597, /* RIGHT_BRACKET */ + LEFT_BRACE = 598, /* LEFT_BRACE */ + RIGHT_BRACE = 599, /* RIGHT_BRACE */ + DOT = 600, /* DOT */ + COMMA = 601, /* COMMA */ + COLON = 602, /* COLON */ + EQUAL = 603, /* EQUAL */ + SEMICOLON = 604, /* SEMICOLON */ + BANG = 605, /* BANG */ + DASH = 606, /* DASH */ + TILDE = 607, /* TILDE */ + PLUS = 608, /* PLUS */ + STAR = 609, /* STAR */ + SLASH = 610, /* SLASH */ + PERCENT = 611, /* PERCENT */ + LEFT_ANGLE = 612, /* LEFT_ANGLE */ + RIGHT_ANGLE = 613, /* RIGHT_ANGLE */ + VERTICAL_BAR = 614, /* VERTICAL_BAR */ + CARET = 615, /* CARET */ + AMPERSAND = 616, /* AMPERSAND */ + QUESTION = 617, /* QUESTION */ + INVARIANT = 618, /* INVARIANT */ + HIGH_PRECISION = 619, /* HIGH_PRECISION */ + MEDIUM_PRECISION = 620, /* MEDIUM_PRECISION */ + LOW_PRECISION = 621, /* LOW_PRECISION */ + PRECISION = 622, /* PRECISION */ + PACKED = 623, /* PACKED */ + RESOURCE = 624, /* RESOURCE */ + SUPERP = 625, /* SUPERP */ + FLOATCONSTANT = 626, /* FLOATCONSTANT */ + INTCONSTANT = 627, /* INTCONSTANT */ + UINTCONSTANT = 628, /* UINTCONSTANT */ + BOOLCONSTANT = 629, /* BOOLCONSTANT */ + IDENTIFIER = 630, /* IDENTIFIER */ + TYPE_NAME = 631, /* TYPE_NAME */ + CENTROID = 632, /* CENTROID */ + IN = 633, /* IN */ + OUT = 634, /* OUT */ + INOUT = 635, /* INOUT */ + STRUCT = 636, /* STRUCT */ + VOID = 637, /* VOID */ + WHILE = 638, /* WHILE */ + BREAK = 639, /* BREAK */ + CONTINUE = 640, /* CONTINUE */ + DO = 641, /* DO */ + ELSE = 642, /* ELSE */ + FOR = 643, /* FOR */ + IF = 644, /* IF */ + DISCARD = 645, /* DISCARD */ + RETURN = 646, /* RETURN */ + SWITCH = 647, /* SWITCH */ + CASE = 648, /* CASE */ + DEFAULT = 649, /* DEFAULT */ + TERMINATE_INVOCATION = 650, /* TERMINATE_INVOCATION */ + TERMINATE_RAY = 651, /* TERMINATE_RAY */ + IGNORE_INTERSECTION = 652, /* IGNORE_INTERSECTION */ + UNIFORM = 653, /* UNIFORM */ + SHARED = 654, /* SHARED */ + BUFFER = 655, /* BUFFER */ + FLAT = 656, /* FLAT */ + SMOOTH = 657, /* SMOOTH */ + LAYOUT = 658, /* LAYOUT */ + DOUBLECONSTANT = 659, /* DOUBLECONSTANT */ + INT16CONSTANT = 660, /* INT16CONSTANT */ + UINT16CONSTANT = 661, /* UINT16CONSTANT */ + FLOAT16CONSTANT = 662, /* FLOAT16CONSTANT */ + INT32CONSTANT = 663, /* INT32CONSTANT */ + UINT32CONSTANT = 664, /* UINT32CONSTANT */ + INT64CONSTANT = 665, /* INT64CONSTANT */ + UINT64CONSTANT = 666, /* UINT64CONSTANT */ + SUBROUTINE = 667, /* SUBROUTINE */ + DEMOTE = 668, /* DEMOTE */ + PAYLOADNV = 669, /* PAYLOADNV */ + PAYLOADINNV = 670, /* PAYLOADINNV */ + HITATTRNV = 671, /* HITATTRNV */ + CALLDATANV = 672, /* CALLDATANV */ + CALLDATAINNV = 673, /* CALLDATAINNV */ + PAYLOADEXT = 674, /* PAYLOADEXT */ + PAYLOADINEXT = 675, /* PAYLOADINEXT */ + HITATTREXT = 676, /* HITATTREXT */ + CALLDATAEXT = 677, /* CALLDATAEXT */ + CALLDATAINEXT = 678, /* CALLDATAINEXT */ + PATCH = 679, /* PATCH */ + SAMPLE = 680, /* SAMPLE */ + NONUNIFORM = 681, /* NONUNIFORM */ + COHERENT = 682, /* COHERENT */ + VOLATILE = 683, /* VOLATILE */ + RESTRICT = 684, /* RESTRICT */ + READONLY = 685, /* READONLY */ + WRITEONLY = 686, /* WRITEONLY */ + DEVICECOHERENT = 687, /* DEVICECOHERENT */ + QUEUEFAMILYCOHERENT = 688, /* QUEUEFAMILYCOHERENT */ + WORKGROUPCOHERENT = 689, /* WORKGROUPCOHERENT */ + SUBGROUPCOHERENT = 690, /* SUBGROUPCOHERENT */ + NONPRIVATE = 691, /* NONPRIVATE */ + 
SHADERCALLCOHERENT = 692, /* SHADERCALLCOHERENT */ + NOPERSPECTIVE = 693, /* NOPERSPECTIVE */ + EXPLICITINTERPAMD = 694, /* EXPLICITINTERPAMD */ + PERVERTEXNV = 695, /* PERVERTEXNV */ + PERPRIMITIVENV = 696, /* PERPRIMITIVENV */ + PERVIEWNV = 697, /* PERVIEWNV */ + PERTASKNV = 698, /* PERTASKNV */ + PRECISE = 699 /* PRECISE */ }; + typedef enum yytokentype yytoken_kind_t; #endif /* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED - union YYSTYPE { -#line 97 "MachineIndependent/glslang.y" /* yacc.c:1909 */ +#line 97 "MachineIndependent/glslang.y" struct { glslang::TSourceLoc loc; @@ -506,9 +540,9 @@ union YYSTYPE glslang::TArraySizes* typeParameters; } interm; -#line 510 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */ -}; +#line 544 "MachineIndependent/glslang_tab.cpp.h" +}; typedef union YYSTYPE YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define YYSTYPE_IS_DECLARED 1 diff --git a/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp b/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp index f23a7058ab..5ce3e47280 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp @@ -438,6 +438,9 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break; case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break; + case EOpConvUint64ToAccStruct: out.debug << "Convert uint64_t to acceleration structure"; break; + case EOpConvUvec2ToAccStruct: out.debug << "Convert uvec2 to acceleration strucuture "; break; + case EOpRadians: out.debug << "radians"; break; case EOpDegrees: out.debug << "degrees"; break; case EOpSin: out.debug << "sine"; break; @@ -829,6 +832,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break; case EOpConstructReference: out.debug << "Construct reference"; break; case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break; + case EOpConstructAccStruct: out.debug << "Construct acceleration structure"; break; case EOpLessThan: out.debug << "Compare Less Than"; break; case EOpGreaterThan: out.debug << "Compare Greater Than"; break; @@ -1079,11 +1083,15 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node case EOpSubpassLoad: out.debug << "subpassLoad"; break; case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break; - case EOpTrace: out.debug << "traceNV"; break; + case EOpTraceNV: out.debug << "traceNV"; break; + case EOpTraceKHR: out.debug << "traceRayKHR"; break; case EOpReportIntersection: out.debug << "reportIntersectionNV"; break; - case EOpIgnoreIntersection: out.debug << "ignoreIntersectionNV"; break; - case EOpTerminateRay: out.debug << "terminateRayNV"; break; - case EOpExecuteCallable: out.debug << "executeCallableNV"; break; + case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break; + case EOpIgnoreIntersectionKHR: out.debug << "ignoreIntersectionKHR"; break; + case EOpTerminateRayNV: out.debug << "terminateRayNV"; break; + case EOpTerminateRayKHR: out.debug << "terminateRayKHR"; break; + case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break; + case EOpExecuteCallableKHR: out.debug << "executeCallableKHR"; break; case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break; case EOpRayQueryInitialize: 
out.debug << "rayQueryInitializeEXT"; break; @@ -1321,6 +1329,9 @@ static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const out.debug << buf << "\n"; } break; + case EbtString: + out.debug << "\"" << constUnion[i].getSConst()->c_str() << "\"\n"; + break; default: out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc()); break; @@ -1406,14 +1417,17 @@ bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node) OutputTreeText(out, node, depth); switch (node->getFlowOp()) { - case EOpKill: out.debug << "Branch: Kill"; break; - case EOpBreak: out.debug << "Branch: Break"; break; - case EOpContinue: out.debug << "Branch: Continue"; break; - case EOpReturn: out.debug << "Branch: Return"; break; - case EOpCase: out.debug << "case: "; break; - case EOpDemote: out.debug << "Demote"; break; - case EOpDefault: out.debug << "default: "; break; - default: out.debug << "Branch: Unknown Branch"; break; + case EOpKill: out.debug << "Branch: Kill"; break; + case EOpTerminateInvocation: out.debug << "Branch: TerminateInvocation"; break; + case EOpIgnoreIntersectionKHR: out.debug << "Branch: IgnoreIntersectionKHR"; break; + case EOpTerminateRayKHR: out.debug << "Branch: TerminateRayKHR"; break; + case EOpBreak: out.debug << "Branch: Break"; break; + case EOpContinue: out.debug << "Branch: Continue"; break; + case EOpReturn: out.debug << "Branch: Return"; break; + case EOpCase: out.debug << "case: "; break; + case EOpDemote: out.debug << "Demote"; break; + case EOpDefault: out.debug << "default: "; break; + default: out.debug << "Branch: Unknown Branch"; break; } if (node->getExpression()) { diff --git a/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp b/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp index 905cf65d6b..c42e74fa5f 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp @@ -37,9 +37,11 @@ #include "../Include/Common.h" #include "../Include/InfoSink.h" +#include "../Include/Types.h" #include "gl_types.h" #include "iomapper.h" +#include "SymbolTable.h" // // Map IO bindings. @@ -82,17 +84,17 @@ public: // If a global is being visited, then we should also traverse it incase it's evaluation // ends up visiting inputs we want to tag as live else if (base->getQualifier().storage == EvqGlobal) - addGlobalReference(base->getName()); + addGlobalReference(base->getAccessName()); if (target) { TVarEntryInfo ent = {base->getId(), base, ! traverseAll}; ent.stage = intermediate.getStage(); TVarLiveMap::iterator at = target->find( - ent.symbol->getName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById()); + ent.symbol->getAccessName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById()); if (at != target->end() && at->second.id == ent.id) at->second.live = at->second.live || ! 
traverseAll; // update live state else - (*target)[ent.symbol->getName()] = ent; + (*target)[ent.symbol->getAccessName()] = ent; } } @@ -125,7 +127,8 @@ public: return; TVarEntryInfo ent = { base->getId() }; - TVarLiveMap::const_iterator at = source->find(base->getName()); + // Fix a defect, when block has no instance name, we need to find its block name + TVarLiveMap::const_iterator at = source->find(base->getAccessName()); if (at == source->end()) return; @@ -181,7 +184,7 @@ struct TNotifyInOutAdaptor inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { - resolver.notifyInOut(stage, entKey.second); + resolver.notifyInOut(entKey.second.stage, entKey.second); } private: @@ -189,12 +192,13 @@ private: }; struct TResolverUniformAdaptor { - TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e) + TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TVarLiveMap* uniform[EShLangCount], TInfoSink& i, bool& e) : stage(s) , resolver(r) , infoSink(i) , error(e) { + memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*))); } inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { @@ -206,9 +210,9 @@ struct TResolverUniformAdaptor { ent.newIndex = -1; const bool isValid = resolver.validateBinding(stage, ent); if (isValid) { - resolver.resolveBinding(stage, ent); - resolver.resolveSet(stage, ent); - resolver.resolveUniformLocation(stage, ent); + resolver.resolveBinding(ent.stage, ent); + resolver.resolveSet(ent.stage, ent); + resolver.resolveUniformLocation(ent.stage, ent); if (ent.newBinding != -1) { if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) { @@ -217,6 +221,17 @@ struct TResolverUniformAdaptor { infoSink.info.message(EPrefixInternalError, err.c_str()); error = true; } + + if (ent.symbol->getQualifier().hasBinding()) { + for (uint32_t idx = EShLangVertex; idx < EShLangCount; ++idx) { + if (idx == ent.stage || uniformVarMap[idx] == nullptr) + continue; + auto entKey2 = uniformVarMap[idx]->find(entKey.first); + if (entKey2 != uniformVarMap[idx]->end()) { + entKey2->second.newBinding = ent.newBinding; + } + } + } } if (ent.newSet != -1) { if (ent.newSet >= int(TQualifier::layoutSetEnd)) { @@ -225,6 +240,16 @@ struct TResolverUniformAdaptor { infoSink.info.message(EPrefixInternalError, err.c_str()); error = true; } + if (ent.symbol->getQualifier().hasSet()) { + for (uint32_t idx = EShLangVertex; idx < EShLangCount; ++idx) { + if ((idx == stage) || (uniformVarMap[idx] == nullptr)) + continue; + auto entKey2 = uniformVarMap[idx]->find(entKey.first); + if (entKey2 != uniformVarMap[idx]->end()) { + entKey2->second.newSet = ent.newSet; + } + } + } } } else { TString errorMsg = "Invalid binding: " + entKey.first; @@ -239,7 +264,7 @@ struct TResolverUniformAdaptor { TIoMapResolver& resolver; TInfoSink& infoSink; bool& error; - + TVarLiveMap* uniformVarMap[EShLangCount]; private: TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&) = delete; }; @@ -261,7 +286,7 @@ struct TResolverInOutAdaptor { ent.newBinding = -1; ent.newSet = -1; ent.newIndex = -1; - const bool isValid = resolver.validateInOut(stage, ent); + const bool isValid = resolver.validateInOut(ent.stage, ent); if (isValid) { resolver.resolveInOutLocation(stage, ent); resolver.resolveInOutComponent(stage, ent); @@ -296,17 +321,116 @@ private: struct TSymbolValidater { TSymbolValidater(TIoMapResolver& r, TInfoSink& i, TVarLiveMap* in[EShLangCount], TVarLiveMap* out[EShLangCount], - TVarLiveMap* uniform[EShLangCount], bool& hadError) + TVarLiveMap* 
uniform[EShLangCount], bool& hadError, EProfile profile, int version) : preStage(EShLangCount) , currentStage(EShLangCount) , nextStage(EShLangCount) , resolver(r) , infoSink(i) , hadError(hadError) + , profile(profile) + , version(version) { memcpy(inVarMaps, in, EShLangCount * (sizeof(TVarLiveMap*))); memcpy(outVarMaps, out, EShLangCount * (sizeof(TVarLiveMap*))); memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*))); + + std::map<TString, TString> anonymousMemberMap; + std::vector<TRange> usedUniformLocation; + std::vector<TString> usedUniformName; + usedUniformLocation.clear(); + usedUniformName.clear(); + for (int i = 0; i < EShLangCount; i++) { + if (uniformVarMap[i]) { + for (auto uniformVar : *uniformVarMap[i]) + { + TIntermSymbol* pSymbol = uniformVar.second.symbol; + TQualifier qualifier = uniformVar.second.symbol->getQualifier(); + TString symbolName = pSymbol->getAccessName(); + + // All the uniform needs multi-stage location check (block/default) + int uniformLocation = qualifier.layoutLocation; + + if (uniformLocation != TQualifier::layoutLocationEnd) { + // Total size of current uniform, could be block, struct or other types. + int size = TIntermediate::computeTypeUniformLocationSize(pSymbol->getType()); + + TRange locationRange(uniformLocation, uniformLocation + size - 1); + + // Combine location and component ranges + int overlapLocation = -1; + bool diffLocation = false; + + // Check for collisions, except for vertex inputs on desktop targeting OpenGL + overlapLocation = checkLocationOverlap(locationRange, usedUniformLocation, symbolName, usedUniformName, diffLocation); + + // Overlap locations of uniforms, regardless of components (multi stages) + if (overlapLocation == -1) { + usedUniformLocation.push_back(locationRange); + usedUniformName.push_back(symbolName); + } + else if (overlapLocation >= 0) { + if (diffLocation == true) { + TString err = ("Uniform location should be equal for same uniforms: " +std::to_string(overlapLocation)).c_str(); + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + break; + } + else { + TString err = ("Uniform location overlaps across stages: " + std::to_string(overlapLocation)).c_str(); + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + break; + } + } + } + + if ((uniformVar.second.symbol->getBasicType() == EbtBlock) && + IsAnonymous(uniformVar.second.symbol->getName())) + { + auto blockType = uniformVar.second.symbol->getType().getStruct(); + for (size_t memberIdx = 0; memberIdx < blockType->size(); ++memberIdx) { + auto memberName = (*blockType)[memberIdx].type->getFieldName(); + if (anonymousMemberMap.find(memberName) != anonymousMemberMap.end()) + { + if (anonymousMemberMap[memberName] != uniformVar.second.symbol->getType().getTypeName()) + { + TString err = "Invalid block member name: " + memberName; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + break; + } + } + else + { + anonymousMemberMap[memberName] = uniformVar.second.symbol->getType().getTypeName(); + } + } + } + if (hadError) + break; + } + } + } + } + + // In case we need to new an intermediate, which costs too much + int checkLocationOverlap(const TRange& locationRange, std::vector<TRange>& usedUniformLocation, const TString symbolName, std::vector<TString>& usedUniformName, bool& diffLocation) + { + for (size_t r = 0; r < usedUniformLocation.size(); ++r) { + if (usedUniformName[r] == symbolName) { + diffLocation = true; + return (usedUniformLocation[r].start == 
locationRange.start && + usedUniformLocation[r].last == locationRange.last) + ? -2 : std::max(locationRange.start, usedUniformLocation[r].start); + } + if (locationRange.overlap(usedUniformLocation[r])) { + // there is a collision; pick one + return std::max(locationRange.start, usedUniformLocation[r].start); + } + } + + return -1; // no collision } inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { @@ -339,11 +463,24 @@ struct TSymbolValidater // validate stage in; if (preStage == EShLangCount) return; - if (name == "gl_PerVertex") + if (TSymbolTable::isBuiltInSymbol(base->getId())) return; if (outVarMaps[preStage] != nullptr) { auto ent2 = outVarMaps[preStage]->find(name); + uint32_t location = base->getType().getQualifier().layoutLocation; + if (ent2 == outVarMaps[preStage]->end() && + location != glslang::TQualifier::layoutLocationEnd) { + for (auto var = outVarMaps[preStage]->begin(); var != ent2; var++) { + if (var->second.symbol->getType().getQualifier().layoutLocation == location) { + ent2 = var; + break; + } + } + } if (ent2 != outVarMaps[preStage]->end()) { + auto& type1 = base->getType(); + auto& type2 = ent2->second.symbol->getType(); + hadError = hadError || typeCheck(&type1, &type2, name.c_str(), false); if (ent2->second.symbol->getType().getQualifier().isArrayedIo(preStage)) { TType subType(ent2->second.symbol->getType(), 0); subType.appendMangledName(mangleName2); @@ -351,23 +488,49 @@ struct TSymbolValidater else { ent2->second.symbol->getType().appendMangledName(mangleName2); } - if (mangleName1 == mangleName2) + + if (mangleName1 == mangleName2) { + // For ES 3.0 only, other versions have no such restrictions + // According to ES 3.0 spec: The type and presence of the interpolation qualifiers and + // storage qualifiers of variables with the same name declared in all linked shaders must + // match, otherwise the link command will fail. 
+ if (profile == EEsProfile && version == 300) { + // Don't need to check smooth qualifier, as it uses the default interpolation mode + if (ent1.stage == EShLangFragment && type1.isBuiltIn() == false) { + if (type1.getQualifier().flat != type2.getQualifier().flat || + type1.getQualifier().nopersp != type2.getQualifier().nopersp) { + TString err = "Interpolation qualifier mismatch : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + } + } return; + } else { TString err = "Invalid In/Out variable type : " + entKey.first; infoSink.info.message(EPrefixInternalError, err.c_str()); hadError = true; } } + else if (!base->getType().isBuiltIn()) { + // According to spec: A link error is generated if any statically referenced input variable + // or block does not have a matching output + if (profile == EEsProfile && ent1.live) { + hadError = true; + TString errorStr = name + ": not been declare as a output variable in pre shader stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + } return; } } else if (base->getQualifier().storage == EvqVaryingOut) { // validate stage out; if (nextStage == EShLangCount) return; - if (name == "gl_PerVertex") + if (TSymbolTable::isBuiltInSymbol(base->getId())) return; - if (outVarMaps[nextStage] != nullptr) { + if (inVarMaps[nextStage] != nullptr) { auto ent2 = inVarMaps[nextStage]->find(name); if (ent2 != inVarMaps[nextStage]->end()) { if (ent2->second.symbol->getType().getQualifier().isArrayedIo(nextStage)) { @@ -400,11 +563,50 @@ struct TSymbolValidater hadError = true; } mangleName2.clear(); + + // validate instance name of blocks + if (hadError == false && + base->getType().getBasicType() == EbtBlock && + IsAnonymous(base->getName()) != IsAnonymous(ent2->second.symbol->getName())) { + TString err = "Matched uniform block names must also either all be lacking " + "an instance name or all having an instance name: " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + + // validate uniform block member qualifier and member names + auto& type1 = base->getType(); + auto& type2 = ent2->second.symbol->getType(); + if (hadError == false && base->getType().getBasicType() == EbtBlock) { + hadError = hadError || typeCheck(&type1, &type2, name.c_str(), true); + } + else { + hadError = hadError || typeCheck(&type1, &type2, name.c_str(), false); + } + } + else if (base->getBasicType() == EbtBlock) + { + if (IsAnonymous(base->getName())) + { + // The name of anonymous block member can't same with default uniform variable. + auto blockType1 = base->getType().getStruct(); + for (size_t memberIdx = 0; memberIdx < blockType1->size(); ++memberIdx) { + auto memberName = (*blockType1)[memberIdx].type->getFieldName(); + if (uniformVarMap[i]->find(memberName) != uniformVarMap[i]->end()) + { + TString err = "Invalid Uniform variable name : " + memberName; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + break; + } + } + } } } } } } + TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount], *uniformVarMap[EShLangCount]; // Use for mark pre stage, to get more interface symbol information. 
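The ES 3.0 check added above requires the interpolation qualifiers of a fragment input and the previous stage's output to match, and reports an error otherwise. A minimal standalone sketch of that comparison; Qualifier and interpolationMismatch are simplified stand-ins for illustration, not glslang's types:

#include <iostream>
#include <string>

struct Qualifier {
    bool flat = false;      // "flat" interpolation
    bool nopersp = false;   // "noperspective" interpolation
};

// Returns true (and prints a diagnostic) when the qualifiers of a fragment-shader
// input and the previous stage's output disagree, mirroring the idea of the check above.
bool interpolationMismatch(const std::string& name, const Qualifier& in, const Qualifier& out) {
    if (in.flat != out.flat || in.nopersp != out.nopersp) {
        std::cout << "Interpolation qualifier mismatch : " << name << "\n";
        return true;
    }
    return false;
}

int main() {
    Qualifier fragIn;  fragIn.flat = true;   // declared "flat in" in the fragment shader
    Qualifier vertOut;                       // declared plain "out" in the vertex shader
    return interpolationMismatch("v_color", fragIn, vertOut) ? 1 : 0;
}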
EShLanguage preStage, currentStage, nextStage; @@ -412,9 +614,118 @@ struct TSymbolValidater TIoMapResolver& resolver; TInfoSink& infoSink; bool& hadError; + EProfile profile; + int version; private: TSymbolValidater& operator=(TSymbolValidater&) = delete; + + bool qualifierCheck(const TType* const type1, const TType* const type2, const std::string& name, bool isBlock) + { + bool hasError = false; + const TQualifier& qualifier1 = type1->getQualifier(); + const TQualifier& qualifier2 = type2->getQualifier(); + + if (((isBlock == false) && + (type1->getQualifier().storage == EvqUniform && type2->getQualifier().storage == EvqUniform)) || + (type1->getQualifier().storage == EvqGlobal && type2->getQualifier().storage == EvqGlobal)) { + if (qualifier1.precision != qualifier2.precision) { + hasError = true; + std::string errorStr = name + ": have precision conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + if (qualifier1.hasFormat() && qualifier2.hasFormat()) { + if (qualifier1.layoutFormat != qualifier2.layoutFormat) { + hasError = true; + std::string errorStr = name + ": have layout format conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + + } + } + + if (isBlock == true) { + if (qualifier1.layoutPacking != qualifier2.layoutPacking) { + hasError = true; + std::string errorStr = name + ": have layoutPacking conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + if (qualifier1.layoutMatrix != qualifier2.layoutMatrix) { + hasError = true; + std::string errorStr = name + ": have layoutMatrix conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + if (qualifier1.layoutOffset != qualifier2.layoutOffset) { + hasError = true; + std::string errorStr = name + ": have layoutOffset conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + if (qualifier1.layoutAlign != qualifier2.layoutAlign) { + hasError = true; + std::string errorStr = name + ": have layoutAlign conflict cross stage."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + } + } + + return hasError; + } + + bool typeCheck(const TType* const type1, const TType* const type2, const std::string& name, bool isBlock) + { + bool hasError = false; + if (!(type1->isStruct() && type2->isStruct())) { + hasError = hasError || qualifierCheck(type1, type2, name, isBlock); + } + else { + if (type1->getBasicType() == EbtBlock && type2->getBasicType() == EbtBlock) + isBlock = true; + const TTypeList* typeList1 = type1->getStruct(); + const TTypeList* typeList2 = type2->getStruct(); + + std::string newName = name; + size_t memberCount = typeList1->size(); + size_t index2 = 0; + for (size_t index = 0; index < memberCount; index++, index2++) { + // Skip inactive member + if (typeList1->at(index).type->getBasicType() == EbtVoid) + continue; + while (index2 < typeList2->size() && typeList2->at(index2).type->getBasicType() == EbtVoid) { + ++index2; + } + + // TypeList1 has more members in list + if (index2 == typeList2->size()) { + std::string errorStr = name + ": struct mismatch."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + hasError = true; + break; + } + + if (typeList1->at(index).type->getFieldName() != typeList2->at(index2).type->getFieldName()) { + std::string errorStr = name + ": member name mismatch."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + hasError = true; + } + else { + newName = typeList1->at(index).type->getFieldName().c_str(); + } + hasError = 
hasError || typeCheck(typeList1->at(index).type, typeList2->at(index2).type, newName, isBlock); + } + + while (index2 < typeList2->size()) + { + // TypeList2 has more members + if (typeList2->at(index2).type->getBasicType() != EbtVoid) { + std::string errorStr = name + ": struct mismatch."; + infoSink.info.message(EPrefixError, errorStr.c_str()); + hasError = true; + break; + } + ++index2; + } + } + return hasError; + } }; struct TSlotCollector { @@ -500,7 +811,7 @@ int TDefaultIoResolverBase::resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent int TDefaultIoResolverBase::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) { const TType& type = ent.symbol->getType(); - const char* name = ent.symbol->getName().c_str(); + const char* name = ent.symbol->getAccessName().c_str(); // kick out of not doing this if (! doAutoLocationMapping()) { return ent.newLocation = -1; @@ -609,7 +920,7 @@ TDefaultGlslIoResolver::TDefaultGlslIoResolver(const TIntermediate& intermediate int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) { const TType& type = ent.symbol->getType(); - const TString& name = getAccessName(ent.symbol); + const TString& name = ent.symbol->getAccessName(); if (currentStage != stage) { preStage = currentStage; currentStage = stage; @@ -693,7 +1004,7 @@ int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInf int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) { const TType& type = ent.symbol->getType(); - const TString& name = getAccessName(ent.symbol); + const TString& name = ent.symbol->getAccessName(); // kick out of not doing this if (! doAutoLocationMapping()) { return ent.newLocation = -1; @@ -764,7 +1075,7 @@ int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEn int TDefaultGlslIoResolver::resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) { const TType& type = ent.symbol->getType(); - const TString& name = getAccessName(ent.symbol); + const TString& name = ent.symbol->getAccessName(); // On OpenGL arrays of opaque types take a separate binding for each element int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1; TResourceType resource = getResourceType(type); @@ -839,7 +1150,7 @@ void TDefaultGlslIoResolver::endCollect(EShLanguage /*stage*/) { void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { const TType& type = ent.symbol->getType(); - const TString& name = getAccessName(ent.symbol); + const TString& name = ent.symbol->getAccessName(); TStorageQualifier storage = type.getQualifier().storage; EShLanguage stage(EShLangCount); switch (storage) { @@ -899,7 +1210,7 @@ void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { const TType& type = ent.symbol->getType(); - const TString& name = getAccessName(ent.symbol); + const TString& name = ent.symbol->getAccessName(); int resource = getResourceType(type); if (type.getQualifier().hasBinding()) { TVarSlotMap& varSlotMap = resourceSlotMap[resource]; @@ -922,13 +1233,6 @@ void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& } } -const TString& TDefaultGlslIoResolver::getAccessName(const TIntermSymbol* symbol) -{ - return symbol->getBasicType() == EbtBlock ? 
- symbol->getType().getTypeName() : - symbol->getName(); -} - //TDefaultGlslIoResolver end /* @@ -1117,25 +1421,23 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi } // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. - std::for_each(inVarMap.begin(), inVarMap.end(), - [&inVector](TVarLivePair p) { inVector.push_back(p); }); + for (auto& var : inVarMap) { inVector.push_back(var); } std::sort(inVector.begin(), inVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); }); - std::for_each(outVarMap.begin(), outVarMap.end(), - [&outVector](TVarLivePair p) { outVector.push_back(p); }); + for (auto& var : outVarMap) { outVector.push_back(var); } std::sort(outVector.begin(), outVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); }); - std::for_each(uniformVarMap.begin(), uniformVarMap.end(), - [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + for (auto& var : uniformVarMap) { uniformVector.push_back(var); } std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); }); bool hadError = false; + TVarLiveMap* dummyUniformVarMap[EShLangCount] = {}; TNotifyInOutAdaptor inOutNotify(stage, *resolver); TNotifyUniformAdaptor uniformNotify(stage, *resolver); - TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError); + TResolverUniformAdaptor uniformResolve(stage, *resolver, dummyUniformVarMap, infoSink, hadError); TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError); resolver->beginNotifications(stage); std::for_each(inVector.begin(), inVector.end(), inOutNotify); @@ -1143,22 +1445,22 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi std::for_each(uniformVector.begin(), uniformVector.end(), uniformNotify); resolver->endNotifications(stage); resolver->beginResolve(stage); - std::for_each(inVector.begin(), inVector.end(), inOutResolve); + for (auto& var : inVector) { inOutResolve(var); } std::for_each(inVector.begin(), inVector.end(), [&inVarMap](TVarLivePair p) { - auto at = inVarMap.find(p.second.symbol->getName()); - if (at != inVarMap.end()) + auto at = inVarMap.find(p.second.symbol->getAccessName()); + if (at != inVarMap.end() && p.second.id == at->second.id) at->second = p.second; }); - std::for_each(outVector.begin(), outVector.end(), inOutResolve); + for (auto& var : outVector) { inOutResolve(var); } std::for_each(outVector.begin(), outVector.end(), [&outVarMap](TVarLivePair p) { - auto at = outVarMap.find(p.second.symbol->getName()); - if (at != outVarMap.end()) + auto at = outVarMap.find(p.second.symbol->getAccessName()); + if (at != outVarMap.end() && p.second.id == at->second.id) at->second = p.second; }); std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); std::for_each(uniformVector.begin(), uniformVector.end(), [&uniformVarMap](TVarLivePair p) { - auto at = uniformVarMap.find(p.second.symbol->getName()); - if (at != uniformVarMap.end()) + auto at = uniformVarMap.find(p.second.symbol->getAccessName()); + if (at != uniformVarMap.end() && p.second.id == at->second.id) at->second = p.second; }); resolver->endResolve(stage); @@ -1174,9 +1476,14 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi // // Returns 
false if the input is too malformed to do this. bool TGlslIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) { + bool somethingToDo = !intermediate.getResourceSetBinding().empty() || + intermediate.getAutoMapBindings() || + intermediate.getAutoMapLocations(); + + // Profile and version are use for symbol validate. + profile = intermediate.getProfile(); + version = intermediate.getVersion(); - bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() || - intermediate.getAutoMapLocations(); // Restrict the stricter condition to further check 'somethingToDo' only if 'somethingToDo' has not been set, reduce // unnecessary or insignificant for-loop operation after 'somethingToDo' have been true. for (int res = 0; (res < EResCount && !somethingToDo); ++res) { @@ -1236,31 +1543,30 @@ bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) { resolver->endResolve(EShLangCount); if (!hadError) { //Resolve uniform location, ubo/ssbo/opaque bindings across stages - TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, infoSink, hadError); + TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, uniformVarMap, infoSink, hadError); TResolverInOutAdaptor inOutResolve(EShLangCount, *resolver, infoSink, hadError); - TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps, outVarMaps, uniformVarMap, hadError); + TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps, + outVarMaps, uniformVarMap, hadError, profile, version); TVarLiveVector uniformVector; resolver->beginResolve(EShLangCount); for (int stage = EShLangVertex; stage < EShLangCount; stage++) { if (inVarMaps[stage] != nullptr) { inOutResolve.setStage(EShLanguage(stage)); - std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), symbolValidater); - std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutResolve); - std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), symbolValidater); - std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutResolve); + for (auto& var : *(inVarMaps[stage])) { symbolValidater(var); } + for (auto& var : *(inVarMaps[stage])) { inOutResolve(var); } + for (auto& var : *(outVarMaps[stage])) { symbolValidater(var); } + for (auto& var : *(outVarMaps[stage])) { inOutResolve(var); } } if (uniformVarMap[stage] != nullptr) { uniformResolve.setStage(EShLanguage(stage)); - // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. 
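The rewritten loops above copy the per-stage live-variable maps into vectors and order them with TVarEntryInfo::TOrderByPriority before resolving. A small self-contained sketch of that collect-then-sort pattern; EntryInfo and the comparator are toy stand-ins for the glslang types:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct EntryInfo {
    int id;
    bool live;   // live entries sort first in this toy ordering
};

using LivePair = std::pair<std::string, EntryInfo>;

int main() {
    std::map<std::string, EntryInfo> varMap = {
        {"u_color", {1, false}}, {"u_mvp", {2, true}}, {"u_time", {3, true}}
    };

    std::vector<LivePair> vec;
    for (auto& var : varMap)      // same pattern as the range-for that replaced std::for_each
        vec.push_back(var);

    std::sort(vec.begin(), vec.end(), [](const LivePair& a, const LivePair& b) {
        return a.second.live > b.second.live;   // stand-in for TVarEntryInfo::TOrderByPriority
    });

    for (const auto& p : vec)
        std::cout << p.first << (p.second.live ? " (live)\n" : "\n");
}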
- std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), - [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + for (auto& var : *(uniformVarMap[stage])) { uniformVector.push_back(var); } } } std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); }); - std::for_each(uniformVector.begin(), uniformVector.end(), symbolValidater); - std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); + for (auto& var : uniformVector) { symbolValidater(var); } + for (auto& var : uniformVector) { uniformResolve(var); } std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); }); @@ -1269,14 +1575,18 @@ bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) { if (intermediates[stage] != nullptr) { // traverse each stage, set new location to each input/output and unifom symbol, set new binding to // ubo, ssbo and opaque symbols - TVarLiveMap** pUniformVarMap = uniformVarMap; + TVarLiveMap** pUniformVarMap = uniformResolve.uniformVarMap; std::for_each(uniformVector.begin(), uniformVector.end(), [pUniformVarMap, stage](TVarLivePair p) { - auto at = pUniformVarMap[stage]->find(p.second.symbol->getName()); - if (at != pUniformVarMap[stage]->end()) + auto at = pUniformVarMap[stage]->find(p.second.symbol->getAccessName()); + if (at != pUniformVarMap[stage]->end() && at->second.id == p.second.id){ + int resolvedBinding = at->second.newBinding; at->second = p.second; + if (resolvedBinding > 0) + at->second.newBinding = resolvedBinding; + } }); TVarSetTraverser iter_iomap(*intermediates[stage], *inVarMaps[stage], *outVarMaps[stage], - *uniformVarMap[stage]); + *uniformResolve.uniformVarMap[stage]); intermediates[stage]->getTreeRoot()->traverse(&iter_iomap); } } diff --git a/thirdparty/glslang/glslang/MachineIndependent/iomapper.h b/thirdparty/glslang/glslang/MachineIndependent/iomapper.h index 674132786e..7934c4a9d1 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/iomapper.h +++ b/thirdparty/glslang/glslang/MachineIndependent/iomapper.h @@ -203,7 +203,6 @@ public: void endCollect(EShLanguage) override; void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override; void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override; - const TString& getAccessName(const TIntermSymbol*); // in/out symbol and uniform symbol are stored in the same resourceSlotMap, the storage key is used to identify each type of symbol. // We use stage and storage qualifier to construct a storage key. it can help us identify the same storage resource used in different stage. // if a resource is a program resource and we don't need know it usage stage, we can use same stage to build storage key. 
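The iomapper.h comment above describes building a storage key from a stage and a storage qualifier so the same resource can be tracked per stage in one slot map. One plausible way such a key could be packed, shown with hypothetical enums and a makeStorageKey helper that are not taken from the patch:

#include <cstdint>
#include <map>
#include <vector>

enum Stage : uint32_t { StageVertex = 0, StageFragment = 4 };               // simplified stand-ins
enum Storage : uint32_t { StorageIn = 1, StorageOut = 2, StorageUniform = 3 };

// Stage in the high bits, storage qualifier in the low bits.
inline uint32_t makeStorageKey(Stage stage, Storage storage) {
    return (static_cast<uint32_t>(stage) << 16) | static_cast<uint32_t>(storage);
}

int main() {
    std::map<uint32_t, std::vector<int>> slotMap;   // key -> reserved slots
    slotMap[makeStorageKey(StageVertex, StorageUniform)].push_back(3);
    slotMap[makeStorageKey(StageFragment, StorageUniform)].push_back(3);    // distinct key per stage
    return 0;
}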
@@ -263,10 +262,12 @@ public: class TGlslIoMapper : public TIoMapper { public: TGlslIoMapper() { - memset(inVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount); - memset(outVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount); - memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * EShLangCount); - memset(intermediates, 0, sizeof(TIntermediate*) * EShLangCount); + memset(inVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1)); + memset(outVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1)); + memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1)); + memset(intermediates, 0, sizeof(TIntermediate*) * (EShLangCount + 1)); + profile = ENoProfile; + version = 0; } virtual ~TGlslIoMapper() { for (size_t stage = 0; stage < EShLangCount; stage++) { @@ -293,6 +294,8 @@ public: *uniformVarMap[EShLangCount]; TIntermediate* intermediates[EShLangCount]; bool hadError = false; + EProfile profile; + int version; }; } // end namespace glslang diff --git a/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp b/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp index a8e031bc6f..4e84adbf0a 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp @@ -196,12 +196,14 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit) MERGE_TRUE(pointMode); for (int i = 0; i < 3; ++i) { - if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) { - localSize[i] = unit.localSize[i]; - localSizeNotDefault[i] = true; + if (unit.localSizeNotDefault[i]) { + if (!localSizeNotDefault[i]) { + localSize[i] = unit.localSize[i]; + localSizeNotDefault[i] = true; + } + else if (localSize[i] != unit.localSize[i]) + error(infoSink, "Contradictory local size"); } - else if (localSize[i] != unit.localSize[i]) - error(infoSink, "Contradictory local size"); if (localSizeSpecId[i] == TQualifier::layoutNotSet) localSizeSpecId[i] = unit.localSizeSpecId[i]; @@ -736,10 +738,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled) // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." - if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) { + if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) { error(infoSink, "xfb_stride is too large:"); infoSink.info.prefix(EPrefixError); - infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n"; + infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n"; } } @@ -1055,8 +1057,8 @@ bool TIntermediate::userOutputUsed() const return found; } -// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions -// as the accumulation is done. +// Accumulate locations used for inputs, outputs, and uniforms, payload and callable data +// and check for collisions as the accumulation is done. // // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
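The comment above describes accumulating used locations and detecting collisions as the accumulation happens; for ray-tracing payloads and callable data each declaration occupies a single location slot regardless of type. A minimal sketch of that range-overlap test, with a simplified Range type standing in for glslang's TRange:

#include <iostream>
#include <vector>

struct Range {
    int start, last;
    bool overlap(const Range& o) const { return start <= o.last && o.start <= last; }
};

// Returns -1 if the location is free, otherwise the colliding location.
int checkLocation(const std::vector<Range>& used, int location) {
    Range range{location, location};
    for (const Range& r : used)
        if (range.overlap(r))
            return range.start;
    return -1;
}

int main() {
    std::vector<Range> usedIoRT;                        // locations already claimed by payloads
    usedIoRT.push_back({0, 0});                         // e.g. layout(location = 0) rayPayloadEXT ...
    std::cout << checkLocation(usedIoRT, 0) << "\n";    // 0  -> collision at location 0
    std::cout << checkLocation(usedIoRT, 1) << "\n";    // -1 -> free
}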
// @@ -1068,6 +1070,7 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ typeCollision = false; int set; + int setRT; if (qualifier.isPipeInput()) set = 0; else if (qualifier.isPipeOutput()) @@ -1076,11 +1079,17 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ set = 2; else if (qualifier.storage == EvqBuffer) set = 3; + else if (qualifier.isAnyPayload()) + setRT = 0; + else if (qualifier.isAnyCallable()) + setRT = 1; else return -1; int size; - if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) { + if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) { + size = 1; + } else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) { if (type.isSizedArray()) size = type.getCumulativeArraySize(); else @@ -1108,10 +1117,17 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ // (A vertex shader input will show using only one location, even for a dvec3/4.) // // So, for the case of dvec3, we need two independent ioRanges. - + // + // For raytracing IO (payloads and callabledata) each declaration occupies a single + // slot irrespective of type. int collision = -1; // no collision #ifndef GLSLANG_WEB - if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 && + if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) { + TRange range(qualifier.layoutLocation, qualifier.layoutLocation); + collision = checkLocationRT(setRT, qualifier.layoutLocation); + if (collision < 0) + usedIoRT[setRT].push_back(range); + } else if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 && (qualifier.isPipeInput() || qualifier.isPipeOutput())) { // Dealing with dvec3 in/out split across two locations. // Need two io-ranges. @@ -1187,6 +1203,16 @@ int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TTyp return -1; // no collision } +int TIntermediate::checkLocationRT(int set, int location) { + TRange range(location, location); + for (size_t r = 0; r < usedIoRT[set].size(); ++r) { + if (range.overlap(usedIoRT[set][r])) { + return range.start; + } + } + return -1; // no collision +} + // Accumulate bindings and offsets, and check for collisions // as the accumulation is done. // diff --git a/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h b/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h index 3cdc1f10cf..f8d8e80199 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h +++ b/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h @@ -259,6 +259,23 @@ private: unsigned int features; }; +// MustBeAssigned wraps a T, asserting that it has been assigned with +// operator =() before attempting to read with operator T() or operator ->(). +// Used to catch cases where fields are read before they have been assigned. +template<typename T> +class MustBeAssigned +{ +public: + MustBeAssigned() = default; + MustBeAssigned(const T& v) : value(v) {} + operator const T&() const { assert(isSet); return value; } + const T* operator ->() const { assert(isSet); return &value; } + MustBeAssigned& operator = (const T& v) { value = v; isSet = true; return *this; } +private: + T value; + bool isSet = false; +}; + // // Set of helper functions to help parse and build the tree. 
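The MustBeAssigned wrapper added above asserts that a field has been written before it is read, to catch use-before-assignment of the resource limits. A short usage sketch with a close variant of that wrapper and a hypothetical Limits struct (in the patch the wrapped type is TBuiltInResource):

#include <cassert>

template <typename T>
class MustBeAssigned {
public:
    MustBeAssigned() = default;
    operator const T&() const { assert(isSet); return value; }        // read requires prior assignment
    const T* operator->() const { assert(isSet); return &value; }
    MustBeAssigned& operator=(const T& v) { value = v; isSet = true; return *this; }
private:
    T value{};
    bool isSet = false;
};

struct Limits { int maxXfbComponents = 64; };   // hypothetical stand-in for TBuiltInResource

int main() {
    MustBeAssigned<Limits> resources;
    // int n = resources->maxXfbComponents;     // would assert: read before assignment
    resources = Limits{};                        // assign first...
    int n = resources->maxXfbComponents;         // ...then reads are allowed
    return n == 64 ? 0 : 1;
}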
// @@ -270,6 +287,7 @@ public: profile(p), version(v), #endif treeRoot(0), + resources(TBuiltInResource{}), numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false), invertY(false), useStorageBuffer(false), @@ -398,6 +416,9 @@ public: EShLanguage getStage() const { return language; } void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); } const std::set<std::string>& getRequestedExtensions() const { return requestedExtensions; } + bool isRayTracingStage() const { + return language >= EShLangRayGen && language <= EShLangCallableNV; + } void setTreeRoot(TIntermNode* r) { treeRoot = r; } TIntermNode* getTreeRoot() const { return treeRoot; } @@ -406,6 +427,7 @@ public: int getNumErrors() const { return numErrors; } void addPushConstantCount() { ++numPushConstants; } void setLimits(const TBuiltInResource& r) { resources = r; } + const TBuiltInResource& getLimits() const { return resources; } bool postProcess(TIntermNode*, EShLanguage); void removeTree(); @@ -512,6 +534,7 @@ public: // Linkage related void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&); void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&); + TIntermAggregate* findLinkerObjects() const; void setUseStorageBuffer() { useStorageBuffer = true; } bool usingStorageBuffer() const { return useStorageBuffer; } @@ -847,6 +870,7 @@ public: int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision); int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision); + int checkLocationRT(int set, int location); int addUsedOffsets(int binding, int offset, int numOffsets); bool addUsedConstantId(int id); static int computeTypeLocationSize(const TType&, EShLanguage); @@ -922,7 +946,6 @@ protected: void checkCallGraphCycles(TInfoSink&); void checkCallGraphBodies(TInfoSink&, bool keepUncalled); void inOutLocationCheck(TInfoSink&); - TIntermAggregate* findLinkerObjects() const; bool userOutputUsed() const; bool isSpecializationOperation(const TIntermOperator&) const; bool isNonuniformPropagating(TOperator) const; @@ -955,7 +978,7 @@ protected: SpvVersion spvVersion; TIntermNode* treeRoot; std::set<std::string> requestedExtensions; // cumulation of all enabled or required extensions; not connected to what subset of the shader used them - TBuiltInResource resources; + MustBeAssigned<TBuiltInResource> resources; int numEntryPoints; int numErrors; int numPushConstants; @@ -1031,6 +1054,8 @@ protected: std::unordered_set<int> usedConstantId; // specialization constant ids used std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers + std::vector<TRange> usedIoRT[2]; // sets of used location, one for rayPayload/rayPayloadIN and other + // for callableData/callableDataIn // set of names of statically read/written I/O that might need extra checking std::set<TString> ioAccessed; // source code of shader, useful as part of debug information diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp index a0a626f9b7..aa1e0d7451 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp @@ -455,6 +455,7 @@ int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, boo token = scanToken(ppToken); } } 
else { + token = tokenPaste(token, *ppToken); token = evalToToken(token, shortCircuit, res, err, ppToken); return eval(token, precedence, shortCircuit, res, err, ppToken); } diff --git a/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp b/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp index 0aabf37fba..729500295e 100644 --- a/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp +++ b/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp @@ -658,14 +658,17 @@ public: blocks.back().numMembers = countAggregateMembers(type); - EShLanguageMask& stages = blocks.back().stages; - stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage()); + if (updateStageMasks) { + EShLanguageMask& stages = blocks.back().stages; + stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage()); + } } else { blockIndex = it->second; - - EShLanguageMask& stages = blocks[blockIndex].stages; - stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage()); + if (updateStageMasks) { + EShLanguageMask& stages = blocks[blockIndex].stages; + stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage()); + } } } diff --git a/thirdparty/glslang/patches/fix-mingw-snprintf.patch b/thirdparty/glslang/patches/fix-mingw-snprintf.patch deleted file mode 100644 index 2a51bc1f22..0000000000 --- a/thirdparty/glslang/patches/fix-mingw-snprintf.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git a/thirdparty/glslang/glslang/Include/Common.h b/thirdparty/glslang/glslang/Include/Common.h -index 733a790cfd..2c511bc1c5 100644 ---- a/thirdparty/glslang/glslang/Include/Common.h -+++ b/thirdparty/glslang/glslang/Include/Common.h -@@ -50,7 +50,9 @@ std::string to_string(const T& val) { - } - #endif - --#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API -+// -- GODOT start -- -+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) /* || defined MINGW_HAS_SECURE_API */ -+// -- GODOT end -- - #include <basetsd.h> - #ifndef snprintf - #define snprintf sprintf_s diff --git a/thirdparty/vulkan/include/vulkan/vk_icd.h b/thirdparty/vulkan/include/vulkan/vk_icd.h index fde5bf6214..5e29ef5575 100644 --- a/thirdparty/vulkan/include/vulkan/vk_icd.h +++ b/thirdparty/vulkan/include/vulkan/vk_icd.h @@ -121,6 +121,7 @@ typedef enum { VK_ICD_WSI_PLATFORM_METAL, VK_ICD_WSI_PLATFORM_DIRECTFB, VK_ICD_WSI_PLATFORM_VI, + VK_ICD_WSI_PLATFORM_GGP, } VkIcdWsiPlatform; typedef struct { @@ -196,6 +197,13 @@ typedef struct { } VkIcdSurfaceIOS; #endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP +typedef struct { + VkIcdSurfaceBase base; + GgpStreamDescriptor streamDescriptor; +} VkIcdSurfaceGgp; +#endif // VK_USE_PLATFORM_GGP + typedef struct { VkIcdSurfaceBase base; VkDisplayModeKHR displayMode; diff --git a/thirdparty/vulkan/include/vulkan/vulkan.hpp b/thirdparty/vulkan/include/vulkan/vulkan.hpp index 68b42afe19..8218a3617c 100644 --- a/thirdparty/vulkan/include/vulkan/vulkan.hpp +++ b/thirdparty/vulkan/include/vulkan/vulkan.hpp @@ -33,6 +33,7 @@ #include <cstring> #include <functional> #include <initializer_list> +#include <sstream> #include <string> #include <system_error> #include <tuple> @@ -81,7 +82,11 @@ extern "C" __declspec( dllimport ) FARPROC __stdcall GetProcAddress( HINSTANCE h # endif #endif -#if ( 201711 <= __cpp_impl_three_way_comparison ) && __has_include( <compare> ) +#if !defined(__has_include) +# define __has_include(x) false +#endif + +#if ( 201711 <= __cpp_impl_three_way_comparison ) && __has_include( 
<compare> ) && !defined( VULKAN_HPP_NO_SPACESHIP_OPERATOR ) # define VULKAN_HPP_HAS_SPACESHIP_OPERATOR #endif #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) @@ -89,7 +94,7 @@ extern "C" __declspec( dllimport ) FARPROC __stdcall GetProcAddress( HINSTANCE h #endif -static_assert( VK_HEADER_VERSION == 154 , "Wrong VK_HEADER_VERSION!" ); +static_assert( VK_HEADER_VERSION == 162 , "Wrong VK_HEADER_VERSION!" ); // 32-bit vulkan is not typesafe for handles, so don't allow copy constructors on this platform by default. // To enable this feature on 32-bit platforms please define VULKAN_HPP_TYPESAFE_CONVERSION @@ -174,6 +179,11 @@ static_assert( VK_HEADER_VERSION == 154 , "Wrong VK_HEADER_VERSION!" ); # else # define VULKAN_HPP_NOEXCEPT noexcept # define VULKAN_HPP_HAS_NOEXCEPT 1 +# if defined(VULKAN_HPP_NO_EXCEPTIONS) +# define VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS noexcept +# else +# define VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS +# endif # endif #endif @@ -536,7 +546,7 @@ namespace VULKAN_HPP_NAMESPACE return std::array<T, N>::operator[](index); } - VULKAN_HPP_CONSTEXPR T & operator[](int index) VULKAN_HPP_NOEXCEPT + T & operator[](int index) VULKAN_HPP_NOEXCEPT { return std::array<T, N>::operator[](index); } @@ -673,9 +683,7 @@ namespace VULKAN_HPP_NAMESPACE : m_mask(static_cast<MaskType>(bit)) {} - VULKAN_HPP_CONSTEXPR Flags(Flags<BitType> const& rhs) VULKAN_HPP_NOEXCEPT - : m_mask(rhs.m_mask) - {} + VULKAN_HPP_CONSTEXPR Flags(Flags<BitType> const& rhs) VULKAN_HPP_NOEXCEPT = default; VULKAN_HPP_CONSTEXPR explicit Flags(MaskType flags) VULKAN_HPP_NOEXCEPT : m_mask(flags) @@ -744,11 +752,7 @@ namespace VULKAN_HPP_NAMESPACE } // assignment operators - VULKAN_HPP_CONSTEXPR_14 Flags<BitType> & operator=(Flags<BitType> const& rhs) VULKAN_HPP_NOEXCEPT - { - m_mask = rhs.m_mask; - return *this; - } + VULKAN_HPP_CONSTEXPR_14 Flags<BitType> & operator=(Flags<BitType> const& rhs) VULKAN_HPP_NOEXCEPT = default; VULKAN_HPP_CONSTEXPR_14 Flags<BitType> & operator|=(Flags<BitType> const& rhs) VULKAN_HPP_NOEXCEPT { @@ -788,37 +792,37 @@ namespace VULKAN_HPP_NAMESPACE template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator<(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags > bit; + return flags.operator>( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator<=(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags >= bit; + return flags.operator>=( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator>(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags < bit; + return flags.operator<( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator>=(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags <= bit; + return flags.operator<=(bit); } template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator==(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags == bit; + return flags.operator==( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR bool operator!=(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags != bit; + return flags.operator!=( bit ); } #endif @@ -826,19 +830,19 @@ namespace VULKAN_HPP_NAMESPACE template <typename BitType> VULKAN_HPP_CONSTEXPR Flags<BitType> operator&(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags & bit; + return flags.operator&( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR Flags<BitType> operator|(BitType bit, 
Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags | bit; + return flags.operator|( bit ); } template <typename BitType> VULKAN_HPP_CONSTEXPR Flags<BitType> operator^(BitType bit, Flags<BitType> const& flags) VULKAN_HPP_NOEXCEPT { - return flags ^ bit; + return flags.operator^( bit ); } template <typename RefType> @@ -942,16 +946,16 @@ namespace VULKAN_HPP_NAMESPACE StructureChain & operator=( StructureChain && rhs ) = delete; - template <typename T, size_t Which = 0> + template <typename T = typename std::tuple_element<0, std::tuple<ChainElements...>>::type, size_t Which = 0> T & get() VULKAN_HPP_NOEXCEPT { - return std::get<ChainElementIndex<0, T, Which, void, ChainElements...>::value>( *this ); + return std::get<ChainElementIndex<0, T, Which, void, ChainElements...>::value>( static_cast<std::tuple<ChainElements...>&>( *this ) ); } - template <typename T, size_t Which = 0> + template <typename T = typename std::tuple_element<0, std::tuple<ChainElements...>>::type, size_t Which = 0> T const & get() const VULKAN_HPP_NOEXCEPT { - return std::get<ChainElementIndex<0, T, Which, void, ChainElements...>::value>( *this ); + return std::get<ChainElementIndex<0, T, Which, void, ChainElements...>::value>( static_cast<std::tuple<ChainElements...> const &>( *this ) ); } template <typename T0, typename T1, typename... Ts> @@ -977,7 +981,7 @@ namespace VULKAN_HPP_NAMESPACE auto pNext = reinterpret_cast<VkBaseInStructure *>( &get<ClassType, Which>() ); VULKAN_HPP_ASSERT( !isLinked( pNext ) ); - auto & headElement = std::get<0>( *this ); + auto & headElement = std::get<0>( static_cast<std::tuple<ChainElements...>&>( *this ) ); pNext->pNext = reinterpret_cast<VkBaseInStructure const*>(headElement.pNext); headElement.pNext = pNext; } @@ -1028,7 +1032,7 @@ namespace VULKAN_HPP_NAMESPACE bool isLinked( VkBaseInStructure const * pNext ) { - VkBaseInStructure const * elementPtr = reinterpret_cast<VkBaseInStructure const*>(&std::get<0>( *this )); + VkBaseInStructure const * elementPtr = reinterpret_cast<VkBaseInStructure const*>(&std::get<0>( static_cast<std::tuple<ChainElements...>&>( *this ) ) ); while ( elementPtr ) { if ( elementPtr->pNext == pNext ) @@ -1043,8 +1047,8 @@ namespace VULKAN_HPP_NAMESPACE template <size_t Index> typename std::enable_if<Index != 0, void>::type link() VULKAN_HPP_NOEXCEPT { - auto & x = std::get<Index - 1>( *this ); - x.pNext = &std::get<Index>( *this ); + auto & x = std::get<Index - 1>( static_cast<std::tuple<ChainElements...>&>( *this ) ); + x.pNext = &std::get<Index>( static_cast<std::tuple<ChainElements...>&>( *this ) ); link<Index - 1>(); } @@ -1055,7 +1059,7 @@ namespace VULKAN_HPP_NAMESPACE template <size_t Index> typename std::enable_if<Index != 0, void>::type unlink( VkBaseOutStructure const * pNext ) VULKAN_HPP_NOEXCEPT { - auto & element = std::get<Index>( *this ); + auto & element = std::get<Index>( static_cast<std::tuple<ChainElements...>&>( *this ) ); if ( element.pNext == pNext ) { element.pNext = pNext->pNext; @@ -1069,7 +1073,7 @@ namespace VULKAN_HPP_NAMESPACE template <size_t Index> typename std::enable_if<Index == 0, void>::type unlink( VkBaseOutStructure const * pNext ) VULKAN_HPP_NOEXCEPT { - auto & element = std::get<0>( *this ); + auto & element = std::get<0>( static_cast<std::tuple<ChainElements...>&>( *this ) ); if ( element.pNext == pNext ) { element.pNext = pNext->pNext; @@ -1258,14 +1262,7 @@ namespace VULKAN_HPP_NAMESPACE return ::vkBeginCommandBuffer( commandBuffer, pBeginInfo ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult 
vkBindAccelerationStructureMemoryKHR( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos ) const VULKAN_HPP_NOEXCEPT - { - return ::vkBindAccelerationStructureMemoryKHR( device, bindInfoCount, pBindInfos ); - } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - - VkResult vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos ) const VULKAN_HPP_NOEXCEPT + VkResult vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos ) const VULKAN_HPP_NOEXCEPT { return ::vkBindAccelerationStructureMemoryNV( device, bindInfoCount, pBindInfos ); } @@ -1300,12 +1297,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkBindImageMemory2KHR( device, bindInfoCount, pBindInfos ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkBuildAccelerationStructureKHR( VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos ) const VULKAN_HPP_NOEXCEPT + VkResult vkBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos ) const VULKAN_HPP_NOEXCEPT { - return ::vkBuildAccelerationStructureKHR( device, infoCount, pInfos, ppOffsetInfos ); + return ::vkBuildAccelerationStructuresKHR( device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkCmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin ) const VULKAN_HPP_NOEXCEPT { @@ -1397,23 +1392,19 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdBlitImage2KHR( commandBuffer, pBlitImageInfo ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - void vkCmdBuildAccelerationStructureIndirectKHR( VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride ) const VULKAN_HPP_NOEXCEPT + void vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset ) const VULKAN_HPP_NOEXCEPT { - return ::vkCmdBuildAccelerationStructureIndirectKHR( commandBuffer, pInfo, indirectBuffer, indirectOffset, indirectStride ); + return ::vkCmdBuildAccelerationStructureNV( commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - void vkCmdBuildAccelerationStructureKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos ) const VULKAN_HPP_NOEXCEPT + void vkCmdBuildAccelerationStructuresIndirectKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts ) const VULKAN_HPP_NOEXCEPT { - return ::vkCmdBuildAccelerationStructureKHR( commandBuffer, infoCount, pInfos, 
ppOffsetInfos ); + return ::vkCmdBuildAccelerationStructuresIndirectKHR( commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, ppMaxPrimitiveCounts ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - void vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkBuffer scratch, VkDeviceSize scratchOffset ) const VULKAN_HPP_NOEXCEPT + void vkCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos ) const VULKAN_HPP_NOEXCEPT { - return ::vkCmdBuildAccelerationStructureNV( commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset ); + return ::vkCmdBuildAccelerationStructuresKHR( commandBuffer, infoCount, pInfos, ppBuildRangeInfos ); } void vkCmdClearAttachments( VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects ) const VULKAN_HPP_NOEXCEPT @@ -1431,24 +1422,20 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdClearDepthStencilImage( commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS void vkCmdCopyAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdCopyAccelerationStructureKHR( commandBuffer, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - void vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkCopyAccelerationStructureModeKHR mode ) const VULKAN_HPP_NOEXCEPT + void vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdCopyAccelerationStructureNV( commandBuffer, dst, src, mode ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS void vkCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdCopyAccelerationStructureToMemoryKHR( commandBuffer, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkCmdCopyBuffer( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions ) const VULKAN_HPP_NOEXCEPT { @@ -1490,12 +1477,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdCopyImageToBuffer2KHR( commandBuffer, pCopyImageToBufferInfo ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS void vkCmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdCopyMemoryToAccelerationStructureKHR( commandBuffer, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkCmdCopyQueryPoolResults( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags ) const VULKAN_HPP_NOEXCEPT { @@ -1802,6 +1787,16 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdSetExclusiveScissorNV( commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors ); } + 
void vkCmdSetFragmentShadingRateEnumNV( VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetFragmentShadingRateEnumNV( commandBuffer, shadingRate, combinerOps ); + } + + void vkCmdSetFragmentShadingRateKHR( VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetFragmentShadingRateKHR( commandBuffer, pFragmentSize, combinerOps ); + } + void vkCmdSetFrontFaceEXT( VkCommandBuffer commandBuffer, VkFrontFace frontFace ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdSetFrontFaceEXT( commandBuffer, frontFace ); @@ -1837,6 +1832,11 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdSetPrimitiveTopologyEXT( commandBuffer, primitiveTopology ); } + void vkCmdSetRayTracingPipelineStackSizeKHR( VkCommandBuffer commandBuffer, uint32_t pipelineStackSize ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetRayTracingPipelineStackSizeKHR( commandBuffer, pipelineStackSize ); + } + void vkCmdSetSampleLocationsEXT( VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdSetSampleLocationsEXT( commandBuffer, pSampleLocationsInfo ); @@ -1897,19 +1897,15 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdSetViewportWithCountEXT( commandBuffer, viewportCount, pViewports ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - void vkCmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset ) const VULKAN_HPP_NOEXCEPT + void vkCmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress ) const VULKAN_HPP_NOEXCEPT { - return ::vkCmdTraceRaysIndirectKHR( commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, buffer, offset ); + return ::vkCmdTraceRaysIndirectKHR( commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - void vkCmdTraceRaysKHR( VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth ) const VULKAN_HPP_NOEXCEPT + void vkCmdTraceRaysKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdTraceRaysKHR( commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, 
pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth ) const VULKAN_HPP_NOEXCEPT { @@ -1926,14 +1922,12 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdWaitEvents( commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS void vkCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdWriteAccelerationStructuresPropertiesKHR( commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - void vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT + void vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdWriteAccelerationStructuresPropertiesNV( commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery ); } @@ -1953,33 +1947,25 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCompileDeferredNV( device, pipeline, shader ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkCopyAccelerationStructureKHR( VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkCopyAccelerationStructureKHR( device, pInfo ); + return ::vkCopyAccelerationStructureKHR( device, deferredOperation, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkCopyAccelerationStructureToMemoryKHR( VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyAccelerationStructureToMemoryKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkCopyAccelerationStructureToMemoryKHR( device, pInfo ); + return ::vkCopyAccelerationStructureToMemoryKHR( device, deferredOperation, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkCopyMemoryToAccelerationStructureKHR( VkDevice device, const 
VkCopyMemoryToAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyMemoryToAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkCopyMemoryToAccelerationStructureKHR( device, pInfo ); + return ::vkCopyMemoryToAccelerationStructureKHR( device, deferredOperation, pInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkCreateAccelerationStructureKHR( VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure ) const VULKAN_HPP_NOEXCEPT { return ::vkCreateAccelerationStructureKHR( device, pCreateInfo, pAllocator, pAccelerationStructure ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ VkResult vkCreateAccelerationStructureNV( VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure ) const VULKAN_HPP_NOEXCEPT { @@ -2023,12 +2009,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCreateDebugUtilsMessengerEXT( instance, pCreateInfo, pAllocator, pMessenger ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkCreateDeferredOperationKHR( VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation ) const VULKAN_HPP_NOEXCEPT { return ::vkCreateDeferredOperationKHR( device, pAllocator, pDeferredOperation ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ VkResult vkCreateDescriptorPool( VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool ) const VULKAN_HPP_NOEXCEPT { @@ -2165,12 +2149,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCreateQueryPool( device, pCreateInfo, pAllocator, pQueryPool ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkCreateRayTracingPipelinesKHR( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT + VkResult vkCreateRayTracingPipelinesKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT { - return ::vkCreateRayTracingPipelinesKHR( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); + return ::vkCreateRayTracingPipelinesKHR( device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ VkResult vkCreateRayTracingPipelinesNV( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT { @@ -2289,21 +2271,17 @@ namespace VULKAN_HPP_NAMESPACE return ::vkDebugReportMessageEXT( instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkDeferredOperationJoinKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT { return ::vkDeferredOperationJoinKHR( device, operation ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef 
VK_ENABLE_BETA_EXTENSIONS void vkDestroyAccelerationStructureKHR( VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT { return ::vkDestroyAccelerationStructureKHR( device, accelerationStructure, pAllocator ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - void vkDestroyAccelerationStructureNV( VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + void vkDestroyAccelerationStructureNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT { return ::vkDestroyAccelerationStructureNV( device, accelerationStructure, pAllocator ); } @@ -2333,12 +2311,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkDestroyDebugUtilsMessengerEXT( instance, messenger, pAllocator ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS void vkDestroyDeferredOperationKHR( VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT { return ::vkDestroyDeferredOperationKHR( device, operation, pAllocator ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkDestroyDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT { @@ -2550,24 +2526,20 @@ namespace VULKAN_HPP_NAMESPACE return ::vkFreeMemory( device, memory, pAllocator ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR( VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + void vkGetAccelerationStructureBuildSizesKHR( VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkGetAccelerationStructureDeviceAddressKHR( device, pInfo ); + return ::vkGetAccelerationStructureBuildSizesKHR( device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - VkResult vkGetAccelerationStructureHandleNV( VkDevice device, VkAccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR( VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkGetAccelerationStructureHandleNV( device, accelerationStructure, dataSize, pData ); + return ::vkGetAccelerationStructureDeviceAddressKHR( device, pInfo ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - void vkGetAccelerationStructureMemoryRequirementsKHR( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + VkResult vkGetAccelerationStructureHandleNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT { - return ::vkGetAccelerationStructureMemoryRequirementsKHR( device, pInfo, pMemoryRequirements ); + return ::vkGetAccelerationStructureHandleNV( device, accelerationStructure, dataSize, pData ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkGetAccelerationStructureMemoryRequirementsNV( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements ) const 
VULKAN_HPP_NOEXCEPT { @@ -2626,19 +2598,15 @@ namespace VULKAN_HPP_NAMESPACE return ::vkGetCalibratedTimestampsEXT( device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS uint32_t vkGetDeferredOperationMaxConcurrencyKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT { return ::vkGetDeferredOperationMaxConcurrencyKHR( device, operation ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkGetDeferredOperationResultKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT { return ::vkGetDeferredOperationResultKHR( device, operation ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkGetDescriptorSetLayoutSupport( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport ) const VULKAN_HPP_NOEXCEPT { @@ -2650,12 +2618,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkGetDescriptorSetLayoutSupportKHR( device, pCreateInfo, pSupport ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS - VkResult vkGetDeviceAccelerationStructureCompatibilityKHR( VkDevice device, const VkAccelerationStructureVersionKHR* version ) const VULKAN_HPP_NOEXCEPT + void vkGetDeviceAccelerationStructureCompatibilityKHR( VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility ) const VULKAN_HPP_NOEXCEPT { - return ::vkGetDeviceAccelerationStructureCompatibilityKHR( device, version ); + return ::vkGetDeviceAccelerationStructureCompatibilityKHR( device, pVersionInfo, pCompatibility ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ void vkGetDeviceGroupPeerMemoryFeatures( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures ) const VULKAN_HPP_NOEXCEPT { @@ -2976,6 +2942,11 @@ namespace VULKAN_HPP_NAMESPACE return ::vkGetPhysicalDeviceFormatProperties2KHR( physicalDevice, format, pFormatProperties ); } + VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR( VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFragmentShadingRatesKHR( physicalDevice, pFragmentShadingRateCount, pFragmentShadingRates ); + } + VkResult vkGetPhysicalDeviceImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties ) const VULKAN_HPP_NOEXCEPT { return ::vkGetPhysicalDeviceImageFormatProperties( physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties ); @@ -3188,25 +3159,26 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT { return ::vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( device, pipeline, firstGroup, groupCount, dataSize, pData ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkGetRayTracingShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT { return ::vkGetRayTracingShaderGroupHandlesKHR( device, pipeline, 
firstGroup, groupCount, dataSize, pData ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ VkResult vkGetRayTracingShaderGroupHandlesNV( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT { return ::vkGetRayTracingShaderGroupHandlesNV( device, pipeline, firstGroup, groupCount, dataSize, pData ); } + VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR( VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRayTracingShaderGroupStackSizeKHR( device, pipeline, group, groupShader ); + } + VkResult vkGetRefreshCycleDurationGOOGLE( VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties ) const VULKAN_HPP_NOEXCEPT { return ::vkGetRefreshCycleDurationGOOGLE( device, swapchain, pDisplayTimingProperties ); @@ -3515,12 +3487,10 @@ namespace VULKAN_HPP_NAMESPACE return ::vkWaitSemaphoresKHR( device, pWaitInfo, timeout ); } -#ifdef VK_ENABLE_BETA_EXTENSIONS VkResult vkWriteAccelerationStructuresPropertiesKHR( VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride ) const VULKAN_HPP_NOEXCEPT { return ::vkWriteAccelerationStructuresPropertiesKHR( device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride ); } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ }; #endif @@ -3673,6 +3643,35 @@ namespace VULKAN_HPP_NAMESPACE Dispatch const * m_dispatch = nullptr; }; + template <typename OwnerType, typename Dispatch> + class ObjectRelease + { + public: + ObjectRelease() = default; + + ObjectRelease( OwnerType owner, Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT + : m_owner( owner ) + , m_dispatch( &dispatch ) + {} + + OwnerType getOwner() const VULKAN_HPP_NOEXCEPT + { + return m_owner; + } + + protected: + template <typename T> + void destroy( T t ) VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_owner && m_dispatch ); + m_owner.release( t, *m_dispatch ); + } + + private: + OwnerType m_owner = {}; + Dispatch const * m_dispatch = nullptr; + }; + template <typename OwnerType, typename PoolType, typename Dispatch> class PoolFree { @@ -3718,7 +3717,13 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = false; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS + VULKAN_HPP_INLINE std::string toHexString( uint32_t value ) + { + std::stringstream stream; + stream << std::hex << value; + return stream.str(); + } + enum class AccelerationStructureBuildTypeKHR { eHost = VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR, @@ -3733,34 +3738,63 @@ namespace VULKAN_HPP_NAMESPACE case AccelerationStructureBuildTypeKHR::eHost : return "Host"; case AccelerationStructureBuildTypeKHR::eDevice : return "Device"; case AccelerationStructureBuildTypeKHR::eHostOrDevice : return "HostOrDevice"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - enum class AccelerationStructureMemoryRequirementsTypeKHR + enum class AccelerationStructureCompatibilityKHR { - eObject = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR, - eBuildScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR, - eUpdateScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR + eCompatible 
= VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR, + eIncompatible = VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR }; - using AccelerationStructureMemoryRequirementsTypeNV = AccelerationStructureMemoryRequirementsTypeKHR; - VULKAN_HPP_INLINE std::string to_string( AccelerationStructureMemoryRequirementsTypeKHR value ) + VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCompatibilityKHR value ) { switch ( value ) { - case AccelerationStructureMemoryRequirementsTypeKHR::eObject : return "Object"; - case AccelerationStructureMemoryRequirementsTypeKHR::eBuildScratch : return "BuildScratch"; - case AccelerationStructureMemoryRequirementsTypeKHR::eUpdateScratch : return "UpdateScratch"; - default: return "invalid"; + case AccelerationStructureCompatibilityKHR::eCompatible : return "Compatible"; + case AccelerationStructureCompatibilityKHR::eIncompatible : return "Incompatible"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class AccelerationStructureCreateFlagBitsKHR : VkAccelerationStructureCreateFlagsKHR + { + eDeviceAddressCaptureReplay = VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCreateFlagBitsKHR value ) + { + switch ( value ) + { + case AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class AccelerationStructureMemoryRequirementsTypeNV + { + eObject = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV, + eBuildScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV, + eUpdateScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV + }; + + VULKAN_HPP_INLINE std::string to_string( AccelerationStructureMemoryRequirementsTypeNV value ) + { + switch ( value ) + { + case AccelerationStructureMemoryRequirementsTypeNV::eObject : return "Object"; + case AccelerationStructureMemoryRequirementsTypeNV::eBuildScratch : return "BuildScratch"; + case AccelerationStructureMemoryRequirementsTypeNV::eUpdateScratch : return "UpdateScratch"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } enum class AccelerationStructureTypeKHR { eTopLevel = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, - eBottomLevel = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR + eBottomLevel = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + eGeneric = VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR }; using AccelerationStructureTypeNV = AccelerationStructureTypeKHR; @@ -3770,7 +3804,8 @@ namespace VULKAN_HPP_NAMESPACE { case AccelerationStructureTypeKHR::eTopLevel : return "TopLevel"; case AccelerationStructureTypeKHR::eBottomLevel : return "BottomLevel"; - default: return "invalid"; + case AccelerationStructureTypeKHR::eGeneric : return "Generic"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -3805,7 +3840,8 @@ namespace VULKAN_HPP_NAMESPACE eCommandPreprocessReadNV = VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV, eCommandPreprocessWriteNV = VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV, eAccelerationStructureReadNV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV, - eAccelerationStructureWriteNV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV + eAccelerationStructureWriteNV 
= VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV, + eFragmentShadingRateAttachmentReadKHR = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR }; VULKAN_HPP_INLINE std::string to_string( AccessFlagBits value ) @@ -3840,7 +3876,7 @@ namespace VULKAN_HPP_NAMESPACE case AccessFlagBits::eFragmentDensityMapReadEXT : return "FragmentDensityMapReadEXT"; case AccessFlagBits::eCommandPreprocessReadNV : return "CommandPreprocessReadNV"; case AccessFlagBits::eCommandPreprocessWriteNV : return "CommandPreprocessWriteNV"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -3862,7 +3898,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case AttachmentDescriptionFlagBits::eMayAlias : return "MayAlias"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -3880,7 +3916,7 @@ namespace VULKAN_HPP_NAMESPACE case AttachmentLoadOp::eLoad : return "Load"; case AttachmentLoadOp::eClear : return "Clear"; case AttachmentLoadOp::eDontCare : return "DontCare"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -3898,7 +3934,7 @@ namespace VULKAN_HPP_NAMESPACE case AttachmentStoreOp::eStore : return "Store"; case AttachmentStoreOp::eDontCare : return "DontCare"; case AttachmentStoreOp::eNoneQCOM : return "NoneQCOM"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -3948,7 +3984,7 @@ namespace VULKAN_HPP_NAMESPACE case BlendFactor::eOneMinusSrc1Color : return "OneMinusSrc1Color"; case BlendFactor::eSrc1Alpha : return "Src1Alpha"; case BlendFactor::eOneMinusSrc1Alpha : return "OneMinusSrc1Alpha"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4062,7 +4098,7 @@ namespace VULKAN_HPP_NAMESPACE case BlendOp::eRedEXT : return "RedEXT"; case BlendOp::eGreenEXT : return "GreenEXT"; case BlendOp::eBlueEXT : return "BlueEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4080,7 +4116,7 @@ namespace VULKAN_HPP_NAMESPACE case BlendOverlapEXT::eUncorrelated : return "Uncorrelated"; case BlendOverlapEXT::eDisjoint : return "Disjoint"; case BlendOverlapEXT::eConjoint : return "Conjoint"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4108,7 +4144,7 @@ namespace VULKAN_HPP_NAMESPACE case BorderColor::eIntOpaqueWhite : return "IntOpaqueWhite"; case BorderColor::eFloatCustomEXT : return "FloatCustomEXT"; case BorderColor::eIntCustomEXT : return "IntCustomEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4132,7 +4168,7 @@ namespace VULKAN_HPP_NAMESPACE case BufferCreateFlagBits::eSparseAliased : return "SparseAliased"; case BufferCreateFlagBits::eProtected : return "Protected"; case BufferCreateFlagBits::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4151,7 +4187,9 @@ namespace VULKAN_HPP_NAMESPACE 
eTransformFeedbackBufferEXT = VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT, eTransformFeedbackCounterBufferEXT = VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, eConditionalRenderingEXT = VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT, - eRayTracingKHR = VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR, + eAccelerationStructureBuildInputReadOnlyKHR = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR, + eAccelerationStructureStorageKHR = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR, + eShaderBindingTableKHR = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, eRayTracingNV = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, eShaderDeviceAddressEXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, eShaderDeviceAddressKHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR @@ -4174,8 +4212,10 @@ namespace VULKAN_HPP_NAMESPACE case BufferUsageFlagBits::eTransformFeedbackBufferEXT : return "TransformFeedbackBufferEXT"; case BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT : return "TransformFeedbackCounterBufferEXT"; case BufferUsageFlagBits::eConditionalRenderingEXT : return "ConditionalRenderingEXT"; - case BufferUsageFlagBits::eRayTracingKHR : return "RayTracingKHR"; - default: return "invalid"; + case BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR : return "AccelerationStructureBuildInputReadOnlyKHR"; + case BufferUsageFlagBits::eAccelerationStructureStorageKHR : return "AccelerationStructureStorageKHR"; + case BufferUsageFlagBits::eShaderBindingTableKHR : return "ShaderBindingTableKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4198,7 +4238,23 @@ namespace VULKAN_HPP_NAMESPACE case BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace : return "PreferFastTrace"; case BuildAccelerationStructureFlagBitsKHR::ePreferFastBuild : return "PreferFastBuild"; case BuildAccelerationStructureFlagBitsKHR::eLowMemory : return "LowMemory"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class BuildAccelerationStructureModeKHR + { + eBuild = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR, + eUpdate = VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( BuildAccelerationStructureModeKHR value ) + { + switch ( value ) + { + case BuildAccelerationStructureModeKHR::eBuild : return "Build"; + case BuildAccelerationStructureModeKHR::eUpdate : return "Update"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4215,7 +4271,7 @@ namespace VULKAN_HPP_NAMESPACE { case ChromaLocation::eCositedEven : return "CositedEven"; case ChromaLocation::eMidpoint : return "Midpoint"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4235,7 +4291,7 @@ namespace VULKAN_HPP_NAMESPACE case CoarseSampleOrderTypeNV::eCustom : return "Custom"; case CoarseSampleOrderTypeNV::ePixelMajor : return "PixelMajor"; case CoarseSampleOrderTypeNV::eSampleMajor : return "SampleMajor"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4255,7 +4311,7 @@ namespace VULKAN_HPP_NAMESPACE case ColorComponentFlagBits::eG : return "G"; case ColorComponentFlagBits::eB : return "B"; case ColorComponentFlagBits::eA : return "A"; - default: return 
"invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4301,7 +4357,7 @@ namespace VULKAN_HPP_NAMESPACE case ColorSpaceKHR::ePassThroughEXT : return "PassThroughEXT"; case ColorSpaceKHR::eExtendedSrgbNonlinearEXT : return "ExtendedSrgbNonlinearEXT"; case ColorSpaceKHR::eDisplayNativeAMD : return "DisplayNativeAMD"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4317,7 +4373,7 @@ namespace VULKAN_HPP_NAMESPACE { case CommandBufferLevel::ePrimary : return "Primary"; case CommandBufferLevel::eSecondary : return "Secondary"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4331,7 +4387,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case CommandBufferResetFlagBits::eReleaseResources : return "ReleaseResources"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4349,7 +4405,7 @@ namespace VULKAN_HPP_NAMESPACE case CommandBufferUsageFlagBits::eOneTimeSubmit : return "OneTimeSubmit"; case CommandBufferUsageFlagBits::eRenderPassContinue : return "RenderPassContinue"; case CommandBufferUsageFlagBits::eSimultaneousUse : return "SimultaneousUse"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4367,7 +4423,7 @@ namespace VULKAN_HPP_NAMESPACE case CommandPoolCreateFlagBits::eTransient : return "Transient"; case CommandPoolCreateFlagBits::eResetCommandBuffer : return "ResetCommandBuffer"; case CommandPoolCreateFlagBits::eProtected : return "Protected"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4381,7 +4437,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case CommandPoolResetFlagBits::eReleaseResources : return "ReleaseResources"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4409,7 +4465,7 @@ namespace VULKAN_HPP_NAMESPACE case CompareOp::eNotEqual : return "NotEqual"; case CompareOp::eGreaterOrEqual : return "GreaterOrEqual"; case CompareOp::eAlways : return "Always"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4435,7 +4491,7 @@ namespace VULKAN_HPP_NAMESPACE case ComponentSwizzle::eG : return "G"; case ComponentSwizzle::eB : return "B"; case ComponentSwizzle::eA : return "A"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4469,7 +4525,7 @@ namespace VULKAN_HPP_NAMESPACE case ComponentTypeNV::eUint16 : return "Uint16"; case ComponentTypeNV::eUint32 : return "Uint32"; case ComponentTypeNV::eUint64 : return "Uint64"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4489,7 +4545,7 @@ namespace VULKAN_HPP_NAMESPACE case CompositeAlphaFlagBitsKHR::ePreMultiplied : return "PreMultiplied"; case CompositeAlphaFlagBitsKHR::ePostMultiplied : return "PostMultiplied"; case CompositeAlphaFlagBitsKHR::eInherit : return "Inherit"; - default: return 
"invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4503,7 +4559,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case ConditionalRenderingFlagBitsEXT::eInverted : return "Inverted"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4521,7 +4577,7 @@ namespace VULKAN_HPP_NAMESPACE case ConservativeRasterizationModeEXT::eDisabled : return "Disabled"; case ConservativeRasterizationModeEXT::eOverestimate : return "Overestimate"; case ConservativeRasterizationModeEXT::eUnderestimate : return "Underestimate"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4542,7 +4598,7 @@ namespace VULKAN_HPP_NAMESPACE case CopyAccelerationStructureModeKHR::eCompact : return "Compact"; case CopyAccelerationStructureModeKHR::eSerialize : return "Serialize"; case CopyAccelerationStructureModeKHR::eDeserialize : return "Deserialize"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4562,7 +4618,7 @@ namespace VULKAN_HPP_NAMESPACE case CoverageModulationModeNV::eRgb : return "Rgb"; case CoverageModulationModeNV::eAlpha : return "Alpha"; case CoverageModulationModeNV::eRgba : return "Rgba"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4578,7 +4634,7 @@ namespace VULKAN_HPP_NAMESPACE { case CoverageReductionModeNV::eMerge : return "Merge"; case CoverageReductionModeNV::eTruncate : return "Truncate"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4598,7 +4654,7 @@ namespace VULKAN_HPP_NAMESPACE case CullModeFlagBits::eFront : return "Front"; case CullModeFlagBits::eBack : return "Back"; case CullModeFlagBits::eFrontAndBack : return "FrontAndBack"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4620,7 +4676,7 @@ namespace VULKAN_HPP_NAMESPACE case DebugReportFlagBitsEXT::ePerformanceWarning : return "PerformanceWarning"; case DebugReportFlagBitsEXT::eError : return "Error"; case DebugReportFlagBitsEXT::eDebug : return "Debug"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4707,7 +4763,8 @@ namespace VULKAN_HPP_NAMESPACE case DebugReportObjectTypeEXT::eSamplerYcbcrConversion : return "SamplerYcbcrConversion"; case DebugReportObjectTypeEXT::eDescriptorUpdateTemplate : return "DescriptorUpdateTemplate"; case DebugReportObjectTypeEXT::eAccelerationStructureKHR : return "AccelerationStructureKHR"; - default: return "invalid"; + case DebugReportObjectTypeEXT::eAccelerationStructureNV : return "AccelerationStructureNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4727,7 +4784,7 @@ namespace VULKAN_HPP_NAMESPACE case DebugUtilsMessageSeverityFlagBitsEXT::eInfo : return "Info"; case DebugUtilsMessageSeverityFlagBitsEXT::eWarning : return "Warning"; case DebugUtilsMessageSeverityFlagBitsEXT::eError : return "Error"; - default: return "invalid"; + default: return "invalid ( " + 
VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4745,7 +4802,7 @@ namespace VULKAN_HPP_NAMESPACE case DebugUtilsMessageTypeFlagBitsEXT::eGeneral : return "General"; case DebugUtilsMessageTypeFlagBitsEXT::eValidation : return "Validation"; case DebugUtilsMessageTypeFlagBitsEXT::ePerformance : return "Performance"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4765,7 +4822,7 @@ namespace VULKAN_HPP_NAMESPACE case DependencyFlagBits::eByRegion : return "ByRegion"; case DependencyFlagBits::eDeviceGroup : return "DeviceGroup"; case DependencyFlagBits::eViewLocal : return "ViewLocal"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4786,7 +4843,7 @@ namespace VULKAN_HPP_NAMESPACE case DescriptorBindingFlagBits::eUpdateUnusedWhilePending : return "UpdateUnusedWhilePending"; case DescriptorBindingFlagBits::ePartiallyBound : return "PartiallyBound"; case DescriptorBindingFlagBits::eVariableDescriptorCount : return "VariableDescriptorCount"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4803,7 +4860,7 @@ namespace VULKAN_HPP_NAMESPACE { case DescriptorPoolCreateFlagBits::eFreeDescriptorSet : return "FreeDescriptorSet"; case DescriptorPoolCreateFlagBits::eUpdateAfterBind : return "UpdateAfterBind"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4820,7 +4877,7 @@ namespace VULKAN_HPP_NAMESPACE { case DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool : return "UpdateAfterBindPool"; case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR : return "PushDescriptorKHR"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4859,7 +4916,8 @@ namespace VULKAN_HPP_NAMESPACE case DescriptorType::eInputAttachment : return "InputAttachment"; case DescriptorType::eInlineUniformBlockEXT : return "InlineUniformBlockEXT"; case DescriptorType::eAccelerationStructureKHR : return "AccelerationStructureKHR"; - default: return "invalid"; + case DescriptorType::eAccelerationStructureNV : return "AccelerationStructureNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4876,7 +4934,7 @@ namespace VULKAN_HPP_NAMESPACE { case DescriptorUpdateTemplateType::eDescriptorSet : return "DescriptorSet"; case DescriptorUpdateTemplateType::ePushDescriptorsKHR : return "PushDescriptorsKHR"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4902,7 +4960,7 @@ namespace VULKAN_HPP_NAMESPACE case DeviceDiagnosticsConfigFlagBitsNV::eEnableShaderDebugInfo : return "EnableShaderDebugInfo"; case DeviceDiagnosticsConfigFlagBitsNV::eEnableResourceTracking : return "EnableResourceTracking"; case DeviceDiagnosticsConfigFlagBitsNV::eEnableAutomaticCheckpoints : return "EnableAutomaticCheckpoints"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4916,7 +4974,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case DeviceEventTypeEXT::eDisplayHotplug : return 
"DisplayHotplug"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4936,7 +4994,29 @@ namespace VULKAN_HPP_NAMESPACE case DeviceGroupPresentModeFlagBitsKHR::eRemote : return "Remote"; case DeviceGroupPresentModeFlagBitsKHR::eSum : return "Sum"; case DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice : return "LocalMultiDevice"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class DeviceMemoryReportEventTypeEXT + { + eAllocate = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, + eFree = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT, + eImport = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT, + eUnimport = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT, + eAllocationFailed = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportEventTypeEXT value ) + { + switch ( value ) + { + case DeviceMemoryReportEventTypeEXT::eAllocate : return "Allocate"; + case DeviceMemoryReportEventTypeEXT::eFree : return "Free"; + case DeviceMemoryReportEventTypeEXT::eImport : return "Import"; + case DeviceMemoryReportEventTypeEXT::eUnimport : return "Unimport"; + case DeviceMemoryReportEventTypeEXT::eAllocationFailed : return "AllocationFailed"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4950,7 +5030,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case DeviceQueueCreateFlagBits::eProtected : return "Protected"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4966,7 +5046,7 @@ namespace VULKAN_HPP_NAMESPACE { case DiscardRectangleModeEXT::eInclusive : return "Inclusive"; case DiscardRectangleModeEXT::eExclusive : return "Exclusive"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -4980,7 +5060,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case DisplayEventTypeEXT::eFirstPixelOut : return "FirstPixelOut"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5000,7 +5080,7 @@ namespace VULKAN_HPP_NAMESPACE case DisplayPlaneAlphaFlagBitsKHR::eGlobal : return "Global"; case DisplayPlaneAlphaFlagBitsKHR::ePerPixel : return "PerPixel"; case DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied : return "PerPixelPremultiplied"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5018,7 +5098,7 @@ namespace VULKAN_HPP_NAMESPACE case DisplayPowerStateEXT::eOff : return "Off"; case DisplayPowerStateEXT::eSuspend : return "Suspend"; case DisplayPowerStateEXT::eOn : return "On"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5060,7 +5140,7 @@ namespace VULKAN_HPP_NAMESPACE case DriverId::eBroadcomProprietary : return "BroadcomProprietary"; case DriverId::eMesaLlvmpipe : return "MesaLlvmpipe"; case DriverId::eMoltenvk : return "Moltenvk"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5078,9 +5158,11 
@@ namespace VULKAN_HPP_NAMESPACE eViewportWScalingNV = VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, eDiscardRectangleEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, eSampleLocationsEXT = VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, + eRayTracingPipelineStackSizeKHR = VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR, eViewportShadingRatePaletteNV = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV, eViewportCoarseSampleOrderNV = VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV, eExclusiveScissorNV = VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, + eFragmentShadingRateKHR = VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR, eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT, eCullModeEXT = VK_DYNAMIC_STATE_CULL_MODE_EXT, eFrontFaceEXT = VK_DYNAMIC_STATE_FRONT_FACE_EXT, @@ -5112,9 +5194,11 @@ namespace VULKAN_HPP_NAMESPACE case DynamicState::eViewportWScalingNV : return "ViewportWScalingNV"; case DynamicState::eDiscardRectangleEXT : return "DiscardRectangleEXT"; case DynamicState::eSampleLocationsEXT : return "SampleLocationsEXT"; + case DynamicState::eRayTracingPipelineStackSizeKHR : return "RayTracingPipelineStackSizeKHR"; case DynamicState::eViewportShadingRatePaletteNV : return "ViewportShadingRatePaletteNV"; case DynamicState::eViewportCoarseSampleOrderNV : return "ViewportCoarseSampleOrderNV"; case DynamicState::eExclusiveScissorNV : return "ExclusiveScissorNV"; + case DynamicState::eFragmentShadingRateKHR : return "FragmentShadingRateKHR"; case DynamicState::eLineStippleEXT : return "LineStippleEXT"; case DynamicState::eCullModeEXT : return "CullModeEXT"; case DynamicState::eFrontFaceEXT : return "FrontFaceEXT"; @@ -5128,7 +5212,7 @@ namespace VULKAN_HPP_NAMESPACE case DynamicState::eDepthBoundsTestEnableEXT : return "DepthBoundsTestEnableEXT"; case DynamicState::eStencilTestEnableEXT : return "StencilTestEnableEXT"; case DynamicState::eStencilOpEXT : return "StencilOpEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5145,7 +5229,7 @@ namespace VULKAN_HPP_NAMESPACE { case ExternalFenceFeatureFlagBits::eExportable : return "Exportable"; case ExternalFenceFeatureFlagBits::eImportable : return "Importable"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5166,7 +5250,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalFenceHandleTypeFlagBits::eOpaqueWin32 : return "OpaqueWin32"; case ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; case ExternalFenceHandleTypeFlagBits::eSyncFd : return "SyncFd"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5185,7 +5269,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalMemoryFeatureFlagBits::eDedicatedOnly : return "DedicatedOnly"; case ExternalMemoryFeatureFlagBits::eExportable : return "Exportable"; case ExternalMemoryFeatureFlagBits::eImportable : return "Importable"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5203,7 +5287,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly : return "DedicatedOnly"; case ExternalMemoryFeatureFlagBitsNV::eExportable : return "Exportable"; case ExternalMemoryFeatureFlagBitsNV::eImportable : return "Importable"; - default: return "invalid"; + default: return "invalid ( " + 
VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5238,7 +5322,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID : return "AndroidHardwareBufferANDROID"; case ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT : return "HostAllocationEXT"; case ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT : return "HostMappedForeignMemoryEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5258,7 +5342,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; case ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image : return "D3D11Image"; case ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt : return "D3D11ImageKmt"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5275,7 +5359,7 @@ namespace VULKAN_HPP_NAMESPACE { case ExternalSemaphoreFeatureFlagBits::eExportable : return "Exportable"; case ExternalSemaphoreFeatureFlagBits::eImportable : return "Importable"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5299,7 +5383,7 @@ namespace VULKAN_HPP_NAMESPACE case ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; case ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence : return "D3D12Fence"; case ExternalSemaphoreHandleTypeFlagBits::eSyncFd : return "SyncFd"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5313,7 +5397,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case FenceCreateFlagBits::eSignaled : return "Signaled"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5328,7 +5412,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case FenceImportFlagBits::eTemporary : return "Temporary"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5347,7 +5431,7 @@ namespace VULKAN_HPP_NAMESPACE case Filter::eNearest : return "Nearest"; case Filter::eLinear : return "Linear"; case Filter::eCubicIMG : return "CubicIMG"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5879,7 +5963,7 @@ namespace VULKAN_HPP_NAMESPACE case Format::eAstc12x12SfloatBlockEXT : return "Astc12x12SfloatBlockEXT"; case Format::eA4R4G4B4UnormPack16EXT : return "A4R4G4B4UnormPack16EXT"; case Format::eA4B4G4R4UnormPack16EXT : return "A4B4G4R4UnormPack16EXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5911,6 +5995,7 @@ namespace VULKAN_HPP_NAMESPACE eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, eAccelerationStructureVertexBufferKHR = VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR, eFragmentDensityMapEXT = VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT, + eFragmentShadingRateAttachmentKHR = VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eCositedChromaSamplesKHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR, 
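The hunks above replace every bare "invalid" fallback in the generated to_string overloads with one that echoes the offending value in hex via VULKAN_HPP_NAMESPACE::toHexString. A minimal sketch of the resulting behaviour, assuming the updated header is reachable as <vulkan/vulkan.hpp> with the default vk namespace alias; the exact hex formatting is an assumption here and may differ in detail:

#include <iostream>
#include <vulkan/vulkan.hpp>

int main()
{
  // A recognized enumerator still stringifies by name.
  std::cout << vk::to_string( vk::Filter::eLinear ) << '\n';              // "Linear"

  // An unrecognized value no longer collapses to a bare "invalid";
  // its numeric code is reported in hex, e.g. something like "invalid ( 7f )".
  std::cout << vk::to_string( static_cast<vk::Filter>( 0x7F ) ) << '\n';
  return 0;
}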
eDisjointKHR = VK_FORMAT_FEATURE_DISJOINT_BIT_KHR, eMidpointChromaSamplesKHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR, @@ -5954,7 +6039,82 @@ namespace VULKAN_HPP_NAMESPACE case FormatFeatureFlagBits::eSampledImageFilterCubicIMG : return "SampledImageFilterCubicIMG"; case FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR : return "AccelerationStructureVertexBufferKHR"; case FormatFeatureFlagBits::eFragmentDensityMapEXT : return "FragmentDensityMapEXT"; - default: return "invalid"; + case FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR : return "FragmentShadingRateAttachmentKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class FragmentShadingRateCombinerOpKHR + { + eKeep = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR, + eReplace = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR, + eMin = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR, + eMax = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR, + eMul = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateCombinerOpKHR value ) + { + switch ( value ) + { + case FragmentShadingRateCombinerOpKHR::eKeep : return "Keep"; + case FragmentShadingRateCombinerOpKHR::eReplace : return "Replace"; + case FragmentShadingRateCombinerOpKHR::eMin : return "Min"; + case FragmentShadingRateCombinerOpKHR::eMax : return "Max"; + case FragmentShadingRateCombinerOpKHR::eMul : return "Mul"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class FragmentShadingRateNV + { + e1InvocationPerPixel = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV, + e1InvocationPer1X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV, + e1InvocationPer2X1Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV, + e1InvocationPer2X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV, + e1InvocationPer2X4Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV, + e1InvocationPer4X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV, + e1InvocationPer4X4Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV, + e2InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV, + e4InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV, + e8InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV, + e16InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV, + eNoInvocations = VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV + }; + + VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateNV value ) + { + switch ( value ) + { + case FragmentShadingRateNV::e1InvocationPerPixel : return "1InvocationPerPixel"; + case FragmentShadingRateNV::e1InvocationPer1X2Pixels : return "1InvocationPer1X2Pixels"; + case FragmentShadingRateNV::e1InvocationPer2X1Pixels : return "1InvocationPer2X1Pixels"; + case FragmentShadingRateNV::e1InvocationPer2X2Pixels : return "1InvocationPer2X2Pixels"; + case FragmentShadingRateNV::e1InvocationPer2X4Pixels : return "1InvocationPer2X4Pixels"; + case FragmentShadingRateNV::e1InvocationPer4X2Pixels : return "1InvocationPer4X2Pixels"; + case FragmentShadingRateNV::e1InvocationPer4X4Pixels : return "1InvocationPer4X4Pixels"; + case FragmentShadingRateNV::e2InvocationsPerPixel : return "2InvocationsPerPixel"; + case FragmentShadingRateNV::e4InvocationsPerPixel : return "4InvocationsPerPixel"; + case 
FragmentShadingRateNV::e8InvocationsPerPixel : return "8InvocationsPerPixel"; + case FragmentShadingRateNV::e16InvocationsPerPixel : return "16InvocationsPerPixel"; + case FragmentShadingRateNV::eNoInvocations : return "NoInvocations"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class FragmentShadingRateTypeNV + { + eFragmentSize = VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV, + eEnums = VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV + }; + + VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateTypeNV value ) + { + switch ( value ) + { + case FragmentShadingRateTypeNV::eFragmentSize : return "FragmentSize"; + case FragmentShadingRateTypeNV::eEnums : return "Enums"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5969,7 +6129,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case FramebufferCreateFlagBits::eImageless : return "Imageless"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -5985,7 +6145,7 @@ namespace VULKAN_HPP_NAMESPACE { case FrontFace::eCounterClockwise : return "CounterClockwise"; case FrontFace::eClockwise : return "Clockwise"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6006,7 +6166,7 @@ namespace VULKAN_HPP_NAMESPACE case FullScreenExclusiveEXT::eAllowed : return "Allowed"; case FullScreenExclusiveEXT::eDisallowed : return "Disallowed"; case FullScreenExclusiveEXT::eApplicationControlled : return "ApplicationControlled"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } #endif /*VK_USE_PLATFORM_WIN32_KHR*/ @@ -6024,7 +6184,7 @@ namespace VULKAN_HPP_NAMESPACE { case GeometryFlagBitsKHR::eOpaque : return "Opaque"; case GeometryFlagBitsKHR::eNoDuplicateAnyHitInvocation : return "NoDuplicateAnyHitInvocation"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6046,7 +6206,7 @@ namespace VULKAN_HPP_NAMESPACE case GeometryInstanceFlagBitsKHR::eTriangleFrontCounterclockwise : return "TriangleFrontCounterclockwise"; case GeometryInstanceFlagBitsKHR::eForceOpaque : return "ForceOpaque"; case GeometryInstanceFlagBitsKHR::eForceNoOpaque : return "ForceNoOpaque"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6065,7 +6225,7 @@ namespace VULKAN_HPP_NAMESPACE case GeometryTypeKHR::eTriangles : return "Triangles"; case GeometryTypeKHR::eAabbs : return "Aabbs"; case GeometryTypeKHR::eInstances : return "Instances"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6102,7 +6262,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageAspectFlagBits::eMemoryPlane1EXT : return "MemoryPlane1EXT"; case ImageAspectFlagBits::eMemoryPlane2EXT : return "MemoryPlane2EXT"; case ImageAspectFlagBits::eMemoryPlane3EXT : return "MemoryPlane3EXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6150,7 +6310,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageCreateFlagBits::eCornerSampledNV : 
return "CornerSampledNV"; case ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT : return "SampleLocationsCompatibleDepthEXT"; case ImageCreateFlagBits::eSubsampledEXT : return "SubsampledEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6179,6 +6339,7 @@ namespace VULKAN_HPP_NAMESPACE eDepthAttachmentStencilReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR, eDepthReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR, eDepthReadOnlyStencilAttachmentOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR, + eFragmentShadingRateAttachmentOptimalKHR = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, eStencilAttachmentOptimalKHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR, eStencilReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR }; @@ -6206,7 +6367,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageLayout::eSharedPresentKHR : return "SharedPresentKHR"; case ImageLayout::eShadingRateOptimalNV : return "ShadingRateOptimalNV"; case ImageLayout::eFragmentDensityMapOptimalEXT : return "FragmentDensityMapOptimalEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6224,7 +6385,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageTiling::eOptimal : return "Optimal"; case ImageTiling::eLinear : return "Linear"; case ImageTiling::eDrmFormatModifierEXT : return "DrmFormatModifierEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6242,7 +6403,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageType::e1D : return "1D"; case ImageType::e2D : return "2D"; case ImageType::e3D : return "3D"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6257,7 +6418,8 @@ namespace VULKAN_HPP_NAMESPACE eTransientAttachment = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, eInputAttachment = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, eShadingRateImageNV = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, - eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT + eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, + eFragmentShadingRateAttachmentKHR = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR }; VULKAN_HPP_INLINE std::string to_string( ImageUsageFlagBits value ) @@ -6274,7 +6436,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageUsageFlagBits::eInputAttachment : return "InputAttachment"; case ImageUsageFlagBits::eShadingRateImageNV : return "ShadingRateImageNV"; case ImageUsageFlagBits::eFragmentDensityMapEXT : return "FragmentDensityMapEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6290,7 +6452,7 @@ namespace VULKAN_HPP_NAMESPACE { case ImageViewCreateFlagBits::eFragmentDensityMapDynamicEXT : return "FragmentDensityMapDynamicEXT"; case ImageViewCreateFlagBits::eFragmentDensityMapDeferredEXT : return "FragmentDensityMapDeferredEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6316,7 +6478,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageViewType::e1DArray : return "1DArray"; case ImageViewType::e2DArray : return "2DArray"; case 
ImageViewType::eCubeArray : return "CubeArray"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6337,7 +6499,7 @@ namespace VULKAN_HPP_NAMESPACE case IndexType::eUint32 : return "Uint32"; case IndexType::eNoneKHR : return "NoneKHR"; case IndexType::eUint8EXT : return "Uint8EXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6355,7 +6517,7 @@ namespace VULKAN_HPP_NAMESPACE case IndirectCommandsLayoutUsageFlagBitsNV::eExplicitPreprocess : return "ExplicitPreprocess"; case IndirectCommandsLayoutUsageFlagBitsNV::eIndexedSequences : return "IndexedSequences"; case IndirectCommandsLayoutUsageFlagBitsNV::eUnorderedSequences : return "UnorderedSequences"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6383,7 +6545,7 @@ namespace VULKAN_HPP_NAMESPACE case IndirectCommandsTokenTypeNV::eDrawIndexed : return "DrawIndexed"; case IndirectCommandsTokenTypeNV::eDraw : return "Draw"; case IndirectCommandsTokenTypeNV::eDrawTasks : return "DrawTasks"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6397,7 +6559,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case IndirectStateFlagBitsNV::eFlagFrontface : return "FlagFrontface"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6419,7 +6581,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case InternalAllocationType::eExecutable : return "Executable"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6439,7 +6601,7 @@ namespace VULKAN_HPP_NAMESPACE case LineRasterizationModeEXT::eRectangular : return "Rectangular"; case LineRasterizationModeEXT::eBresenham : return "Bresenham"; case LineRasterizationModeEXT::eRectangularSmooth : return "RectangularSmooth"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6483,7 +6645,7 @@ namespace VULKAN_HPP_NAMESPACE case LogicOp::eOrInverted : return "OrInverted"; case LogicOp::eNand : return "Nand"; case LogicOp::eSet : return "Set"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6502,7 +6664,7 @@ namespace VULKAN_HPP_NAMESPACE case MemoryAllocateFlagBits::eDeviceMask : return "DeviceMask"; case MemoryAllocateFlagBits::eDeviceAddress : return "DeviceAddress"; case MemoryAllocateFlagBits::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6519,7 +6681,7 @@ namespace VULKAN_HPP_NAMESPACE { case MemoryHeapFlagBits::eDeviceLocal : return "DeviceLocal"; case MemoryHeapFlagBits::eMultiInstance : return "MultiInstance"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6537,7 +6699,7 @@ namespace VULKAN_HPP_NAMESPACE case MemoryOverallocationBehaviorAMD::eDefault : return "Default"; case 
MemoryOverallocationBehaviorAMD::eAllowed : return "Allowed"; case MemoryOverallocationBehaviorAMD::eDisallowed : return "Disallowed"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6565,7 +6727,7 @@ namespace VULKAN_HPP_NAMESPACE case MemoryPropertyFlagBits::eProtected : return "Protected"; case MemoryPropertyFlagBits::eDeviceCoherentAMD : return "DeviceCoherentAMD"; case MemoryPropertyFlagBits::eDeviceUncachedAMD : return "DeviceUncachedAMD"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6607,11 +6769,11 @@ namespace VULKAN_HPP_NAMESPACE eDebugUtilsMessengerEXT = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, eAccelerationStructureKHR = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, eValidationCacheEXT = VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, + eAccelerationStructureNV = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, ePerformanceConfigurationINTEL = VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL, eDeferredOperationKHR = VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR, eIndirectCommandsLayoutNV = VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV, ePrivateDataSlotEXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT, - eAccelerationStructureNV = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, eDescriptorUpdateTemplateKHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR, eSamplerYcbcrConversionKHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR }; @@ -6656,11 +6818,12 @@ namespace VULKAN_HPP_NAMESPACE case ObjectType::eDebugUtilsMessengerEXT : return "DebugUtilsMessengerEXT"; case ObjectType::eAccelerationStructureKHR : return "AccelerationStructureKHR"; case ObjectType::eValidationCacheEXT : return "ValidationCacheEXT"; + case ObjectType::eAccelerationStructureNV : return "AccelerationStructureNV"; case ObjectType::ePerformanceConfigurationINTEL : return "PerformanceConfigurationINTEL"; case ObjectType::eDeferredOperationKHR : return "DeferredOperationKHR"; case ObjectType::eIndirectCommandsLayoutNV : return "IndirectCommandsLayoutNV"; case ObjectType::ePrivateDataSlotEXT : return "PrivateDataSlotEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6685,7 +6848,7 @@ namespace VULKAN_HPP_NAMESPACE case PeerMemoryFeatureFlagBits::eCopyDst : return "CopyDst"; case PeerMemoryFeatureFlagBits::eGenericSrc : return "GenericSrc"; case PeerMemoryFeatureFlagBits::eGenericDst : return "GenericDst"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6699,14 +6862,14 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case PerformanceConfigurationTypeINTEL::eCommandQueueMetricsDiscoveryActivated : return "CommandQueueMetricsDiscoveryActivated"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } enum class PerformanceCounterDescriptionFlagBitsKHR : VkPerformanceCounterDescriptionFlagsKHR { - ePerformanceImpacting = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR, - eConcurrentlyImpacted = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR + ePerformanceImpacting = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + eConcurrentlyImpacted = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR }; VULKAN_HPP_INLINE std::string 
to_string( PerformanceCounterDescriptionFlagBitsKHR value ) @@ -6715,7 +6878,7 @@ namespace VULKAN_HPP_NAMESPACE { case PerformanceCounterDescriptionFlagBitsKHR::ePerformanceImpacting : return "PerformanceImpacting"; case PerformanceCounterDescriptionFlagBitsKHR::eConcurrentlyImpacted : return "ConcurrentlyImpacted"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6736,7 +6899,7 @@ namespace VULKAN_HPP_NAMESPACE case PerformanceCounterScopeKHR::eCommandBuffer : return "CommandBuffer"; case PerformanceCounterScopeKHR::eRenderPass : return "RenderPass"; case PerformanceCounterScopeKHR::eCommand : return "Command"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6760,7 +6923,7 @@ namespace VULKAN_HPP_NAMESPACE case PerformanceCounterStorageKHR::eUint64 : return "Uint64"; case PerformanceCounterStorageKHR::eFloat32 : return "Float32"; case PerformanceCounterStorageKHR::eFloat64 : return "Float64"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6794,7 +6957,7 @@ namespace VULKAN_HPP_NAMESPACE case PerformanceCounterUnitKHR::eAmps : return "Amps"; case PerformanceCounterUnitKHR::eHertz : return "Hertz"; case PerformanceCounterUnitKHR::eCycles : return "Cycles"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6810,7 +6973,7 @@ namespace VULKAN_HPP_NAMESPACE { case PerformanceOverrideTypeINTEL::eNullHardware : return "NullHardware"; case PerformanceOverrideTypeINTEL::eFlushGpuCaches : return "FlushGpuCaches"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6826,7 +6989,7 @@ namespace VULKAN_HPP_NAMESPACE { case PerformanceParameterTypeINTEL::eHwCountersSupported : return "HwCountersSupported"; case PerformanceParameterTypeINTEL::eStreamMarkerValidBits : return "StreamMarkerValidBits"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6848,7 +7011,7 @@ namespace VULKAN_HPP_NAMESPACE case PerformanceValueTypeINTEL::eFloat : return "Float"; case PerformanceValueTypeINTEL::eBool : return "Bool"; case PerformanceValueTypeINTEL::eString : return "String"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6870,7 +7033,7 @@ namespace VULKAN_HPP_NAMESPACE case PhysicalDeviceType::eDiscreteGpu : return "DiscreteGpu"; case PhysicalDeviceType::eVirtualGpu : return "VirtualGpu"; case PhysicalDeviceType::eCpu : return "Cpu"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6889,7 +7052,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineBindPoint::eGraphics : return "Graphics"; case PipelineBindPoint::eCompute : return "Compute"; case PipelineBindPoint::eRayTracingKHR : return "RayTracingKHR"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6903,7 +7066,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case 
PipelineCacheCreateFlagBits::eExternallySynchronizedEXT : return "ExternallySynchronizedEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6917,7 +7080,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case PipelineCacheHeaderVersion::eOne : return "One"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6942,6 +7105,7 @@ namespace VULKAN_HPP_NAMESPACE eRayTracingNoNullIntersectionShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR, eRayTracingSkipTrianglesKHR = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR, eRayTracingSkipAabbsKHR = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR, + eRayTracingShaderGroupHandleCaptureReplayKHR = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, eDeferCompileNV = VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV, eCaptureStatisticsKHR = VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR, eCaptureInternalRepresentationsKHR = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR, @@ -6968,6 +7132,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR : return "RayTracingNoNullIntersectionShadersKHR"; case PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR : return "RayTracingSkipTrianglesKHR"; case PipelineCreateFlagBits::eRayTracingSkipAabbsKHR : return "RayTracingSkipAabbsKHR"; + case PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR : return "RayTracingShaderGroupHandleCaptureReplayKHR"; case PipelineCreateFlagBits::eDeferCompileNV : return "DeferCompileNV"; case PipelineCreateFlagBits::eCaptureStatisticsKHR : return "CaptureStatisticsKHR"; case PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR : return "CaptureInternalRepresentationsKHR"; @@ -6975,7 +7140,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreateFlagBits::eLibraryKHR : return "LibraryKHR"; case PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT : return "FailOnPipelineCompileRequiredEXT"; case PipelineCreateFlagBits::eEarlyReturnOnFailureEXT : return "EarlyReturnOnFailureEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -6993,7 +7158,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreationFeedbackFlagBitsEXT::eValid : return "Valid"; case PipelineCreationFeedbackFlagBitsEXT::eApplicationPipelineCacheHit : return "ApplicationPipelineCacheHit"; case PipelineCreationFeedbackFlagBitsEXT::eBasePipelineAcceleration : return "BasePipelineAcceleration"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7013,7 +7178,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineExecutableStatisticFormatKHR::eInt64 : return "Int64"; case PipelineExecutableStatisticFormatKHR::eUint64 : return "Uint64"; case PipelineExecutableStatisticFormatKHR::eFloat64 : return "Float64"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7029,7 +7194,7 @@ namespace VULKAN_HPP_NAMESPACE { case PipelineShaderStageCreateFlagBits::eAllowVaryingSubgroupSizeEXT : return "AllowVaryingSubgroupSizeEXT"; case PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT : return "RequireFullSubgroupsEXT"; - default: 
return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7054,14 +7219,15 @@ namespace VULKAN_HPP_NAMESPACE eAllCommands = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, eTransformFeedbackEXT = VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, eConditionalRenderingEXT = VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, - eRayTracingShaderKHR = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, eAccelerationStructureBuildKHR = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + eRayTracingShaderKHR = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, eShadingRateImageNV = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, eTaskShaderNV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, eMeshShaderNV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, eFragmentDensityProcessEXT = VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, eCommandPreprocessNV = VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV, eAccelerationStructureBuildNV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, + eFragmentShadingRateAttachmentKHR = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eRayTracingShaderNV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV }; @@ -7088,14 +7254,14 @@ namespace VULKAN_HPP_NAMESPACE case PipelineStageFlagBits::eAllCommands : return "AllCommands"; case PipelineStageFlagBits::eTransformFeedbackEXT : return "TransformFeedbackEXT"; case PipelineStageFlagBits::eConditionalRenderingEXT : return "ConditionalRenderingEXT"; - case PipelineStageFlagBits::eRayTracingShaderKHR : return "RayTracingShaderKHR"; case PipelineStageFlagBits::eAccelerationStructureBuildKHR : return "AccelerationStructureBuildKHR"; + case PipelineStageFlagBits::eRayTracingShaderKHR : return "RayTracingShaderKHR"; case PipelineStageFlagBits::eShadingRateImageNV : return "ShadingRateImageNV"; case PipelineStageFlagBits::eTaskShaderNV : return "TaskShaderNV"; case PipelineStageFlagBits::eMeshShaderNV : return "MeshShaderNV"; case PipelineStageFlagBits::eFragmentDensityProcessEXT : return "FragmentDensityProcessEXT"; case PipelineStageFlagBits::eCommandPreprocessNV : return "CommandPreprocessNV"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7112,7 +7278,7 @@ namespace VULKAN_HPP_NAMESPACE { case PointClippingBehavior::eAllClipPlanes : return "AllClipPlanes"; case PointClippingBehavior::eUserClipPlanesOnly : return "UserClipPlanesOnly"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7132,7 +7298,7 @@ namespace VULKAN_HPP_NAMESPACE case PolygonMode::eLine : return "Line"; case PolygonMode::ePoint : return "Point"; case PolygonMode::eFillRectangleNV : return "FillRectangleNV"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7156,7 +7322,7 @@ namespace VULKAN_HPP_NAMESPACE case PresentModeKHR::eFifoRelaxed : return "FifoRelaxed"; case PresentModeKHR::eSharedDemandRefresh : return "SharedDemandRefresh"; case PresentModeKHR::eSharedContinuousRefresh : return "SharedContinuousRefresh"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7190,7 +7356,7 @@ namespace VULKAN_HPP_NAMESPACE case PrimitiveTopology::eTriangleListWithAdjacency : return "TriangleListWithAdjacency"; case 
PrimitiveTopology::eTriangleStripWithAdjacency : return "TriangleStripWithAdjacency"; case PrimitiveTopology::ePatchList : return "PatchList"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7212,7 +7378,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case QueryControlFlagBits::ePrecise : return "Precise"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7246,7 +7412,7 @@ namespace VULKAN_HPP_NAMESPACE case QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches : return "TessellationControlShaderPatches"; case QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations : return "TessellationEvaluationShaderInvocations"; case QueryPipelineStatisticFlagBits::eComputeShaderInvocations : return "ComputeShaderInvocations"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7268,7 +7434,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case QueryPoolSamplingModeINTEL::eManual : return "Manual"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7288,7 +7454,7 @@ namespace VULKAN_HPP_NAMESPACE case QueryResultFlagBits::eWait : return "Wait"; case QueryResultFlagBits::eWithAvailability : return "WithAvailability"; case QueryResultFlagBits::ePartial : return "Partial"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7301,8 +7467,8 @@ namespace VULKAN_HPP_NAMESPACE ePerformanceQueryKHR = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR, eAccelerationStructureCompactedSizeKHR = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR, eAccelerationStructureSerializationSizeKHR = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR, - ePerformanceQueryINTEL = VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL, - eAccelerationStructureCompactedSizeNV = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV + eAccelerationStructureCompactedSizeNV = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV, + ePerformanceQueryINTEL = VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL }; VULKAN_HPP_INLINE std::string to_string( QueryType value ) @@ -7316,8 +7482,9 @@ namespace VULKAN_HPP_NAMESPACE case QueryType::ePerformanceQueryKHR : return "PerformanceQueryKHR"; case QueryType::eAccelerationStructureCompactedSizeKHR : return "AccelerationStructureCompactedSizeKHR"; case QueryType::eAccelerationStructureSerializationSizeKHR : return "AccelerationStructureSerializationSizeKHR"; + case QueryType::eAccelerationStructureCompactedSizeNV : return "AccelerationStructureCompactedSizeNV"; case QueryType::ePerformanceQueryINTEL : return "PerformanceQueryINTEL"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7339,7 +7506,7 @@ namespace VULKAN_HPP_NAMESPACE case QueueFlagBits::eTransfer : return "Transfer"; case QueueFlagBits::eSparseBinding : return "SparseBinding"; case QueueFlagBits::eProtected : return "Protected"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7359,7 +7526,7 @@ namespace VULKAN_HPP_NAMESPACE case 
QueueGlobalPriorityEXT::eMedium : return "Medium"; case QueueGlobalPriorityEXT::eHigh : return "High"; case QueueGlobalPriorityEXT::eRealtime : return "Realtime"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7375,7 +7542,7 @@ namespace VULKAN_HPP_NAMESPACE { case RasterizationOrderAMD::eStrict : return "Strict"; case RasterizationOrderAMD::eRelaxed : return "Relaxed"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7394,7 +7561,7 @@ namespace VULKAN_HPP_NAMESPACE case RayTracingShaderGroupTypeKHR::eGeneral : return "General"; case RayTracingShaderGroupTypeKHR::eTrianglesHitGroup : return "TrianglesHitGroup"; case RayTracingShaderGroupTypeKHR::eProceduralHitGroup : return "ProceduralHitGroup"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7408,7 +7575,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case RenderPassCreateFlagBits::eTransformQCOM : return "TransformQCOM"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7431,7 +7598,7 @@ namespace VULKAN_HPP_NAMESPACE case ResolveModeFlagBits::eAverage : return "Average"; case ResolveModeFlagBits::eMin : return "Min"; case ResolveModeFlagBits::eMax : return "Max"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7467,7 +7634,6 @@ namespace VULKAN_HPP_NAMESPACE eErrorIncompatibleDisplayKHR = VK_ERROR_INCOMPATIBLE_DISPLAY_KHR, eErrorValidationFailedEXT = VK_ERROR_VALIDATION_FAILED_EXT, eErrorInvalidShaderNV = VK_ERROR_INVALID_SHADER_NV, - eErrorIncompatibleVersionKHR = VK_ERROR_INCOMPATIBLE_VERSION_KHR, eErrorInvalidDrmFormatModifierPlaneLayoutEXT = VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT, eErrorNotPermittedEXT = VK_ERROR_NOT_PERMITTED_EXT, eErrorFullScreenExclusiveModeLostEXT = VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT, @@ -7518,7 +7684,6 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorIncompatibleDisplayKHR : return "ErrorIncompatibleDisplayKHR"; case Result::eErrorValidationFailedEXT : return "ErrorValidationFailedEXT"; case Result::eErrorInvalidShaderNV : return "ErrorInvalidShaderNV"; - case Result::eErrorIncompatibleVersionKHR : return "ErrorIncompatibleVersionKHR"; case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT : return "ErrorInvalidDrmFormatModifierPlaneLayoutEXT"; case Result::eErrorNotPermittedEXT : return "ErrorNotPermittedEXT"; case Result::eErrorFullScreenExclusiveModeLostEXT : return "ErrorFullScreenExclusiveModeLostEXT"; @@ -7527,7 +7692,7 @@ namespace VULKAN_HPP_NAMESPACE case Result::eOperationDeferredKHR : return "OperationDeferredKHR"; case Result::eOperationNotDeferredKHR : return "OperationNotDeferredKHR"; case Result::ePipelineCompileRequiredEXT : return "PipelineCompileRequiredEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7553,7 +7718,7 @@ namespace VULKAN_HPP_NAMESPACE case SampleCountFlagBits::e16 : return "16"; case SampleCountFlagBits::e32 : return "32"; case SampleCountFlagBits::e64 : return "64"; - default: return "invalid"; + default: return "invalid ( " + 
VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7576,7 +7741,7 @@ namespace VULKAN_HPP_NAMESPACE case SamplerAddressMode::eClampToEdge : return "ClampToEdge"; case SamplerAddressMode::eClampToBorder : return "ClampToBorder"; case SamplerAddressMode::eMirrorClampToEdge : return "MirrorClampToEdge"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7592,7 +7757,7 @@ namespace VULKAN_HPP_NAMESPACE { case SamplerCreateFlagBits::eSubsampledEXT : return "SubsampledEXT"; case SamplerCreateFlagBits::eSubsampledCoarseReconstructionEXT : return "SubsampledCoarseReconstructionEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7608,7 +7773,7 @@ namespace VULKAN_HPP_NAMESPACE { case SamplerMipmapMode::eNearest : return "Nearest"; case SamplerMipmapMode::eLinear : return "Linear"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7627,7 +7792,7 @@ namespace VULKAN_HPP_NAMESPACE case SamplerReductionMode::eWeightedAverage : return "WeightedAverage"; case SamplerReductionMode::eMin : return "Min"; case SamplerReductionMode::eMax : return "Max"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7650,7 +7815,7 @@ namespace VULKAN_HPP_NAMESPACE case SamplerYcbcrModelConversion::eYcbcr709 : return "Ycbcr709"; case SamplerYcbcrModelConversion::eYcbcr601 : return "Ycbcr601"; case SamplerYcbcrModelConversion::eYcbcr2020 : return "Ycbcr2020"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7667,7 +7832,7 @@ namespace VULKAN_HPP_NAMESPACE { case SamplerYcbcrRange::eItuFull : return "ItuFull"; case SamplerYcbcrRange::eItuNarrow : return "ItuNarrow"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7687,18 +7852,10 @@ namespace VULKAN_HPP_NAMESPACE case ScopeNV::eWorkgroup : return "Workgroup"; case ScopeNV::eSubgroup : return "Subgroup"; case ScopeNV::eQueueFamily : return "QueueFamily"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } - enum class SemaphoreCreateFlagBits : VkSemaphoreCreateFlags - {}; - - VULKAN_HPP_INLINE std::string to_string( SemaphoreCreateFlagBits ) - { - return "(void)"; - } - enum class SemaphoreImportFlagBits : VkSemaphoreImportFlags { eTemporary = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT @@ -7710,7 +7867,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case SemaphoreImportFlagBits::eTemporary : return "Temporary"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7727,7 +7884,7 @@ namespace VULKAN_HPP_NAMESPACE { case SemaphoreType::eBinary : return "Binary"; case SemaphoreType::eTimeline : return "Timeline"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7742,7 +7899,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case SemaphoreWaitFlagBits::eAny : return "Any"; - default: return "invalid"; 
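A few hunks below, ShaderGroupShaderKHR is introduced; it is the selector consumed by the finalized ray-tracing stack-size queries that pair with the DynamicState::eRayTracingPipelineStackSizeKHR entry added earlier in this file. A hedged sketch of that flow, assuming a device created with VK_KHR_ray_tracing_pipeline enabled, an already-built ray-tracing pipeline, and a dispatcher that has loaded the extension entry points; the member-function names follow vulkan.hpp's usual naming of vkGetRayTracingShaderGroupStackSizeKHR / vkCmdSetRayTracingPipelineStackSizeKHR and are assumptions, not taken from these hunks:

#include <vulkan/vulkan.hpp>

// Handles are assumed to come from elsewhere in the application.
void setStackSizeForGroup0( vk::Device device, vk::Pipeline rtPipeline, vk::CommandBuffer cmd )
{
  // Per-group, per-shader-kind stack requirement, selected via the new enum.
  vk::DeviceSize closestHit = device.getRayTracingShaderGroupStackSizeKHR(
      rtPipeline, /*group*/ 0, vk::ShaderGroupShaderKHR::eClosestHit );

  // With eRayTracingPipelineStackSizeKHR listed as a dynamic state, the stack
  // size is supplied at record time instead of being baked into the pipeline.
  cmd.setRayTracingPipelineStackSizeKHR( static_cast<uint32_t>( closestHit ) );
}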
+ default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7769,7 +7926,27 @@ namespace VULKAN_HPP_NAMESPACE case ShaderFloatControlsIndependence::e32BitOnly : return "32BitOnly"; case ShaderFloatControlsIndependence::eAll : return "All"; case ShaderFloatControlsIndependence::eNone : return "None"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; + } + } + + enum class ShaderGroupShaderKHR + { + eGeneral = VK_SHADER_GROUP_SHADER_GENERAL_KHR, + eClosestHit = VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR, + eAnyHit = VK_SHADER_GROUP_SHADER_ANY_HIT_KHR, + eIntersection = VK_SHADER_GROUP_SHADER_INTERSECTION_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ShaderGroupShaderKHR value ) + { + switch ( value ) + { + case ShaderGroupShaderKHR::eGeneral : return "General"; + case ShaderGroupShaderKHR::eClosestHit : return "ClosestHit"; + case ShaderGroupShaderKHR::eAnyHit : return "AnyHit"; + case ShaderGroupShaderKHR::eIntersection : return "Intersection"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7787,7 +7964,7 @@ namespace VULKAN_HPP_NAMESPACE case ShaderInfoTypeAMD::eStatistics : return "Statistics"; case ShaderInfoTypeAMD::eBinary : return "Binary"; case ShaderInfoTypeAMD::eDisassembly : return "Disassembly"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7845,7 +8022,7 @@ namespace VULKAN_HPP_NAMESPACE case ShaderStageFlagBits::eCallableKHR : return "CallableKHR"; case ShaderStageFlagBits::eTaskNV : return "TaskNV"; case ShaderStageFlagBits::eMeshNV : return "MeshNV"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7881,7 +8058,7 @@ namespace VULKAN_HPP_NAMESPACE case ShadingRatePaletteEntryNV::e1InvocationPer4X2Pixels : return "1InvocationPer4X2Pixels"; case ShadingRatePaletteEntryNV::e1InvocationPer2X4Pixels : return "1InvocationPer2X4Pixels"; case ShadingRatePaletteEntryNV::e1InvocationPer4X4Pixels : return "1InvocationPer4X4Pixels"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7897,7 +8074,7 @@ namespace VULKAN_HPP_NAMESPACE { case SharingMode::eExclusive : return "Exclusive"; case SharingMode::eConcurrent : return "Concurrent"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7915,7 +8092,7 @@ namespace VULKAN_HPP_NAMESPACE case SparseImageFormatFlagBits::eSingleMiptail : return "SingleMiptail"; case SparseImageFormatFlagBits::eAlignedMipSize : return "AlignedMipSize"; case SparseImageFormatFlagBits::eNonstandardBlockSize : return "NonstandardBlockSize"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7929,7 +8106,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case SparseMemoryBindFlagBits::eMetadata : return "Metadata"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7948,7 +8125,7 @@ namespace VULKAN_HPP_NAMESPACE case StencilFaceFlagBits::eFront : return "Front"; case 
StencilFaceFlagBits::eBack : return "Back"; case StencilFaceFlagBits::eFrontAndBack : return "FrontAndBack"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -7976,7 +8153,7 @@ namespace VULKAN_HPP_NAMESPACE case StencilOp::eInvert : return "Invert"; case StencilOp::eIncrementAndWrap : return "IncrementAndWrap"; case StencilOp::eDecrementAndWrap : return "DecrementAndWrap"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -8270,31 +8447,31 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceBlendOperationAdvancedPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT, ePipelineColorBlendAdvancedStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT, ePipelineCoverageToColorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV, - eBindAccelerationStructureMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR, eWriteDescriptorSetAccelerationStructureKHR = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, eAccelerationStructureBuildGeometryInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, - eAccelerationStructureCreateGeometryTypeInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR, eAccelerationStructureDeviceAddressInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR, eAccelerationStructureGeometryAabbsDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, eAccelerationStructureGeometryInstancesDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, eAccelerationStructureGeometryTrianglesDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, eAccelerationStructureGeometryKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR, - eAccelerationStructureMemoryRequirementsInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR, - eAccelerationStructureVersionKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR, + eAccelerationStructureVersionInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR, eCopyAccelerationStructureInfoKHR = VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR, eCopyAccelerationStructureToMemoryInfoKHR = VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR, eCopyMemoryToAccelerationStructureInfoKHR = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR, - ePhysicalDeviceRayTracingFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR, - ePhysicalDeviceRayTracingPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR, + ePhysicalDeviceAccelerationStructureFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR, + ePhysicalDeviceAccelerationStructurePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR, + eAccelerationStructureCreateInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR, + eAccelerationStructureBuildSizesInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR, + ePhysicalDeviceRayTracingPipelineFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR, + ePhysicalDeviceRayTracingPipelinePropertiesKHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR, eRayTracingPipelineCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR, eRayTracingShaderGroupCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR, - eAccelerationStructureCreateInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR, eRayTracingPipelineInterfaceCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR, + ePhysicalDeviceRayQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR, ePipelineCoverageModulationStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV, ePhysicalDeviceShaderSmBuiltinsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV, ePhysicalDeviceShaderSmBuiltinsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV, eDrmFormatModifierPropertiesListEXT = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT, - eDrmFormatModifierPropertiesEXT = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT, ePhysicalDeviceImageDrmFormatModifierInfoEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT, eImageDrmFormatModifierListCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT, eImageDrmFormatModifierExplicitCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT, @@ -8312,6 +8489,8 @@ namespace VULKAN_HPP_NAMESPACE eGeometryNV = VK_STRUCTURE_TYPE_GEOMETRY_NV, eGeometryTrianglesNV = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV, eGeometryAabbNV = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV, + eBindAccelerationStructureMemoryInfoNV = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, + eWriteDescriptorSetAccelerationStructureNV = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV, eAccelerationStructureMemoryRequirementsInfoNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, ePhysicalDeviceRayTracingPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV, eRayTracingShaderGroupCreateInfoNV = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, @@ -8354,6 +8533,7 @@ namespace VULKAN_HPP_NAMESPACE eDisplayNativeHdrSurfaceCapabilitiesAMD = VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD, eSwapchainDisplayNativeHdrCreateInfoAMD = VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD, eImagepipeSurfaceCreateInfoFUCHSIA = VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA, + ePhysicalDeviceShaderTerminateInvocationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR, eMetalSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, ePhysicalDeviceFragmentDensityMapFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT, ePhysicalDeviceFragmentDensityMapPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT, @@ -8361,8 +8541,14 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceSubgroupSizeControlPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT, ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT, ePhysicalDeviceSubgroupSizeControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT, + eFragmentShadingRateAttachmentInfoKHR = VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR, + 
ePipelineFragmentShadingRateStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR, + ePhysicalDeviceFragmentShadingRatePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR, + ePhysicalDeviceFragmentShadingRateFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR, + ePhysicalDeviceFragmentShadingRateKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR, ePhysicalDeviceShaderCoreProperties2AMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD, ePhysicalDeviceCoherentMemoryFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, + ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT, ePhysicalDeviceMemoryBudgetPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT, ePhysicalDeviceMemoryPriorityFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT, eMemoryPriorityAllocateInfoEXT = VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT, @@ -8390,7 +8576,6 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceShaderAtomicFloatFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT, ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, ePhysicalDeviceExtendedDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT, - eDeferredOperationInfoKHR = VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR, ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR, ePipelineInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR, ePipelineExecutablePropertiesKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR, @@ -8410,6 +8595,9 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceTexelBufferAlignmentPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT, eCommandBufferInheritanceRenderPassTransformInfoQCOM = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM, eRenderPassTransformBeginInfoQCOM = VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM, + ePhysicalDeviceDeviceMemoryReportFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT, + eDeviceDeviceMemoryReportCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT, + eDeviceMemoryReportCallbackDataEXT = VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, ePhysicalDeviceRobustness2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT, ePhysicalDeviceRobustness2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT, eSamplerCustomBorderColorCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT, @@ -8422,8 +8610,12 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDevicePipelineCreationCacheControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT, ePhysicalDeviceDiagnosticsConfigFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV, eDeviceDiagnosticsConfigCreateInfoNV = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV, + ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV, + ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV, + ePipelineFragmentShadingRateEnumStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV, ePhysicalDeviceFragmentDensityMap2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT, ePhysicalDeviceFragmentDensityMap2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT, + eCopyCommandTransformInfoQCOM = VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM, ePhysicalDeviceImageRobustnessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT, eCopyBufferInfo2KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR, eCopyImageInfo2KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR, @@ -8442,7 +8634,6 @@ namespace VULKAN_HPP_NAMESPACE eAttachmentDescriptionStencilLayoutKHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR, eAttachmentReference2KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR, eAttachmentReferenceStencilLayoutKHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR, - eBindAccelerationStructureMemoryInfoNV = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, eBindBufferMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR, eBindBufferMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, eBindImageMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR, @@ -8551,8 +8742,7 @@ namespace VULKAN_HPP_NAMESPACE eSubpassDescription2KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, eSubpassDescriptionDepthStencilResolveKHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR, eSubpassEndInfoKHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, - eTimelineSemaphoreSubmitInfoKHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR, - eWriteDescriptorSetAccelerationStructureNV = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV + eTimelineSemaphoreSubmitInfoKHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR }; VULKAN_HPP_INLINE std::string to_string( StructureType value ) @@ -8847,31 +9037,31 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT : return "PhysicalDeviceBlendOperationAdvancedPropertiesEXT"; case StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT : return "PipelineColorBlendAdvancedStateCreateInfoEXT"; case StructureType::ePipelineCoverageToColorStateCreateInfoNV : return "PipelineCoverageToColorStateCreateInfoNV"; - case StructureType::eBindAccelerationStructureMemoryInfoKHR : return "BindAccelerationStructureMemoryInfoKHR"; case StructureType::eWriteDescriptorSetAccelerationStructureKHR : return "WriteDescriptorSetAccelerationStructureKHR"; case StructureType::eAccelerationStructureBuildGeometryInfoKHR : return "AccelerationStructureBuildGeometryInfoKHR"; - case StructureType::eAccelerationStructureCreateGeometryTypeInfoKHR : return "AccelerationStructureCreateGeometryTypeInfoKHR"; case StructureType::eAccelerationStructureDeviceAddressInfoKHR : return "AccelerationStructureDeviceAddressInfoKHR"; case StructureType::eAccelerationStructureGeometryAabbsDataKHR : return "AccelerationStructureGeometryAabbsDataKHR"; case StructureType::eAccelerationStructureGeometryInstancesDataKHR : return "AccelerationStructureGeometryInstancesDataKHR"; case StructureType::eAccelerationStructureGeometryTrianglesDataKHR : return "AccelerationStructureGeometryTrianglesDataKHR"; case 
StructureType::eAccelerationStructureGeometryKHR : return "AccelerationStructureGeometryKHR"; - case StructureType::eAccelerationStructureMemoryRequirementsInfoKHR : return "AccelerationStructureMemoryRequirementsInfoKHR"; - case StructureType::eAccelerationStructureVersionKHR : return "AccelerationStructureVersionKHR"; + case StructureType::eAccelerationStructureVersionInfoKHR : return "AccelerationStructureVersionInfoKHR"; case StructureType::eCopyAccelerationStructureInfoKHR : return "CopyAccelerationStructureInfoKHR"; case StructureType::eCopyAccelerationStructureToMemoryInfoKHR : return "CopyAccelerationStructureToMemoryInfoKHR"; case StructureType::eCopyMemoryToAccelerationStructureInfoKHR : return "CopyMemoryToAccelerationStructureInfoKHR"; - case StructureType::ePhysicalDeviceRayTracingFeaturesKHR : return "PhysicalDeviceRayTracingFeaturesKHR"; - case StructureType::ePhysicalDeviceRayTracingPropertiesKHR : return "PhysicalDeviceRayTracingPropertiesKHR"; + case StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR : return "PhysicalDeviceAccelerationStructureFeaturesKHR"; + case StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR : return "PhysicalDeviceAccelerationStructurePropertiesKHR"; + case StructureType::eAccelerationStructureCreateInfoKHR : return "AccelerationStructureCreateInfoKHR"; + case StructureType::eAccelerationStructureBuildSizesInfoKHR : return "AccelerationStructureBuildSizesInfoKHR"; + case StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR : return "PhysicalDeviceRayTracingPipelineFeaturesKHR"; + case StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR : return "PhysicalDeviceRayTracingPipelinePropertiesKHR"; case StructureType::eRayTracingPipelineCreateInfoKHR : return "RayTracingPipelineCreateInfoKHR"; case StructureType::eRayTracingShaderGroupCreateInfoKHR : return "RayTracingShaderGroupCreateInfoKHR"; - case StructureType::eAccelerationStructureCreateInfoKHR : return "AccelerationStructureCreateInfoKHR"; case StructureType::eRayTracingPipelineInterfaceCreateInfoKHR : return "RayTracingPipelineInterfaceCreateInfoKHR"; + case StructureType::ePhysicalDeviceRayQueryFeaturesKHR : return "PhysicalDeviceRayQueryFeaturesKHR"; case StructureType::ePipelineCoverageModulationStateCreateInfoNV : return "PipelineCoverageModulationStateCreateInfoNV"; case StructureType::ePhysicalDeviceShaderSmBuiltinsFeaturesNV : return "PhysicalDeviceShaderSmBuiltinsFeaturesNV"; case StructureType::ePhysicalDeviceShaderSmBuiltinsPropertiesNV : return "PhysicalDeviceShaderSmBuiltinsPropertiesNV"; case StructureType::eDrmFormatModifierPropertiesListEXT : return "DrmFormatModifierPropertiesListEXT"; - case StructureType::eDrmFormatModifierPropertiesEXT : return "DrmFormatModifierPropertiesEXT"; case StructureType::ePhysicalDeviceImageDrmFormatModifierInfoEXT : return "PhysicalDeviceImageDrmFormatModifierInfoEXT"; case StructureType::eImageDrmFormatModifierListCreateInfoEXT : return "ImageDrmFormatModifierListCreateInfoEXT"; case StructureType::eImageDrmFormatModifierExplicitCreateInfoEXT : return "ImageDrmFormatModifierExplicitCreateInfoEXT"; @@ -8889,6 +9079,8 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eGeometryNV : return "GeometryNV"; case StructureType::eGeometryTrianglesNV : return "GeometryTrianglesNV"; case StructureType::eGeometryAabbNV : return "GeometryAabbNV"; + case StructureType::eBindAccelerationStructureMemoryInfoNV : return "BindAccelerationStructureMemoryInfoNV"; + case 
StructureType::eWriteDescriptorSetAccelerationStructureNV : return "WriteDescriptorSetAccelerationStructureNV"; case StructureType::eAccelerationStructureMemoryRequirementsInfoNV : return "AccelerationStructureMemoryRequirementsInfoNV"; case StructureType::ePhysicalDeviceRayTracingPropertiesNV : return "PhysicalDeviceRayTracingPropertiesNV"; case StructureType::eRayTracingShaderGroupCreateInfoNV : return "RayTracingShaderGroupCreateInfoNV"; @@ -8931,6 +9123,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eDisplayNativeHdrSurfaceCapabilitiesAMD : return "DisplayNativeHdrSurfaceCapabilitiesAMD"; case StructureType::eSwapchainDisplayNativeHdrCreateInfoAMD : return "SwapchainDisplayNativeHdrCreateInfoAMD"; case StructureType::eImagepipeSurfaceCreateInfoFUCHSIA : return "ImagepipeSurfaceCreateInfoFUCHSIA"; + case StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR : return "PhysicalDeviceShaderTerminateInvocationFeaturesKHR"; case StructureType::eMetalSurfaceCreateInfoEXT : return "MetalSurfaceCreateInfoEXT"; case StructureType::ePhysicalDeviceFragmentDensityMapFeaturesEXT : return "PhysicalDeviceFragmentDensityMapFeaturesEXT"; case StructureType::ePhysicalDeviceFragmentDensityMapPropertiesEXT : return "PhysicalDeviceFragmentDensityMapPropertiesEXT"; @@ -8938,8 +9131,14 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceSubgroupSizeControlPropertiesEXT : return "PhysicalDeviceSubgroupSizeControlPropertiesEXT"; case StructureType::ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT : return "PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT"; case StructureType::ePhysicalDeviceSubgroupSizeControlFeaturesEXT : return "PhysicalDeviceSubgroupSizeControlFeaturesEXT"; + case StructureType::eFragmentShadingRateAttachmentInfoKHR : return "FragmentShadingRateAttachmentInfoKHR"; + case StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR : return "PipelineFragmentShadingRateStateCreateInfoKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR : return "PhysicalDeviceFragmentShadingRatePropertiesKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR : return "PhysicalDeviceFragmentShadingRateFeaturesKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRateKHR : return "PhysicalDeviceFragmentShadingRateKHR"; case StructureType::ePhysicalDeviceShaderCoreProperties2AMD : return "PhysicalDeviceShaderCoreProperties2AMD"; case StructureType::ePhysicalDeviceCoherentMemoryFeaturesAMD : return "PhysicalDeviceCoherentMemoryFeaturesAMD"; + case StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT : return "PhysicalDeviceShaderImageAtomicInt64FeaturesEXT"; case StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT : return "PhysicalDeviceMemoryBudgetPropertiesEXT"; case StructureType::ePhysicalDeviceMemoryPriorityFeaturesEXT : return "PhysicalDeviceMemoryPriorityFeaturesEXT"; case StructureType::eMemoryPriorityAllocateInfoEXT : return "MemoryPriorityAllocateInfoEXT"; @@ -8967,7 +9166,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceShaderAtomicFloatFeaturesEXT : return "PhysicalDeviceShaderAtomicFloatFeaturesEXT"; case StructureType::ePhysicalDeviceIndexTypeUint8FeaturesEXT : return "PhysicalDeviceIndexTypeUint8FeaturesEXT"; case StructureType::ePhysicalDeviceExtendedDynamicStateFeaturesEXT : return "PhysicalDeviceExtendedDynamicStateFeaturesEXT"; - case StructureType::eDeferredOperationInfoKHR : return "DeferredOperationInfoKHR"; case 
StructureType::ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR : return "PhysicalDevicePipelineExecutablePropertiesFeaturesKHR"; case StructureType::ePipelineInfoKHR : return "PipelineInfoKHR"; case StructureType::ePipelineExecutablePropertiesKHR : return "PipelineExecutablePropertiesKHR"; @@ -8987,6 +9185,9 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceTexelBufferAlignmentPropertiesEXT : return "PhysicalDeviceTexelBufferAlignmentPropertiesEXT"; case StructureType::eCommandBufferInheritanceRenderPassTransformInfoQCOM : return "CommandBufferInheritanceRenderPassTransformInfoQCOM"; case StructureType::eRenderPassTransformBeginInfoQCOM : return "RenderPassTransformBeginInfoQCOM"; + case StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT : return "PhysicalDeviceDeviceMemoryReportFeaturesEXT"; + case StructureType::eDeviceDeviceMemoryReportCreateInfoEXT : return "DeviceDeviceMemoryReportCreateInfoEXT"; + case StructureType::eDeviceMemoryReportCallbackDataEXT : return "DeviceMemoryReportCallbackDataEXT"; case StructureType::ePhysicalDeviceRobustness2FeaturesEXT : return "PhysicalDeviceRobustness2FeaturesEXT"; case StructureType::ePhysicalDeviceRobustness2PropertiesEXT : return "PhysicalDeviceRobustness2PropertiesEXT"; case StructureType::eSamplerCustomBorderColorCreateInfoEXT : return "SamplerCustomBorderColorCreateInfoEXT"; @@ -8999,8 +9200,12 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDevicePipelineCreationCacheControlFeaturesEXT : return "PhysicalDevicePipelineCreationCacheControlFeaturesEXT"; case StructureType::ePhysicalDeviceDiagnosticsConfigFeaturesNV : return "PhysicalDeviceDiagnosticsConfigFeaturesNV"; case StructureType::eDeviceDiagnosticsConfigCreateInfoNV : return "DeviceDiagnosticsConfigCreateInfoNV"; + case StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV : return "PhysicalDeviceFragmentShadingRateEnumsPropertiesNV"; + case StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV : return "PhysicalDeviceFragmentShadingRateEnumsFeaturesNV"; + case StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV : return "PipelineFragmentShadingRateEnumStateCreateInfoNV"; case StructureType::ePhysicalDeviceFragmentDensityMap2FeaturesEXT : return "PhysicalDeviceFragmentDensityMap2FeaturesEXT"; case StructureType::ePhysicalDeviceFragmentDensityMap2PropertiesEXT : return "PhysicalDeviceFragmentDensityMap2PropertiesEXT"; + case StructureType::eCopyCommandTransformInfoQCOM : return "CopyCommandTransformInfoQCOM"; case StructureType::ePhysicalDeviceImageRobustnessFeaturesEXT : return "PhysicalDeviceImageRobustnessFeaturesEXT"; case StructureType::eCopyBufferInfo2KHR : return "CopyBufferInfo2KHR"; case StructureType::eCopyImageInfo2KHR : return "CopyImageInfo2KHR"; @@ -9015,7 +9220,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eImageResolve2KHR : return "ImageResolve2KHR"; case StructureType::ePhysicalDevice4444FormatsFeaturesEXT : return "PhysicalDevice4444FormatsFeaturesEXT"; case StructureType::eDirectfbSurfaceCreateInfoEXT : return "DirectfbSurfaceCreateInfoEXT"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9045,7 +9250,7 @@ namespace VULKAN_HPP_NAMESPACE case SubgroupFeatureFlagBits::eClustered : return "Clustered"; case SubgroupFeatureFlagBits::eQuad : return "Quad"; case SubgroupFeatureFlagBits::ePartitionedNV : return "PartitionedNV"; - default: return "invalid"; + default: return "invalid 
( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9061,7 +9266,7 @@ namespace VULKAN_HPP_NAMESPACE { case SubpassContents::eInline : return "Inline"; case SubpassContents::eSecondaryCommandBuffers : return "SecondaryCommandBuffers"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9081,13 +9286,13 @@ namespace VULKAN_HPP_NAMESPACE case SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX : return "PerViewPositionXOnlyNVX"; case SubpassDescriptionFlagBits::eFragmentRegionQCOM : return "FragmentRegionQCOM"; case SubpassDescriptionFlagBits::eShaderResolveQCOM : return "ShaderResolveQCOM"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } enum class SurfaceCounterFlagBitsEXT : VkSurfaceCounterFlagsEXT { - eVblank = VK_SURFACE_COUNTER_VBLANK_EXT + eVblank = VK_SURFACE_COUNTER_VBLANK_BIT_EXT }; VULKAN_HPP_INLINE std::string to_string( SurfaceCounterFlagBitsEXT value ) @@ -9095,7 +9300,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case SurfaceCounterFlagBitsEXT::eVblank : return "Vblank"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9125,7 +9330,7 @@ namespace VULKAN_HPP_NAMESPACE case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180 : return "HorizontalMirrorRotate180"; case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270 : return "HorizontalMirrorRotate270"; case SurfaceTransformFlagBitsKHR::eInherit : return "Inherit"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9143,7 +9348,7 @@ namespace VULKAN_HPP_NAMESPACE case SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions : return "SplitInstanceBindRegions"; case SwapchainCreateFlagBitsKHR::eProtected : return "Protected"; case SwapchainCreateFlagBitsKHR::eMutableFormat : return "MutableFormat"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9165,7 +9370,7 @@ namespace VULKAN_HPP_NAMESPACE case SystemAllocationScope::eCache : return "Cache"; case SystemAllocationScope::eDevice : return "Device"; case SystemAllocationScope::eInstance : return "Instance"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9182,7 +9387,7 @@ namespace VULKAN_HPP_NAMESPACE { case TessellationDomainOrigin::eUpperLeft : return "UpperLeft"; case TessellationDomainOrigin::eLowerLeft : return "LowerLeft"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9202,7 +9407,7 @@ namespace VULKAN_HPP_NAMESPACE case TimeDomainEXT::eClockMonotonic : return "ClockMonotonic"; case TimeDomainEXT::eClockMonotonicRaw : return "ClockMonotonicRaw"; case TimeDomainEXT::eQueryPerformanceCounter : return "QueryPerformanceCounter"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9228,7 +9433,7 @@ namespace VULKAN_HPP_NAMESPACE case ToolPurposeFlagBitsEXT::eModifyingFeatures : return "ModifyingFeatures"; case ToolPurposeFlagBitsEXT::eDebugReporting : return 
"DebugReporting"; case ToolPurposeFlagBitsEXT::eDebugMarkers : return "DebugMarkers"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9242,7 +9447,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case ValidationCacheHeaderVersionEXT::eOne : return "One"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9258,7 +9463,7 @@ namespace VULKAN_HPP_NAMESPACE { case ValidationCheckEXT::eAll : return "All"; case ValidationCheckEXT::eShaders : return "Shaders"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9284,7 +9489,7 @@ namespace VULKAN_HPP_NAMESPACE case ValidationFeatureDisableEXT::eObjectLifetimes : return "ObjectLifetimes"; case ValidationFeatureDisableEXT::eCoreChecks : return "CoreChecks"; case ValidationFeatureDisableEXT::eUniqueHandles : return "UniqueHandles"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9306,7 +9511,7 @@ namespace VULKAN_HPP_NAMESPACE case ValidationFeatureEnableEXT::eBestPractices : return "BestPractices"; case ValidationFeatureEnableEXT::eDebugPrintf : return "DebugPrintf"; case ValidationFeatureEnableEXT::eSynchronizationValidation : return "SynchronizationValidation"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9328,7 +9533,7 @@ namespace VULKAN_HPP_NAMESPACE case VendorId::eKazan : return "Kazan"; case VendorId::eCodeplay : return "Codeplay"; case VendorId::eMESA : return "MESA"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9344,7 +9549,7 @@ namespace VULKAN_HPP_NAMESPACE { case VertexInputRate::eVertex : return "Vertex"; case VertexInputRate::eInstance : return "Instance"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9372,7 +9577,7 @@ namespace VULKAN_HPP_NAMESPACE case ViewportCoordinateSwizzleNV::eNegativeZ : return "NegativeZ"; case ViewportCoordinateSwizzleNV::ePositiveW : return "PositiveW"; case ViewportCoordinateSwizzleNV::eNegativeW : return "NegativeW"; - default: return "invalid"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )"; } } @@ -9417,6 +9622,47 @@ namespace VULKAN_HPP_NAMESPACE }; + using AccelerationStructureCreateFlagsKHR = Flags<AccelerationStructureCreateFlagBitsKHR>; + + template <> struct FlagTraits<AccelerationStructureCreateFlagBitsKHR> + { + enum : VkFlags + { + allFlags = VkFlags(AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator|( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccelerationStructureCreateFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator&( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccelerationStructureCreateFlagsKHR( bit0 ) & bit1; + 
} + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator^( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccelerationStructureCreateFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator~( AccelerationStructureCreateFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( AccelerationStructureCreateFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCreateFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay ) result += "DeviceAddressCaptureReplay | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + using AccessFlags = Flags<AccessFlagBits>; template <> struct FlagTraits<AccessFlagBits> @@ -9604,7 +9850,7 @@ namespace VULKAN_HPP_NAMESPACE { enum : VkFlags { - allFlags = VkFlags(BufferUsageFlagBits::eTransferSrc) | VkFlags(BufferUsageFlagBits::eTransferDst) | VkFlags(BufferUsageFlagBits::eUniformTexelBuffer) | VkFlags(BufferUsageFlagBits::eStorageTexelBuffer) | VkFlags(BufferUsageFlagBits::eUniformBuffer) | VkFlags(BufferUsageFlagBits::eStorageBuffer) | VkFlags(BufferUsageFlagBits::eIndexBuffer) | VkFlags(BufferUsageFlagBits::eVertexBuffer) | VkFlags(BufferUsageFlagBits::eIndirectBuffer) | VkFlags(BufferUsageFlagBits::eShaderDeviceAddress) | VkFlags(BufferUsageFlagBits::eTransformFeedbackBufferEXT) | VkFlags(BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT) | VkFlags(BufferUsageFlagBits::eConditionalRenderingEXT) | VkFlags(BufferUsageFlagBits::eRayTracingKHR) + allFlags = VkFlags(BufferUsageFlagBits::eTransferSrc) | VkFlags(BufferUsageFlagBits::eTransferDst) | VkFlags(BufferUsageFlagBits::eUniformTexelBuffer) | VkFlags(BufferUsageFlagBits::eStorageTexelBuffer) | VkFlags(BufferUsageFlagBits::eUniformBuffer) | VkFlags(BufferUsageFlagBits::eStorageBuffer) | VkFlags(BufferUsageFlagBits::eIndexBuffer) | VkFlags(BufferUsageFlagBits::eVertexBuffer) | VkFlags(BufferUsageFlagBits::eIndirectBuffer) | VkFlags(BufferUsageFlagBits::eShaderDeviceAddress) | VkFlags(BufferUsageFlagBits::eTransformFeedbackBufferEXT) | VkFlags(BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT) | VkFlags(BufferUsageFlagBits::eConditionalRenderingEXT) | VkFlags(BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR) | VkFlags(BufferUsageFlagBits::eAccelerationStructureStorageKHR) | VkFlags(BufferUsageFlagBits::eShaderBindingTableKHR) }; }; @@ -9647,7 +9893,9 @@ namespace VULKAN_HPP_NAMESPACE if ( value & BufferUsageFlagBits::eTransformFeedbackBufferEXT ) result += "TransformFeedbackBufferEXT | "; if ( value & BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT ) result += "TransformFeedbackCounterBufferEXT | "; if ( value & BufferUsageFlagBits::eConditionalRenderingEXT ) result += "ConditionalRenderingEXT | "; - if ( value & BufferUsageFlagBits::eRayTracingKHR ) result += "RayTracingKHR | "; + if ( value & BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR ) result += "AccelerationStructureBuildInputReadOnlyKHR | "; + if ( value & BufferUsageFlagBits::eAccelerationStructureStorageKHR ) result += "AccelerationStructureStorageKHR | "; + if ( value & BufferUsageFlagBits::eShaderBindingTableKHR ) result += "ShaderBindingTableKHR | "; return "{ " + result.substr(0, result.size() - 3) + " }"; } @@ -10538,6 +10786,22 @@ namespace VULKAN_HPP_NAMESPACE return "{ " + 
result.substr(0, result.size() - 3) + " }"; } + enum class DeviceMemoryReportFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportFlagBitsEXT ) + { + return "(void)"; + } + + using DeviceMemoryReportFlagsEXT = Flags<DeviceMemoryReportFlagBitsEXT>; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportFlagsEXT ) + { + + return "{}"; + } + using DeviceQueueCreateFlags = Flags<DeviceQueueCreateFlagBits>; @@ -11146,7 +11410,7 @@ namespace VULKAN_HPP_NAMESPACE { enum : VkFlags { - allFlags = VkFlags(FormatFeatureFlagBits::eSampledImage) | VkFlags(FormatFeatureFlagBits::eStorageImage) | VkFlags(FormatFeatureFlagBits::eStorageImageAtomic) | VkFlags(FormatFeatureFlagBits::eUniformTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBufferAtomic) | VkFlags(FormatFeatureFlagBits::eVertexBuffer) | VkFlags(FormatFeatureFlagBits::eColorAttachment) | VkFlags(FormatFeatureFlagBits::eColorAttachmentBlend) | VkFlags(FormatFeatureFlagBits::eDepthStencilAttachment) | VkFlags(FormatFeatureFlagBits::eBlitSrc) | VkFlags(FormatFeatureFlagBits::eBlitDst) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterLinear) | VkFlags(FormatFeatureFlagBits::eTransferSrc) | VkFlags(FormatFeatureFlagBits::eTransferDst) | VkFlags(FormatFeatureFlagBits::eMidpointChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable) | VkFlags(FormatFeatureFlagBits::eDisjoint) | VkFlags(FormatFeatureFlagBits::eCositedChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterMinmax) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterCubicIMG) | VkFlags(FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR) | VkFlags(FormatFeatureFlagBits::eFragmentDensityMapEXT) + allFlags = VkFlags(FormatFeatureFlagBits::eSampledImage) | VkFlags(FormatFeatureFlagBits::eStorageImage) | VkFlags(FormatFeatureFlagBits::eStorageImageAtomic) | VkFlags(FormatFeatureFlagBits::eUniformTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBufferAtomic) | VkFlags(FormatFeatureFlagBits::eVertexBuffer) | VkFlags(FormatFeatureFlagBits::eColorAttachment) | VkFlags(FormatFeatureFlagBits::eColorAttachmentBlend) | VkFlags(FormatFeatureFlagBits::eDepthStencilAttachment) | VkFlags(FormatFeatureFlagBits::eBlitSrc) | VkFlags(FormatFeatureFlagBits::eBlitDst) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterLinear) | VkFlags(FormatFeatureFlagBits::eTransferSrc) | VkFlags(FormatFeatureFlagBits::eTransferDst) | VkFlags(FormatFeatureFlagBits::eMidpointChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable) | VkFlags(FormatFeatureFlagBits::eDisjoint) | VkFlags(FormatFeatureFlagBits::eCositedChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterMinmax) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterCubicIMG) | 
VkFlags(FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR) | VkFlags(FormatFeatureFlagBits::eFragmentDensityMapEXT) | VkFlags(FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR) }; }; @@ -11202,6 +11466,7 @@ namespace VULKAN_HPP_NAMESPACE if ( value & FormatFeatureFlagBits::eSampledImageFilterCubicIMG ) result += "SampledImageFilterCubicIMG | "; if ( value & FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR ) result += "AccelerationStructureVertexBufferKHR | "; if ( value & FormatFeatureFlagBits::eFragmentDensityMapEXT ) result += "FragmentDensityMapEXT | "; + if ( value & FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR ) result += "FragmentShadingRateAttachmentKHR | "; return "{ " + result.substr(0, result.size() - 3) + " }"; } @@ -12075,7 +12340,7 @@ namespace VULKAN_HPP_NAMESPACE { enum : VkFlags { - allFlags = VkFlags(PipelineCreateFlagBits::eDisableOptimization) | VkFlags(PipelineCreateFlagBits::eAllowDerivatives) | VkFlags(PipelineCreateFlagBits::eDerivative) | VkFlags(PipelineCreateFlagBits::eViewIndexFromDeviceIndex) | VkFlags(PipelineCreateFlagBits::eDispatchBase) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipAabbsKHR) | VkFlags(PipelineCreateFlagBits::eDeferCompileNV) | VkFlags(PipelineCreateFlagBits::eCaptureStatisticsKHR) | VkFlags(PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR) | VkFlags(PipelineCreateFlagBits::eIndirectBindableNV) | VkFlags(PipelineCreateFlagBits::eLibraryKHR) | VkFlags(PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT) | VkFlags(PipelineCreateFlagBits::eEarlyReturnOnFailureEXT) + allFlags = VkFlags(PipelineCreateFlagBits::eDisableOptimization) | VkFlags(PipelineCreateFlagBits::eAllowDerivatives) | VkFlags(PipelineCreateFlagBits::eDerivative) | VkFlags(PipelineCreateFlagBits::eViewIndexFromDeviceIndex) | VkFlags(PipelineCreateFlagBits::eDispatchBase) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipAabbsKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR) | VkFlags(PipelineCreateFlagBits::eDeferCompileNV) | VkFlags(PipelineCreateFlagBits::eCaptureStatisticsKHR) | VkFlags(PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR) | VkFlags(PipelineCreateFlagBits::eIndirectBindableNV) | VkFlags(PipelineCreateFlagBits::eLibraryKHR) | VkFlags(PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT) | VkFlags(PipelineCreateFlagBits::eEarlyReturnOnFailureEXT) }; }; @@ -12116,6 +12381,7 @@ namespace VULKAN_HPP_NAMESPACE if ( value & PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR ) result += "RayTracingNoNullIntersectionShadersKHR | "; if ( value & PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR ) result += "RayTracingSkipTrianglesKHR | "; if ( value & PipelineCreateFlagBits::eRayTracingSkipAabbsKHR ) result += "RayTracingSkipAabbsKHR | "; + if ( value & 
PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR ) result += "RayTracingShaderGroupHandleCaptureReplayKHR | "; if ( value & PipelineCreateFlagBits::eDeferCompileNV ) result += "DeferCompileNV | "; if ( value & PipelineCreateFlagBits::eCaptureStatisticsKHR ) result += "CaptureStatisticsKHR | "; if ( value & PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR ) result += "CaptureInternalRepresentationsKHR | "; @@ -12378,7 +12644,7 @@ namespace VULKAN_HPP_NAMESPACE { enum : VkFlags { - allFlags = VkFlags(PipelineStageFlagBits::eTopOfPipe) | VkFlags(PipelineStageFlagBits::eDrawIndirect) | VkFlags(PipelineStageFlagBits::eVertexInput) | VkFlags(PipelineStageFlagBits::eVertexShader) | VkFlags(PipelineStageFlagBits::eTessellationControlShader) | VkFlags(PipelineStageFlagBits::eTessellationEvaluationShader) | VkFlags(PipelineStageFlagBits::eGeometryShader) | VkFlags(PipelineStageFlagBits::eFragmentShader) | VkFlags(PipelineStageFlagBits::eEarlyFragmentTests) | VkFlags(PipelineStageFlagBits::eLateFragmentTests) | VkFlags(PipelineStageFlagBits::eColorAttachmentOutput) | VkFlags(PipelineStageFlagBits::eComputeShader) | VkFlags(PipelineStageFlagBits::eTransfer) | VkFlags(PipelineStageFlagBits::eBottomOfPipe) | VkFlags(PipelineStageFlagBits::eHost) | VkFlags(PipelineStageFlagBits::eAllGraphics) | VkFlags(PipelineStageFlagBits::eAllCommands) | VkFlags(PipelineStageFlagBits::eTransformFeedbackEXT) | VkFlags(PipelineStageFlagBits::eConditionalRenderingEXT) | VkFlags(PipelineStageFlagBits::eRayTracingShaderKHR) | VkFlags(PipelineStageFlagBits::eAccelerationStructureBuildKHR) | VkFlags(PipelineStageFlagBits::eShadingRateImageNV) | VkFlags(PipelineStageFlagBits::eTaskShaderNV) | VkFlags(PipelineStageFlagBits::eMeshShaderNV) | VkFlags(PipelineStageFlagBits::eFragmentDensityProcessEXT) | VkFlags(PipelineStageFlagBits::eCommandPreprocessNV) + allFlags = VkFlags(PipelineStageFlagBits::eTopOfPipe) | VkFlags(PipelineStageFlagBits::eDrawIndirect) | VkFlags(PipelineStageFlagBits::eVertexInput) | VkFlags(PipelineStageFlagBits::eVertexShader) | VkFlags(PipelineStageFlagBits::eTessellationControlShader) | VkFlags(PipelineStageFlagBits::eTessellationEvaluationShader) | VkFlags(PipelineStageFlagBits::eGeometryShader) | VkFlags(PipelineStageFlagBits::eFragmentShader) | VkFlags(PipelineStageFlagBits::eEarlyFragmentTests) | VkFlags(PipelineStageFlagBits::eLateFragmentTests) | VkFlags(PipelineStageFlagBits::eColorAttachmentOutput) | VkFlags(PipelineStageFlagBits::eComputeShader) | VkFlags(PipelineStageFlagBits::eTransfer) | VkFlags(PipelineStageFlagBits::eBottomOfPipe) | VkFlags(PipelineStageFlagBits::eHost) | VkFlags(PipelineStageFlagBits::eAllGraphics) | VkFlags(PipelineStageFlagBits::eAllCommands) | VkFlags(PipelineStageFlagBits::eTransformFeedbackEXT) | VkFlags(PipelineStageFlagBits::eConditionalRenderingEXT) | VkFlags(PipelineStageFlagBits::eAccelerationStructureBuildKHR) | VkFlags(PipelineStageFlagBits::eRayTracingShaderKHR) | VkFlags(PipelineStageFlagBits::eShadingRateImageNV) | VkFlags(PipelineStageFlagBits::eTaskShaderNV) | VkFlags(PipelineStageFlagBits::eMeshShaderNV) | VkFlags(PipelineStageFlagBits::eFragmentDensityProcessEXT) | VkFlags(PipelineStageFlagBits::eCommandPreprocessNV) }; }; @@ -12427,8 +12693,8 @@ namespace VULKAN_HPP_NAMESPACE if ( value & PipelineStageFlagBits::eAllCommands ) result += "AllCommands | "; if ( value & PipelineStageFlagBits::eTransformFeedbackEXT ) result += "TransformFeedbackEXT | "; if ( value & PipelineStageFlagBits::eConditionalRenderingEXT ) result += 
"ConditionalRenderingEXT | "; - if ( value & PipelineStageFlagBits::eRayTracingShaderKHR ) result += "RayTracingShaderKHR | "; if ( value & PipelineStageFlagBits::eAccelerationStructureBuildKHR ) result += "AccelerationStructureBuildKHR | "; + if ( value & PipelineStageFlagBits::eRayTracingShaderKHR ) result += "RayTracingShaderKHR | "; if ( value & PipelineStageFlagBits::eShadingRateImageNV ) result += "ShadingRateImageNV | "; if ( value & PipelineStageFlagBits::eTaskShaderNV ) result += "TaskShaderNV | "; if ( value & PipelineStageFlagBits::eMeshShaderNV ) result += "MeshShaderNV | "; @@ -12876,6 +13142,13 @@ namespace VULKAN_HPP_NAMESPACE return "{ " + result.substr(0, result.size() - 3) + " }"; } + enum class SemaphoreCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreCreateFlagBits ) + { + return "(void)"; + } using SemaphoreCreateFlags = Flags<SemaphoreCreateFlagBits>; @@ -13849,15 +14122,6 @@ namespace VULKAN_HPP_NAMESPACE : SystemError( make_error_code( Result::eErrorInvalidShaderNV ), message ) {} }; - class IncompatibleVersionKHRError : public SystemError - { - public: - IncompatibleVersionKHRError( std::string const& message ) - : SystemError( make_error_code( Result::eErrorIncompatibleVersionKHR ), message ) {} - IncompatibleVersionKHRError( char const * message ) - : SystemError( make_error_code( Result::eErrorIncompatibleVersionKHR ), message ) {} - }; - class InvalidDrmFormatModifierPlaneLayoutEXTError : public SystemError { public: @@ -13913,7 +14177,6 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorIncompatibleDisplayKHR: throw IncompatibleDisplayKHRError( message ); case Result::eErrorValidationFailedEXT: throw ValidationFailedEXTError( message ); case Result::eErrorInvalidShaderNV: throw InvalidShaderNVError( message ); - case Result::eErrorIncompatibleVersionKHR: throw IncompatibleVersionKHRError( message ); case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT: throw InvalidDrmFormatModifierPlaneLayoutEXTError( message ); case Result::eErrorNotPermittedEXT: throw NotPermittedEXTError( message ); case Result::eErrorFullScreenExclusiveModeLostEXT: throw FullScreenExclusiveModeLostEXTError( message ); @@ -14089,6 +14352,7 @@ namespace VULKAN_HPP_NAMESPACE { #ifdef VULKAN_HPP_NO_EXCEPTIONS ignore(message); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); #else if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) @@ -14104,6 +14368,7 @@ namespace VULKAN_HPP_NAMESPACE { #ifdef VULKAN_HPP_NO_EXCEPTIONS ignore(message); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); #else if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) @@ -14141,6 +14406,7 @@ namespace VULKAN_HPP_NAMESPACE { # ifdef VULKAN_HPP_NO_EXCEPTIONS ignore( message ); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); # else if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) @@ -14177,6 +14443,7 @@ namespace VULKAN_HPP_NAMESPACE { # ifdef VULKAN_HPP_NO_EXCEPTIONS ignore( message ); + ignore(successCodes); // just in case 
VULKAN_HPP_ASSERT_ON_RESULT is empty VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); # else if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) @@ -14398,9 +14665,7 @@ namespace VULKAN_HPP_NAMESPACE { static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; }; - using AccelerationStructureNV = AccelerationStructureKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS union DeviceOrHostAddressConstKHR { DeviceOrHostAddressConstKHR( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const& rhs ) VULKAN_HPP_NOEXCEPT @@ -14452,17 +14717,15 @@ namespace VULKAN_HPP_NAMESPACE const void* hostAddress; #endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureGeometryTrianglesDataKHR { static const bool allowDuplicate = false; static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureGeometryTrianglesDataKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - AccelerationStructureGeometryTrianglesDataKHR(VULKAN_HPP_NAMESPACE::Format vertexFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData_ = {}) VULKAN_HPP_NOEXCEPT - : vertexFormat( vertexFormat_ ), vertexData( vertexData_ ), vertexStride( vertexStride_ ), indexType( indexType_ ), indexData( indexData_ ), transformData( transformData_ ) + AccelerationStructureGeometryTrianglesDataKHR(VULKAN_HPP_NAMESPACE::Format vertexFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ = {}, uint32_t maxVertex_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData_ = {}) VULKAN_HPP_NOEXCEPT + : vertexFormat( vertexFormat_ ), vertexData( vertexData_ ), vertexStride( vertexStride_ ), maxVertex( maxVertex_ ), indexType( indexType_ ), indexData( indexData_ ), transformData( transformData_ ) {} AccelerationStructureGeometryTrianglesDataKHR( AccelerationStructureGeometryTrianglesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -14509,6 +14772,12 @@ namespace VULKAN_HPP_NAMESPACE return *this; } + AccelerationStructureGeometryTrianglesDataKHR & setMaxVertex( uint32_t maxVertex_ ) VULKAN_HPP_NOEXCEPT + { + maxVertex = maxVertex_; + return *this; + } + AccelerationStructureGeometryTrianglesDataKHR & setIndexType( VULKAN_HPP_NAMESPACE::IndexType indexType_ ) VULKAN_HPP_NOEXCEPT { indexType = indexType_; @@ -14547,6 +14816,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Format vertexFormat = VULKAN_HPP_NAMESPACE::Format::eUndefined; VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData = {}; VULKAN_HPP_NAMESPACE::DeviceSize vertexStride = {}; + uint32_t maxVertex = {}; VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData = {}; VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData = {}; @@ -14560,9 +14830,7 @@ 
namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureGeometryTrianglesDataKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureGeometryAabbsDataKHR { static const bool allowDuplicate = false; @@ -14640,9 +14908,7 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureGeometryAabbsDataKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureGeometryInstancesDataKHR { static const bool allowDuplicate = false; @@ -14720,9 +14986,7 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureGeometryInstancesDataKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS union AccelerationStructureGeometryDataKHR { AccelerationStructureGeometryDataKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR const& rhs ) VULKAN_HPP_NOEXCEPT @@ -14786,9 +15050,7 @@ namespace VULKAN_HPP_NAMESPACE VkAccelerationStructureGeometryInstancesDataKHR instances; #endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureGeometryKHR { static const bool allowDuplicate = false; @@ -14873,9 +15135,7 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureGeometryKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS union DeviceOrHostAddressKHR { DeviceOrHostAddressKHR( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const& rhs ) VULKAN_HPP_NOEXCEPT @@ -14927,17 +15187,15 @@ namespace VULKAN_HPP_NAMESPACE void* hostAddress; #endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureBuildGeometryInfoKHR { static const bool allowDuplicate = false; static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureBuildGeometryInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - AccelerationStructureBuildGeometryInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 update_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::Bool32 geometryArrayOfPointers_ = {}, uint32_t geometryCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {}) VULKAN_HPP_NOEXCEPT - : type( type_ ), flags( flags_ ), update( update_ ), srcAccelerationStructure( srcAccelerationStructure_ ), dstAccelerationStructure( dstAccelerationStructure_ ), geometryArrayOfPointers( geometryArrayOfPointers_ ), geometryCount( geometryCount_ ), ppGeometries( ppGeometries_ ), scratchData( scratchData_ ) + AccelerationStructureBuildGeometryInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR::eBuild, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_ = {}, uint32_t 
geometryCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), flags( flags_ ), mode( mode_ ), srcAccelerationStructure( srcAccelerationStructure_ ), dstAccelerationStructure( dstAccelerationStructure_ ), geometryCount( geometryCount_ ), pGeometries( pGeometries_ ), ppGeometries( ppGeometries_ ), scratchData( scratchData_ ) {} AccelerationStructureBuildGeometryInfoKHR( AccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -14946,6 +15204,21 @@ namespace VULKAN_HPP_NAMESPACE { *this = rhs; } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR> const & geometries_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const > const & pGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {} ) + : type( type_ ), flags( flags_ ), mode( mode_ ), srcAccelerationStructure( srcAccelerationStructure_ ), dstAccelerationStructure( dstAccelerationStructure_ ), geometryCount( static_cast<uint32_t>( geometries_.size() ) ), pGeometries( geometries_.data() ), ppGeometries( pGeometries_.data() ), scratchData( scratchData_ ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( geometries_.size() == pGeometries_.size() ); +#else + if ( geometries_.size() != pGeometries_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureBuildGeometryInfoKHR::AccelerationStructureBuildGeometryInfoKHR: geometries_.size() != pGeometries_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) AccelerationStructureBuildGeometryInfoKHR & operator=( VkAccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT @@ -14978,9 +15251,9 @@ namespace VULKAN_HPP_NAMESPACE return *this; } - AccelerationStructureBuildGeometryInfoKHR & setUpdate( VULKAN_HPP_NAMESPACE::Bool32 update_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildGeometryInfoKHR & setMode( VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_ ) VULKAN_HPP_NOEXCEPT { - update = update_; + mode = mode_; return *this; } @@ -14996,24 +15269,42 @@ namespace VULKAN_HPP_NAMESPACE return *this; } - AccelerationStructureBuildGeometryInfoKHR & setGeometryArrayOfPointers( VULKAN_HPP_NAMESPACE::Bool32 geometryArrayOfPointers_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildGeometryInfoKHR & setGeometryCount( uint32_t geometryCount_ ) VULKAN_HPP_NOEXCEPT { - geometryArrayOfPointers = geometryArrayOfPointers_; + geometryCount = geometryCount_; return *this; } - AccelerationStructureBuildGeometryInfoKHR & setGeometryCount( uint32_t geometryCount_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildGeometryInfoKHR & setPGeometries( const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries_ 
) VULKAN_HPP_NOEXCEPT { - geometryCount = geometryCount_; + pGeometries = pGeometries_; return *this; } +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setGeometries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR> const & geometries_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = static_cast<uint32_t>( geometries_.size() ); + pGeometries = geometries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setPpGeometries( const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries_ ) VULKAN_HPP_NOEXCEPT { ppGeometries = ppGeometries_; return *this; } +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setPGeometries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const > const & pGeometries_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = static_cast<uint32_t>( pGeometries_.size() ); + ppGeometries = pGeometries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setScratchData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const & scratchData_ ) VULKAN_HPP_NOEXCEPT { scratchData = scratchData_; @@ -15039,11 +15330,11 @@ namespace VULKAN_HPP_NAMESPACE const void* pNext = {}; VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel; VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags = {}; - VULKAN_HPP_NAMESPACE::Bool32 update = {}; + VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode = VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR::eBuild; VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure = {}; VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure = {}; - VULKAN_HPP_NAMESPACE::Bool32 geometryArrayOfPointers = {}; uint32_t geometryCount = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries = {}; const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries = {}; VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData = {}; @@ -15056,78 +15347,76 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureBuildGeometryInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct AccelerationStructureBuildOffsetInfoKHR + struct AccelerationStructureBuildRangeInfoKHR { #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureBuildOffsetInfoKHR(uint32_t primitiveCount_ = {}, uint32_t primitiveOffset_ = {}, uint32_t firstVertex_ = {}, uint32_t transformOffset_ = {}) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildRangeInfoKHR(uint32_t primitiveCount_ = {}, uint32_t primitiveOffset_ = {}, uint32_t firstVertex_ = {}, uint32_t transformOffset_ = {}) VULKAN_HPP_NOEXCEPT : primitiveCount( primitiveCount_ ), primitiveOffset( primitiveOffset_ ), firstVertex( firstVertex_ ), transformOffset( transformOffset_ ) {} - VULKAN_HPP_CONSTEXPR AccelerationStructureBuildOffsetInfoKHR( AccelerationStructureBuildOffsetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildRangeInfoKHR( AccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - AccelerationStructureBuildOffsetInfoKHR( 
VkAccelerationStructureBuildOffsetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR( VkAccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - AccelerationStructureBuildOffsetInfoKHR & operator=( VkAccelerationStructureBuildOffsetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & operator=( VkAccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR const *>( &rhs ); return *this; } - AccelerationStructureBuildOffsetInfoKHR & operator=( AccelerationStructureBuildOffsetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & operator=( AccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureBuildOffsetInfoKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureBuildRangeInfoKHR ) ); return *this; } - AccelerationStructureBuildOffsetInfoKHR & setPrimitiveCount( uint32_t primitiveCount_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & setPrimitiveCount( uint32_t primitiveCount_ ) VULKAN_HPP_NOEXCEPT { primitiveCount = primitiveCount_; return *this; } - AccelerationStructureBuildOffsetInfoKHR & setPrimitiveOffset( uint32_t primitiveOffset_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & setPrimitiveOffset( uint32_t primitiveOffset_ ) VULKAN_HPP_NOEXCEPT { primitiveOffset = primitiveOffset_; return *this; } - AccelerationStructureBuildOffsetInfoKHR & setFirstVertex( uint32_t firstVertex_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & setFirstVertex( uint32_t firstVertex_ ) VULKAN_HPP_NOEXCEPT { firstVertex = firstVertex_; return *this; } - AccelerationStructureBuildOffsetInfoKHR & setTransformOffset( uint32_t transformOffset_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildRangeInfoKHR & setTransformOffset( uint32_t transformOffset_ ) VULKAN_HPP_NOEXCEPT { transformOffset = transformOffset_; return *this; } - operator VkAccelerationStructureBuildOffsetInfoKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkAccelerationStructureBuildRangeInfoKHR const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkAccelerationStructureBuildOffsetInfoKHR*>( this ); + return *reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR*>( this ); } - operator VkAccelerationStructureBuildOffsetInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkAccelerationStructureBuildRangeInfoKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkAccelerationStructureBuildOffsetInfoKHR*>( this ); + return *reinterpret_cast<VkAccelerationStructureBuildRangeInfoKHR*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( AccelerationStructureBuildOffsetInfoKHR const& ) const = default; + auto operator<=>( AccelerationStructureBuildRangeInfoKHR const& ) const = default; #else - bool operator==( AccelerationStructureBuildOffsetInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( AccelerationStructureBuildRangeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return ( primitiveCount == rhs.primitiveCount ) && ( primitiveOffset == rhs.primitiveOffset ) @@ -15135,7 +15424,7 @@ namespace VULKAN_HPP_NAMESPACE && ( 
transformOffset == rhs.transformOffset ); } - bool operator!=( AccelerationStructureBuildOffsetInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( AccelerationStructureBuildRangeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -15150,148 +15439,220 @@ namespace VULKAN_HPP_NAMESPACE uint32_t transformOffset = {}; }; - static_assert( sizeof( AccelerationStructureBuildOffsetInfoKHR ) == sizeof( VkAccelerationStructureBuildOffsetInfoKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<AccelerationStructureBuildOffsetInfoKHR>::value, "struct wrapper is not a standard layout!" ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + static_assert( sizeof( AccelerationStructureBuildRangeInfoKHR ) == sizeof( VkAccelerationStructureBuildRangeInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<AccelerationStructureBuildRangeInfoKHR>::value, "struct wrapper is not a standard layout!" ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct AccelerationStructureCreateGeometryTypeInfoKHR + struct AccelerationStructureBuildSizesInfoKHR { static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureCreateGeometryTypeInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureBuildSizesInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureCreateGeometryTypeInfoKHR(VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles, uint32_t maxPrimitiveCount_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16, uint32_t maxVertexCount_ = {}, VULKAN_HPP_NAMESPACE::Format vertexFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::Bool32 allowsTransforms_ = {}) VULKAN_HPP_NOEXCEPT - : geometryType( geometryType_ ), maxPrimitiveCount( maxPrimitiveCount_ ), indexType( indexType_ ), maxVertexCount( maxVertexCount_ ), vertexFormat( vertexFormat_ ), allowsTransforms( allowsTransforms_ ) + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildSizesInfoKHR(VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructureSize( accelerationStructureSize_ ), updateScratchSize( updateScratchSize_ ), buildScratchSize( buildScratchSize_ ) {} - VULKAN_HPP_CONSTEXPR AccelerationStructureCreateGeometryTypeInfoKHR( AccelerationStructureCreateGeometryTypeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildSizesInfoKHR( AccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - AccelerationStructureCreateGeometryTypeInfoKHR( VkAccelerationStructureCreateGeometryTypeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR( VkAccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - AccelerationStructureCreateGeometryTypeInfoKHR & operator=( VkAccelerationStructureCreateGeometryTypeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & operator=( VkAccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = 
*reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR const *>( &rhs ); return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & operator=( AccelerationStructureCreateGeometryTypeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & operator=( AccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureCreateGeometryTypeInfoKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureBuildSizesInfoKHR ) ); return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & setGeometryType( VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & setAccelerationStructureSize( VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize_ ) VULKAN_HPP_NOEXCEPT { - geometryType = geometryType_; + accelerationStructureSize = accelerationStructureSize_; return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & setMaxPrimitiveCount( uint32_t maxPrimitiveCount_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & setUpdateScratchSize( VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize_ ) VULKAN_HPP_NOEXCEPT { - maxPrimitiveCount = maxPrimitiveCount_; + updateScratchSize = updateScratchSize_; return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & setIndexType( VULKAN_HPP_NAMESPACE::IndexType indexType_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureBuildSizesInfoKHR & setBuildScratchSize( VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ ) VULKAN_HPP_NOEXCEPT { - indexType = indexType_; + buildScratchSize = buildScratchSize_; return *this; } - AccelerationStructureCreateGeometryTypeInfoKHR & setMaxVertexCount( uint32_t maxVertexCount_ ) VULKAN_HPP_NOEXCEPT + + operator VkAccelerationStructureBuildSizesInfoKHR const&() const VULKAN_HPP_NOEXCEPT { - maxVertexCount = maxVertexCount_; - return *this; + return *reinterpret_cast<const VkAccelerationStructureBuildSizesInfoKHR*>( this ); } - AccelerationStructureCreateGeometryTypeInfoKHR & setVertexFormat( VULKAN_HPP_NAMESPACE::Format vertexFormat_ ) VULKAN_HPP_NOEXCEPT + operator VkAccelerationStructureBuildSizesInfoKHR &() VULKAN_HPP_NOEXCEPT { - vertexFormat = vertexFormat_; - return *this; + return *reinterpret_cast<VkAccelerationStructureBuildSizesInfoKHR*>( this ); } - AccelerationStructureCreateGeometryTypeInfoKHR & setAllowsTransforms( VULKAN_HPP_NAMESPACE::Bool32 allowsTransforms_ ) VULKAN_HPP_NOEXCEPT + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureBuildSizesInfoKHR const& ) const = default; +#else + bool operator==( AccelerationStructureBuildSizesInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { - allowsTransforms = allowsTransforms_; - return *this; + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructureSize == rhs.accelerationStructureSize ) + && ( updateScratchSize == rhs.updateScratchSize ) + && ( buildScratchSize == rhs.buildScratchSize ); } - - operator VkAccelerationStructureCreateGeometryTypeInfoKHR const&() const VULKAN_HPP_NOEXCEPT + bool operator!=( 
AccelerationStructureBuildSizesInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkAccelerationStructureCreateGeometryTypeInfoKHR*>( this ); + return !operator==( rhs ); } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureBuildSizesInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize = {}; + VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize = {}; + VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize = {}; - operator VkAccelerationStructureCreateGeometryTypeInfoKHR &() VULKAN_HPP_NOEXCEPT + }; + static_assert( sizeof( AccelerationStructureBuildSizesInfoKHR ) == sizeof( VkAccelerationStructureBuildSizesInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<AccelerationStructureBuildSizesInfoKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eAccelerationStructureBuildSizesInfoKHR> + { + using Type = AccelerationStructureBuildSizesInfoKHR; + }; + + class Buffer + { + public: + using CType = VkBuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eBuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer; + + public: + VULKAN_HPP_CONSTEXPR Buffer() VULKAN_HPP_NOEXCEPT + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Buffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Buffer( VkBuffer buffer ) VULKAN_HPP_NOEXCEPT + : m_buffer( buffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Buffer & operator=(VkBuffer buffer) VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkAccelerationStructureCreateGeometryTypeInfoKHR*>( this ); + m_buffer = buffer; + return *this; } +#endif + Buffer & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_buffer = VK_NULL_HANDLE; + return *this; + } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( AccelerationStructureCreateGeometryTypeInfoKHR const& ) const = default; + auto operator<=>( Buffer const& ) const = default; #else - bool operator==( AccelerationStructureCreateGeometryTypeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT { - return ( sType == rhs.sType ) - && ( pNext == rhs.pNext ) - && ( geometryType == rhs.geometryType ) - && ( maxPrimitiveCount == rhs.maxPrimitiveCount ) - && ( indexType == rhs.indexType ) - && ( maxVertexCount == rhs.maxVertexCount ) - && ( vertexFormat == rhs.vertexFormat ) - && ( allowsTransforms == rhs.allowsTransforms ); + return m_buffer == rhs.m_buffer; } - bool operator!=( AccelerationStructureCreateGeometryTypeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT { - return !operator==( rhs ); + return m_buffer != rhs.m_buffer; + } + + bool operator<(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_buffer < rhs.m_buffer; } #endif + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const VULKAN_HPP_NOEXCEPT + { + return m_buffer; + } + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_buffer != VK_NULL_HANDLE; + } - public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureCreateGeometryTypeInfoKHR; - const void* 
pNext = {}; - VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles; - uint32_t maxPrimitiveCount = {}; - VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; - uint32_t maxVertexCount = {}; - VULKAN_HPP_NAMESPACE::Format vertexFormat = VULKAN_HPP_NAMESPACE::Format::eUndefined; - VULKAN_HPP_NAMESPACE::Bool32 allowsTransforms = {}; + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_buffer == VK_NULL_HANDLE; + } + private: + VkBuffer m_buffer; }; - static_assert( sizeof( AccelerationStructureCreateGeometryTypeInfoKHR ) == sizeof( VkAccelerationStructureCreateGeometryTypeInfoKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<AccelerationStructureCreateGeometryTypeInfoKHR>::value, "struct wrapper is not a standard layout!" ); + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Buffer ) == sizeof( VkBuffer ), "handle and wrapper have different size!" ); template <> - struct CppType<StructureType, StructureType::eAccelerationStructureCreateGeometryTypeInfoKHR> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type<ObjectType::eBuffer> { - using Type = AccelerationStructureCreateGeometryTypeInfoKHR; + using type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eBuffer> + { + using Type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer> + { + using Type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + + template <> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::Buffer> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureCreateInfoKHR { static const bool allowDuplicate = false; static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureCreateInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoKHR(VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ = {}, uint32_t maxGeometryCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos_ = {}, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}) VULKAN_HPP_NOEXCEPT - : compactedSize( compactedSize_ ), type( type_ ), flags( flags_ ), maxGeometryCount( maxGeometryCount_ ), pGeometryInfos( pGeometryInfos_ ), deviceAddress( deviceAddress_ ) + VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}) VULKAN_HPP_NOEXCEPT + : createFlags( createFlags_ ), buffer( buffer_ ), offset( offset_ ), size( size_ ), type( type_ ), deviceAddress( deviceAddress_ ) {} VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoKHR( AccelerationStructureCreateInfoKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT = default; @@ -15300,12 +15661,6 @@ namespace VULKAN_HPP_NAMESPACE { *this = rhs; } - -#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - AccelerationStructureCreateInfoKHR( VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_, VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR> const & geometryInfos_, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {} ) - : compactedSize( compactedSize_ ), type( type_ ), flags( flags_ ), maxGeometryCount( static_cast<uint32_t>( geometryInfos_.size() ) ), pGeometryInfos( geometryInfos_.data() ), deviceAddress( deviceAddress_ ) - {} -#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) AccelerationStructureCreateInfoKHR & operator=( VkAccelerationStructureCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT @@ -15326,44 +15681,35 @@ namespace VULKAN_HPP_NAMESPACE return *this; } - AccelerationStructureCreateInfoKHR & setCompactedSize( VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureCreateInfoKHR & setCreateFlags( VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags_ ) VULKAN_HPP_NOEXCEPT { - compactedSize = compactedSize_; + createFlags = createFlags_; return *this; } - AccelerationStructureCreateInfoKHR & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureCreateInfoKHR & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT { - type = type_; - return *this; - } - - AccelerationStructureCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT - { - flags = flags_; + buffer = buffer_; return *this; } - AccelerationStructureCreateInfoKHR & setMaxGeometryCount( uint32_t maxGeometryCount_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureCreateInfoKHR & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT { - maxGeometryCount = maxGeometryCount_; + offset = offset_; return *this; } - AccelerationStructureCreateInfoKHR & setPGeometryInfos( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureCreateInfoKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT { - pGeometryInfos = pGeometryInfos_; + size = size_; return *this; } -#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - AccelerationStructureCreateInfoKHR & setGeometryInfos( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR> const & geometryInfos_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureCreateInfoKHR & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ ) VULKAN_HPP_NOEXCEPT { - maxGeometryCount = static_cast<uint32_t>( geometryInfos_.size() ); - pGeometryInfos = geometryInfos_.data(); + type = type_; return *this; } -#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) AccelerationStructureCreateInfoKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT { @@ -15390,11 +15736,11 @@ namespace VULKAN_HPP_NAMESPACE { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) - && ( compactedSize == rhs.compactedSize ) + && ( createFlags == rhs.createFlags ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && 
( size == rhs.size ) && ( type == rhs.type ) - && ( flags == rhs.flags ) - && ( maxGeometryCount == rhs.maxGeometryCount ) - && ( pGeometryInfos == rhs.pGeometryInfos ) && ( deviceAddress == rhs.deviceAddress ); } @@ -15409,11 +15755,11 @@ namespace VULKAN_HPP_NAMESPACE public: const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureCreateInfoKHR; const void* pNext = {}; - VULKAN_HPP_NAMESPACE::DeviceSize compactedSize = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel; - VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags = {}; - uint32_t maxGeometryCount = {}; - const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos = {}; VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; }; @@ -15425,107 +15771,6 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureCreateInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - - class Buffer - { - public: - using CType = VkBuffer; - - static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eBuffer; - static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer; - - public: - VULKAN_HPP_CONSTEXPR Buffer() VULKAN_HPP_NOEXCEPT - : m_buffer(VK_NULL_HANDLE) - {} - - VULKAN_HPP_CONSTEXPR Buffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT - : m_buffer(VK_NULL_HANDLE) - {} - - VULKAN_HPP_TYPESAFE_EXPLICIT Buffer( VkBuffer buffer ) VULKAN_HPP_NOEXCEPT - : m_buffer( buffer ) - {} - -#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) - Buffer & operator=(VkBuffer buffer) VULKAN_HPP_NOEXCEPT - { - m_buffer = buffer; - return *this; - } -#endif - - Buffer & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT - { - m_buffer = VK_NULL_HANDLE; - return *this; - } - -#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( Buffer const& ) const = default; -#else - bool operator==( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer == rhs.m_buffer; - } - - bool operator!=(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer != rhs.m_buffer; - } - - bool operator<(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer < rhs.m_buffer; - } -#endif - - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const VULKAN_HPP_NOEXCEPT - { - return m_buffer; - } - - explicit operator bool() const VULKAN_HPP_NOEXCEPT - { - return m_buffer != VK_NULL_HANDLE; - } - - bool operator!() const VULKAN_HPP_NOEXCEPT - { - return m_buffer == VK_NULL_HANDLE; - } - - private: - VkBuffer m_buffer; - }; - static_assert( sizeof( VULKAN_HPP_NAMESPACE::Buffer ) == sizeof( VkBuffer ), "handle and wrapper have different size!" ); - - template <> - struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::eBuffer> - { - using type = VULKAN_HPP_NAMESPACE::Buffer; - }; - - template <> - struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eBuffer> - { - using Type = VULKAN_HPP_NAMESPACE::Buffer; - }; - - - template <> - struct CppType<VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer> - { - using Type = VULKAN_HPP_NAMESPACE::Buffer; - }; - - - template <> - struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::Buffer> - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; - }; struct GeometryTrianglesNV { @@ -16215,7 +16460,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = AccelerationStructureCreateInfoNV; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS struct AccelerationStructureDeviceAddressInfoKHR { static const bool allowDuplicate = false; @@ -16302,7 +16546,6 @@ namespace VULKAN_HPP_NAMESPACE { using Type = AccelerationStructureDeviceAddressInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct TransformMatrixKHR { @@ -16486,110 +16729,105 @@ namespace VULKAN_HPP_NAMESPACE static_assert( std::is_standard_layout<AccelerationStructureInstanceKHR>::value, "struct wrapper is not a standard layout!" ); using AccelerationStructureInstanceNV = AccelerationStructureInstanceKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct AccelerationStructureMemoryRequirementsInfoKHR + class AccelerationStructureNV { - static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureMemoryRequirementsInfoKHR; + public: + using CType = VkAccelerationStructureNV; -#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeKHR::eObject, VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType_ = VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR::eHost, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ = {}) VULKAN_HPP_NOEXCEPT - : type( type_ ), buildType( buildType_ ), accelerationStructure( accelerationStructure_ ) - {} + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureNV; - VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoKHR( AccelerationStructureMemoryRequirementsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + public: + VULKAN_HPP_CONSTEXPR AccelerationStructureNV() VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV(VK_NULL_HANDLE) + {} - AccelerationStructureMemoryRequirementsInfoKHR( VkAccelerationStructureMemoryRequirementsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - *this = rhs; - } -#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV(VK_NULL_HANDLE) + {} - AccelerationStructureMemoryRequirementsInfoKHR & operator=( VkAccelerationStructureMemoryRequirementsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoKHR const *>( &rhs ); - return *this; - } + 
VULKAN_HPP_TYPESAFE_EXPLICIT AccelerationStructureNV( VkAccelerationStructureNV accelerationStructureNV ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV( accelerationStructureNV ) + {} - AccelerationStructureMemoryRequirementsInfoKHR & operator=( AccelerationStructureMemoryRequirementsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + AccelerationStructureNV & operator=(VkAccelerationStructureNV accelerationStructureNV) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureMemoryRequirementsInfoKHR ) ); + m_accelerationStructureNV = accelerationStructureNV; return *this; } +#endif - AccelerationStructureMemoryRequirementsInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureNV & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT { - pNext = pNext_; + m_accelerationStructureNV = VK_NULL_HANDLE; return *this; } - AccelerationStructureMemoryRequirementsInfoKHR & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeKHR type_ ) VULKAN_HPP_NOEXCEPT +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureNV const& ) const = default; +#else + bool operator==( AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT { - type = type_; - return *this; + return m_accelerationStructureNV == rhs.m_accelerationStructureNV; } - AccelerationStructureMemoryRequirementsInfoKHR & setBuildType( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType_ ) VULKAN_HPP_NOEXCEPT + bool operator!=(AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT { - buildType = buildType_; - return *this; + return m_accelerationStructureNV != rhs.m_accelerationStructureNV; } - AccelerationStructureMemoryRequirementsInfoKHR & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + bool operator<(AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT { - accelerationStructure = accelerationStructure_; - return *this; + return m_accelerationStructureNV < rhs.m_accelerationStructureNV; } +#endif - - operator VkAccelerationStructureMemoryRequirementsInfoKHR const&() const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkAccelerationStructureNV() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoKHR*>( this ); + return m_accelerationStructureNV; } - operator VkAccelerationStructureMemoryRequirementsInfoKHR &() VULKAN_HPP_NOEXCEPT + explicit operator bool() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkAccelerationStructureMemoryRequirementsInfoKHR*>( this ); + return m_accelerationStructureNV != VK_NULL_HANDLE; } - -#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( AccelerationStructureMemoryRequirementsInfoKHR const& ) const = default; -#else - bool operator==( AccelerationStructureMemoryRequirementsInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!() const VULKAN_HPP_NOEXCEPT { - return ( sType == rhs.sType ) - && ( pNext == rhs.pNext ) - && ( type == rhs.type ) - && ( buildType == rhs.buildType ) - && ( accelerationStructure == rhs.accelerationStructure ); + return m_accelerationStructureNV == VK_NULL_HANDLE; } - bool operator!=( AccelerationStructureMemoryRequirementsInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT - { - return !operator==( rhs ); - } -#endif + private: + VkAccelerationStructureNV m_accelerationStructureNV; + }; + static_assert( sizeof( 
VULKAN_HPP_NAMESPACE::AccelerationStructureNV ) == sizeof( VkAccelerationStructureNV ), "handle and wrapper have different size!" ); + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type<ObjectType::eAccelerationStructureNV> + { + using type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; + }; + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureNV> + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; + }; - public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureMemoryRequirementsInfoKHR; - const void* pNext = {}; - VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeKHR type = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeKHR::eObject; - VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType = VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR::eHost; - VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure = {}; + template <> + struct CppType<VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureNV> + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; }; - static_assert( sizeof( AccelerationStructureMemoryRequirementsInfoKHR ) == sizeof( VkAccelerationStructureMemoryRequirementsInfoKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<AccelerationStructureMemoryRequirementsInfoKHR>::value, "struct wrapper is not a standard layout!" ); + template <> - struct CppType<StructureType, StructureType::eAccelerationStructureMemoryRequirementsInfoKHR> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::AccelerationStructureNV> { - using Type = AccelerationStructureMemoryRequirementsInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct AccelerationStructureMemoryRequirementsInfoNV { @@ -16597,7 +16835,7 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureMemoryRequirementsInfoNV; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {}) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV::eObject, VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {}) VULKAN_HPP_NOEXCEPT : type( type_ ), accelerationStructure( accelerationStructure_ ) {} @@ -16673,7 +16911,7 @@ namespace VULKAN_HPP_NAMESPACE public: const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureMemoryRequirementsInfoNV; const void* pNext = {}; - VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV::eObject; VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure = {}; }; @@ -16686,72 +16924,71 @@ namespace VULKAN_HPP_NAMESPACE using Type = AccelerationStructureMemoryRequirementsInfoNV; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS - 
struct AccelerationStructureVersionKHR + struct AccelerationStructureVersionInfoKHR { static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureVersionKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureVersionInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR AccelerationStructureVersionKHR(const uint8_t* versionData_ = {}) VULKAN_HPP_NOEXCEPT - : versionData( versionData_ ) + VULKAN_HPP_CONSTEXPR AccelerationStructureVersionInfoKHR(const uint8_t* pVersionData_ = {}) VULKAN_HPP_NOEXCEPT + : pVersionData( pVersionData_ ) {} - VULKAN_HPP_CONSTEXPR AccelerationStructureVersionKHR( AccelerationStructureVersionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR AccelerationStructureVersionInfoKHR( AccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - AccelerationStructureVersionKHR( VkAccelerationStructureVersionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureVersionInfoKHR( VkAccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - AccelerationStructureVersionKHR & operator=( VkAccelerationStructureVersionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureVersionInfoKHR & operator=( VkAccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureVersionKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureVersionInfoKHR const *>( &rhs ); return *this; } - AccelerationStructureVersionKHR & operator=( AccelerationStructureVersionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + AccelerationStructureVersionInfoKHR & operator=( AccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureVersionKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( AccelerationStructureVersionInfoKHR ) ); return *this; } - AccelerationStructureVersionKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureVersionInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - AccelerationStructureVersionKHR & setVersionData( const uint8_t* versionData_ ) VULKAN_HPP_NOEXCEPT + AccelerationStructureVersionInfoKHR & setPVersionData( const uint8_t* pVersionData_ ) VULKAN_HPP_NOEXCEPT { - versionData = versionData_; + pVersionData = pVersionData_; return *this; } - operator VkAccelerationStructureVersionKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkAccelerationStructureVersionInfoKHR const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkAccelerationStructureVersionKHR*>( this ); + return *reinterpret_cast<const VkAccelerationStructureVersionInfoKHR*>( this ); } - operator VkAccelerationStructureVersionKHR &() VULKAN_HPP_NOEXCEPT + operator VkAccelerationStructureVersionInfoKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkAccelerationStructureVersionKHR*>( this ); + return *reinterpret_cast<VkAccelerationStructureVersionInfoKHR*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( AccelerationStructureVersionKHR const& ) const = default; + auto operator<=>( AccelerationStructureVersionInfoKHR const& ) const = default; #else - bool operator==( AccelerationStructureVersionKHR 
const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( AccelerationStructureVersionInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) - && ( versionData == rhs.versionData ); + && ( pVersionData == rhs.pVersionData ); } - bool operator!=( AccelerationStructureVersionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( AccelerationStructureVersionInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -16760,20 +16997,19 @@ namespace VULKAN_HPP_NAMESPACE public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureVersionKHR; + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureVersionInfoKHR; const void* pNext = {}; - const uint8_t* versionData = {}; + const uint8_t* pVersionData = {}; }; - static_assert( sizeof( AccelerationStructureVersionKHR ) == sizeof( VkAccelerationStructureVersionKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<AccelerationStructureVersionKHR>::value, "struct wrapper is not a standard layout!" ); + static_assert( sizeof( AccelerationStructureVersionInfoKHR ) == sizeof( VkAccelerationStructureVersionInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<AccelerationStructureVersionInfoKHR>::value, "struct wrapper is not a standard layout!" ); template <> - struct CppType<StructureType, StructureType::eAccelerationStructureVersionKHR> + struct CppType<StructureType, StructureType::eAccelerationStructureVersionInfoKHR> { - using Type = AccelerationStructureVersionKHR; + using Type = AccelerationStructureVersionInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ class SwapchainKHR { @@ -19212,80 +19448,80 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; }; - struct BindAccelerationStructureMemoryInfoKHR + struct BindAccelerationStructureMemoryInfoNV { static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindAccelerationStructureMemoryInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindAccelerationStructureMemoryInfoNV; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, uint32_t deviceIndexCount_ = {}, const uint32_t* pDeviceIndices_ = {}) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, uint32_t deviceIndexCount_ = {}, const uint32_t* pDeviceIndices_ = {}) VULKAN_HPP_NOEXCEPT : accelerationStructure( accelerationStructure_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), deviceIndexCount( deviceIndexCount_ ), pDeviceIndices( pDeviceIndices_ ) {} - VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoKHR( BindAccelerationStructureMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoNV( BindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; - BindAccelerationStructureMemoryInfoKHR( VkBindAccelerationStructureMemoryInfoKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV( VkBindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - BindAccelerationStructureMemoryInfoKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_, VULKAN_HPP_NAMESPACE::DeviceMemory memory_, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & deviceIndices_ ) + BindAccelerationStructureMemoryInfoNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_, VULKAN_HPP_NAMESPACE::DeviceMemory memory_, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & deviceIndices_ ) : accelerationStructure( accelerationStructure_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), deviceIndexCount( static_cast<uint32_t>( deviceIndices_.size() ) ), pDeviceIndices( deviceIndices_.data() ) {} #endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - BindAccelerationStructureMemoryInfoKHR & operator=( VkBindAccelerationStructureMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & operator=( VkBindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV const *>( &rhs ); return *this; } - BindAccelerationStructureMemoryInfoKHR & operator=( BindAccelerationStructureMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & operator=( BindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( BindAccelerationStructureMemoryInfoKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( BindAccelerationStructureMemoryInfoNV ) ); return *this; } - BindAccelerationStructureMemoryInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - BindAccelerationStructureMemoryInfoKHR & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ ) VULKAN_HPP_NOEXCEPT { accelerationStructure = accelerationStructure_; return *this; } - BindAccelerationStructureMemoryInfoKHR & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT { memory = memory_; return *this; } - BindAccelerationStructureMemoryInfoKHR & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT { memoryOffset = memoryOffset_; return *this; } - BindAccelerationStructureMemoryInfoKHR & setDeviceIndexCount( uint32_t deviceIndexCount_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setDeviceIndexCount( uint32_t deviceIndexCount_ ) VULKAN_HPP_NOEXCEPT { deviceIndexCount = deviceIndexCount_; return *this; } - 
BindAccelerationStructureMemoryInfoKHR & setPDeviceIndices( const uint32_t* pDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setPDeviceIndices( const uint32_t* pDeviceIndices_ ) VULKAN_HPP_NOEXCEPT { pDeviceIndices = pDeviceIndices_; return *this; } #if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - BindAccelerationStructureMemoryInfoKHR & setDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & deviceIndices_ ) VULKAN_HPP_NOEXCEPT + BindAccelerationStructureMemoryInfoNV & setDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & deviceIndices_ ) VULKAN_HPP_NOEXCEPT { deviceIndexCount = static_cast<uint32_t>( deviceIndices_.size() ); pDeviceIndices = deviceIndices_.data(); @@ -19294,21 +19530,21 @@ namespace VULKAN_HPP_NAMESPACE #endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - operator VkBindAccelerationStructureMemoryInfoKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkBindAccelerationStructureMemoryInfoNV const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkBindAccelerationStructureMemoryInfoKHR*>( this ); + return *reinterpret_cast<const VkBindAccelerationStructureMemoryInfoNV*>( this ); } - operator VkBindAccelerationStructureMemoryInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkBindAccelerationStructureMemoryInfoNV &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkBindAccelerationStructureMemoryInfoKHR*>( this ); + return *reinterpret_cast<VkBindAccelerationStructureMemoryInfoNV*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( BindAccelerationStructureMemoryInfoKHR const& ) const = default; + auto operator<=>( BindAccelerationStructureMemoryInfoNV const& ) const = default; #else - bool operator==( BindAccelerationStructureMemoryInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( BindAccelerationStructureMemoryInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) @@ -19319,7 +19555,7 @@ namespace VULKAN_HPP_NAMESPACE && ( pDeviceIndices == rhs.pDeviceIndices ); } - bool operator!=( BindAccelerationStructureMemoryInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( BindAccelerationStructureMemoryInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -19328,24 +19564,23 @@ namespace VULKAN_HPP_NAMESPACE public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindAccelerationStructureMemoryInfoKHR; + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindAccelerationStructureMemoryInfoNV; const void* pNext = {}; - VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure = {}; VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; uint32_t deviceIndexCount = {}; const uint32_t* pDeviceIndices = {}; }; - static_assert( sizeof( BindAccelerationStructureMemoryInfoKHR ) == sizeof( VkBindAccelerationStructureMemoryInfoKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<BindAccelerationStructureMemoryInfoKHR>::value, "struct wrapper is not a standard layout!" ); + static_assert( sizeof( BindAccelerationStructureMemoryInfoNV ) == sizeof( VkBindAccelerationStructureMemoryInfoNV ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout<BindAccelerationStructureMemoryInfoNV>::value, "struct wrapper is not a standard layout!" ); template <> - struct CppType<StructureType, StructureType::eBindAccelerationStructureMemoryInfoKHR> + struct CppType<StructureType, StructureType::eBindAccelerationStructureMemoryInfoNV> { - using Type = BindAccelerationStructureMemoryInfoKHR; + using Type = BindAccelerationStructureMemoryInfoNV; }; - using BindAccelerationStructureMemoryInfoNV = BindAccelerationStructureMemoryInfoKHR; struct BindBufferMemoryDeviceGroupInfo { @@ -25697,7 +25932,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = CooperativeMatrixPropertiesNV; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS struct CopyAccelerationStructureInfoKHR { static const bool allowDuplicate = false; @@ -25800,9 +26034,7 @@ namespace VULKAN_HPP_NAMESPACE { using Type = CopyAccelerationStructureInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct CopyAccelerationStructureToMemoryInfoKHR { static const bool allowDuplicate = false; @@ -25887,7 +26119,6 @@ namespace VULKAN_HPP_NAMESPACE { using Type = CopyAccelerationStructureToMemoryInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct CopyBufferInfo2KHR { @@ -26149,6 +26380,93 @@ namespace VULKAN_HPP_NAMESPACE using Type = CopyBufferToImageInfo2KHR; }; + struct CopyCommandTransformInfoQCOM + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyCommandTransformInfoQCOM; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyCommandTransformInfoQCOM(VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity) VULKAN_HPP_NOEXCEPT + : transform( transform_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyCommandTransformInfoQCOM( CopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyCommandTransformInfoQCOM( VkCopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyCommandTransformInfoQCOM & operator=( VkCopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::CopyCommandTransformInfoQCOM const *>( &rhs ); + return *this; + } + + CopyCommandTransformInfoQCOM & operator=( CopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( CopyCommandTransformInfoQCOM ) ); + return *this; + } + + CopyCommandTransformInfoQCOM & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyCommandTransformInfoQCOM & setTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ ) VULKAN_HPP_NOEXCEPT + { + transform = transform_; + return *this; + } + + + operator VkCopyCommandTransformInfoQCOM const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkCopyCommandTransformInfoQCOM*>( this ); + } + + operator VkCopyCommandTransformInfoQCOM &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkCopyCommandTransformInfoQCOM*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyCommandTransformInfoQCOM const& ) const = default; +#else + bool operator==( CopyCommandTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( transform == rhs.transform ); + } + + bool operator!=( 
CopyCommandTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyCommandTransformInfoQCOM; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + + }; + static_assert( sizeof( CopyCommandTransformInfoQCOM ) == sizeof( VkCopyCommandTransformInfoQCOM ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<CopyCommandTransformInfoQCOM>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eCopyCommandTransformInfoQCOM> + { + using Type = CopyCommandTransformInfoQCOM; + }; + class DescriptorSet { public: @@ -26779,7 +27097,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = CopyImageToBufferInfo2KHR; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS struct CopyMemoryToAccelerationStructureInfoKHR { static const bool allowDuplicate = false; @@ -26864,7 +27181,6 @@ namespace VULKAN_HPP_NAMESPACE { using Type = CopyMemoryToAccelerationStructureInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ #ifdef VK_USE_PLATFORM_WIN32_KHR struct D3D12FenceSubmitInfoKHR @@ -28354,191 +28670,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = DedicatedAllocationMemoryAllocateInfoNV; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS - class DeferredOperationKHR - { - public: - using CType = VkDeferredOperationKHR; - - static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR; - static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; - - public: - VULKAN_HPP_CONSTEXPR DeferredOperationKHR() VULKAN_HPP_NOEXCEPT - : m_deferredOperationKHR(VK_NULL_HANDLE) - {} - - VULKAN_HPP_CONSTEXPR DeferredOperationKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT - : m_deferredOperationKHR(VK_NULL_HANDLE) - {} - - VULKAN_HPP_TYPESAFE_EXPLICIT DeferredOperationKHR( VkDeferredOperationKHR deferredOperationKHR ) VULKAN_HPP_NOEXCEPT - : m_deferredOperationKHR( deferredOperationKHR ) - {} - -#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) - DeferredOperationKHR & operator=(VkDeferredOperationKHR deferredOperationKHR) VULKAN_HPP_NOEXCEPT - { - m_deferredOperationKHR = deferredOperationKHR; - return *this; - } -#endif - - DeferredOperationKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT - { - m_deferredOperationKHR = VK_NULL_HANDLE; - return *this; - } - -#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( DeferredOperationKHR const& ) const = default; -#else - bool operator==( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR == rhs.m_deferredOperationKHR; - } - - bool operator!=(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR != rhs.m_deferredOperationKHR; - } - - bool operator<(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR < rhs.m_deferredOperationKHR; - } -#endif - - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeferredOperationKHR() const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR; - } - - explicit operator bool() const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR != VK_NULL_HANDLE; - } - - bool operator!() const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR == 
VK_NULL_HANDLE; - } - - private: - VkDeferredOperationKHR m_deferredOperationKHR; - }; - static_assert( sizeof( VULKAN_HPP_NAMESPACE::DeferredOperationKHR ) == sizeof( VkDeferredOperationKHR ), "handle and wrapper have different size!" ); - - template <> - struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type<ObjectType::eDeferredOperationKHR> - { - using type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; - }; - - template <> - struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR> - { - using Type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; - }; - - - - template <> - struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR> - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; - }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct DeferredOperationInfoKHR - { - static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeferredOperationInfoKHR; - -#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR DeferredOperationInfoKHR(VULKAN_HPP_NAMESPACE::DeferredOperationKHR operationHandle_ = {}) VULKAN_HPP_NOEXCEPT - : operationHandle( operationHandle_ ) - {} - - VULKAN_HPP_CONSTEXPR DeferredOperationInfoKHR( DeferredOperationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - - DeferredOperationInfoKHR( VkDeferredOperationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - *this = rhs; - } -#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - - DeferredOperationInfoKHR & operator=( VkDeferredOperationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DeferredOperationInfoKHR const *>( &rhs ); - return *this; - } - - DeferredOperationInfoKHR & operator=( DeferredOperationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - memcpy( static_cast<void *>( this ), &rhs, sizeof( DeferredOperationInfoKHR ) ); - return *this; - } - - DeferredOperationInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT - { - pNext = pNext_; - return *this; - } - - DeferredOperationInfoKHR & setOperationHandle( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operationHandle_ ) VULKAN_HPP_NOEXCEPT - { - operationHandle = operationHandle_; - return *this; - } - - - operator VkDeferredOperationInfoKHR const&() const VULKAN_HPP_NOEXCEPT - { - return *reinterpret_cast<const VkDeferredOperationInfoKHR*>( this ); - } - - operator VkDeferredOperationInfoKHR &() VULKAN_HPP_NOEXCEPT - { - return *reinterpret_cast<VkDeferredOperationInfoKHR*>( this ); - } - - -#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( DeferredOperationInfoKHR const& ) const = default; -#else - bool operator==( DeferredOperationInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT - { - return ( sType == rhs.sType ) - && ( pNext == rhs.pNext ) - && ( operationHandle == rhs.operationHandle ); - } - - bool operator!=( DeferredOperationInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT - { - return !operator==( rhs ); - } -#endif - - - - public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeferredOperationInfoKHR; - const void* pNext = {}; - VULKAN_HPP_NAMESPACE::DeferredOperationKHR operationHandle = {}; - - }; - static_assert( sizeof( DeferredOperationInfoKHR ) == sizeof( VkDeferredOperationInfoKHR ), "struct and wrapper have different size!" 
); - static_assert( std::is_standard_layout<DeferredOperationInfoKHR>::value, "struct wrapper is not a standard layout!" ); - - template <> - struct CppType<StructureType, StructureType::eDeferredOperationInfoKHR> - { - using Type = DeferredOperationInfoKHR; - }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - struct DescriptorBufferInfo { @@ -31203,6 +31334,109 @@ namespace VULKAN_HPP_NAMESPACE using Type = DeviceCreateInfo; }; + struct DeviceDeviceMemoryReportCreateInfoEXT + { + static const bool allowDuplicate = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceDeviceMemoryReportCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceDeviceMemoryReportCreateInfoEXT(VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ = {}, PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback_ = {}, void* pUserData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pfnUserCallback( pfnUserCallback_ ), pUserData( pUserData_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceDeviceMemoryReportCreateInfoEXT( DeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceDeviceMemoryReportCreateInfoEXT( VkDeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceDeviceMemoryReportCreateInfoEXT & operator=( VkDeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DeviceDeviceMemoryReportCreateInfoEXT const *>( &rhs ); + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & operator=( DeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( DeviceDeviceMemoryReportCreateInfoEXT ) ); + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPfnUserCallback( PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback_ ) VULKAN_HPP_NOEXCEPT + { + pfnUserCallback = pfnUserCallback_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + + operator VkDeviceDeviceMemoryReportCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>( this ); + } + + operator VkDeviceDeviceMemoryReportCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceDeviceMemoryReportCreateInfoEXT const& ) const = default; +#else + bool operator==( DeviceDeviceMemoryReportCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pfnUserCallback == rhs.pfnUserCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DeviceDeviceMemoryReportCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceDeviceMemoryReportCreateInfoEXT; + 
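A note on the VK_EXT_device_memory_report wrapper introduced in this hunk: the struct is normally chained into device creation through pNext (allowDuplicate is true, so several callbacks may be registered). The following is a minimal sketch, not part of this diff; the callback, the physicalDevice variable and the omitted queue/extension setup are illustrative assumptions only, and the device must actually enable VK_EXT_device_memory_report for the callback to fire.

    // Illustrative callback matching PFN_vkDeviceMemoryReportCallbackEXT (names are assumed).
    VKAPI_ATTR void VKAPI_CALL reportCallback( const VkDeviceMemoryReportCallbackDataEXT * pData, void * /*pUserData*/ )
    {
      // e.g. inspect pData->type, pData->size and pData->objectType here
    }

    vk::DeviceDeviceMemoryReportCreateInfoEXT memoryReport;
    memoryReport.setPfnUserCallback( &reportCallback );

    vk::DeviceCreateInfo deviceCreateInfo;        // queue and extension setup omitted in this sketch
    deviceCreateInfo.setPNext( &memoryReport );   // chain the report callback into device creation
    vk::Device device = physicalDevice.createDevice( deviceCreateInfo );
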
const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags = {}; + PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback = {}; + void* pUserData = {}; + + }; + static_assert( sizeof( DeviceDeviceMemoryReportCreateInfoEXT ) == sizeof( VkDeviceDeviceMemoryReportCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<DeviceDeviceMemoryReportCreateInfoEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eDeviceDeviceMemoryReportCreateInfoEXT> + { + using Type = DeviceDeviceMemoryReportCreateInfoEXT; + }; + struct DeviceDiagnosticsConfigCreateInfoNV { static const bool allowDuplicate = false; @@ -34557,91 +34791,76 @@ namespace VULKAN_HPP_NAMESPACE static_assert( sizeof( ViewportWScalingNV ) == sizeof( VkViewportWScalingNV ), "struct and wrapper have different size!" ); static_assert( std::is_standard_layout<ViewportWScalingNV>::value, "struct wrapper is not a standard layout!" ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct StridedBufferRegionKHR + struct StridedDeviceAddressRegionKHR { #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR StridedBufferRegionKHR(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT - : buffer( buffer_ ), offset( offset_ ), stride( stride_ ), size( size_ ) + VULKAN_HPP_CONSTEXPR StridedDeviceAddressRegionKHR(VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : deviceAddress( deviceAddress_ ), stride( stride_ ), size( size_ ) {} - VULKAN_HPP_CONSTEXPR StridedBufferRegionKHR( StridedBufferRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR StridedDeviceAddressRegionKHR( StridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - StridedBufferRegionKHR( VkStridedBufferRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + StridedDeviceAddressRegionKHR( VkStridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } - - explicit StridedBufferRegionKHR( IndirectCommandsStreamNV const& indirectCommandsStreamNV, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} ) - : buffer( indirectCommandsStreamNV.buffer ) - , offset( indirectCommandsStreamNV.offset ) - , stride( stride_ ) - , size( size_ ) - {} #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - StridedBufferRegionKHR & operator=( VkStridedBufferRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + StridedDeviceAddressRegionKHR & operator=( VkStridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR const *>( &rhs ); return *this; } - StridedBufferRegionKHR & operator=( StridedBufferRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + StridedDeviceAddressRegionKHR & operator=( StridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( StridedBufferRegionKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( StridedDeviceAddressRegionKHR ) ); return *this; } - StridedBufferRegionKHR & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + 
StridedDeviceAddressRegionKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT { - buffer = buffer_; - return *this; - } - - StridedBufferRegionKHR & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT - { - offset = offset_; + deviceAddress = deviceAddress_; return *this; } - StridedBufferRegionKHR & setStride( VULKAN_HPP_NAMESPACE::DeviceSize stride_ ) VULKAN_HPP_NOEXCEPT + StridedDeviceAddressRegionKHR & setStride( VULKAN_HPP_NAMESPACE::DeviceSize stride_ ) VULKAN_HPP_NOEXCEPT { stride = stride_; return *this; } - StridedBufferRegionKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + StridedDeviceAddressRegionKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT { size = size_; return *this; } - operator VkStridedBufferRegionKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkStridedDeviceAddressRegionKHR const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkStridedBufferRegionKHR*>( this ); + return *reinterpret_cast<const VkStridedDeviceAddressRegionKHR*>( this ); } - operator VkStridedBufferRegionKHR &() VULKAN_HPP_NOEXCEPT + operator VkStridedDeviceAddressRegionKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkStridedBufferRegionKHR*>( this ); + return *reinterpret_cast<VkStridedDeviceAddressRegionKHR*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( StridedBufferRegionKHR const& ) const = default; + auto operator<=>( StridedDeviceAddressRegionKHR const& ) const = default; #else - bool operator==( StridedBufferRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( StridedDeviceAddressRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { - return ( buffer == rhs.buffer ) - && ( offset == rhs.offset ) + return ( deviceAddress == rhs.deviceAddress ) && ( stride == rhs.stride ) && ( size == rhs.size ); } - bool operator!=( StridedBufferRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( StridedDeviceAddressRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -34650,15 +34869,13 @@ namespace VULKAN_HPP_NAMESPACE public: - VULKAN_HPP_NAMESPACE::Buffer buffer = {}; - VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; VULKAN_HPP_NAMESPACE::DeviceSize stride = {}; VULKAN_HPP_NAMESPACE::DeviceSize size = {}; }; - static_assert( sizeof( StridedBufferRegionKHR ) == sizeof( VkStridedBufferRegionKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<StridedBufferRegionKHR>::value, "struct wrapper is not a standard layout!" ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + static_assert( sizeof( StridedDeviceAddressRegionKHR ) == sizeof( VkStridedDeviceAddressRegionKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<StridedDeviceAddressRegionKHR>::value, "struct wrapper is not a standard layout!" 
); class CommandBuffer { @@ -34714,981 +34931,797 @@ namespace VULKAN_HPP_NAMESPACE } #endif - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type begin( const CommandBufferBeginInfo & beginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type begin( const CommandBufferBeginInfo & beginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void 
beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const 
VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &counterBufferOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & counterBufferOffsets VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, ArrayProxy<const uint32_t> const &dynamicOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, ArrayProxy<const uint32_t> const & dynamicOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &sizes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & sizes VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &sizes, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &strides, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & sizes 
VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & strides VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageBlit> const &regions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageBlit> const & regions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureIndirectKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfo, VULKAN_HPP_NAMESPACE::Buffer indirectBuffer, VULKAN_HPP_NAMESPACE::DeviceSize indirectOffset, uint32_t indirectStride,
Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureIndirectKHR( const AccelerationStructureBuildGeometryInfoKHR & info, VULKAN_HPP_NAMESPACE::Buffer indirectBuffer, VULKAN_HPP_NAMESPACE::DeviceSize indirectOffset, uint32_t indirectStride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructuresIndirectKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::DeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const &infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const > const &pOffsetInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructuresIndirectKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceAddress> const & indirectDeviceAddresses, ArrayProxy<const uint32_t> const & indirectStrides, ArrayProxy<const uint32_t* const > const & pMaxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructuresKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void buildAccelerationStructuresKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const > const & pBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearAttachments( ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearAttachment> const &attachments, ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearRect> const &rects, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearAttachments( ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearAttachment> const & attachments, ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearRect> const & rects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const &ranges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const & ranges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const &ranges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const & ranges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* 
pRegions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferCopy> const &regions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferCopy> const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const &regions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage,
VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageCopy> const &regions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageCopy> const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const & d
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const &regions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const
&d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerEndEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerEndEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugMarkerEndEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void 
dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void 
dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void 
drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endConditionalRenderingEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endConditionalRenderingEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endDebugUtilsLabelEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endDebugUtilsLabelEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) 
const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endConditionalRenderingEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endDebugUtilsLabelEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endRenderPass( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &counterBufferOffsets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & counterBufferOffsets VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void executeCommands( ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void executeCommands( ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, 
VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const &memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const &bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const &imageMemoryBarriers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const & memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const & bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const & imageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void preprocessGeneratedCommandsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void preprocessGeneratedCommandsNV( 
const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> const &values, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> const & values, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const &descriptorWrites, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, 
VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const & descriptorWrites, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageResolve> const &regions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageResolve> const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setBlendConstants( const float blendConstants[4], Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setBlendConstants( const float blendConstants[4], Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setBlendConstants( const float blendConstants[4], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setCheckpointNV( const void* pCheckpointMarker, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy<const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV> const &customSampleOrders, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy<const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV> const & customSampleOrders, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDeviceMask( uint32_t deviceMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDeviceMask( uint32_t deviceMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void 
setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDeviceMask( uint32_t deviceMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDeviceMaskKHR( uint32_t deviceMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &discardRectangles, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & discardRectangles, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, 
Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &exclusiveScissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & exclusiveScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setFragmentShadingRateEnumNV( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLineWidth( float lineWidth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLineWidth( float lineWidth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setFragmentShadingRateKHR( const VULKAN_HPP_NAMESPACE::Extent2D* pFragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setFragmentShadingRateKHR( const Extent2D & fragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setLineWidth( float lineWidth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setSampleLocationsEXT( const VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setSampleLocationsEXT( const 
VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setScissor( uint32_t firstScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &scissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setScissorWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &scissors, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setScissor( uint32_t firstScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & scissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setScissorWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & scissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + 
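// Illustrative only: the VK_EXT_extended_dynamic_state setters declared above take no output
// parameters, so recording them is a sequence of plain calls. Assumes <vulkan/vulkan.hpp>, the
// default dispatcher, and a placeholder command buffer.
void recordExtendedDynamicState( vk::CommandBuffer cmd )
{
  cmd.setFrontFaceEXT( vk::FrontFace::eCounterClockwise );
  cmd.setPrimitiveTopologyEXT( vk::PrimitiveTopology::eTriangleList );
  cmd.setStencilTestEnableEXT( VK_FALSE );
  vk::Rect2D scissor( { 0, 0 }, { 1280, 720 } );
  cmd.setScissorWithCountEXT( scissor );   // the scissor count comes from the ArrayProxy size
}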
template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewport( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const &viewports, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewport( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const & viewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV> const &shadingRatePalettes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV> const & shadingRatePalettes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportWScalingNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ViewportWScalingNV> const &viewportWScalings, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportWScalingNV( uint32_t firstViewport, 
ArrayProxy<const VULKAN_HPP_NAMESPACE::ViewportWScalingNV> const & viewportWScalings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setViewportWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const &viewports, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setViewportWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const & viewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysIndirectKHR( const StridedBufferRegionKHR & raygenShaderBindingTable, const StridedBufferRegionKHR & missShaderBindingTable, const StridedBufferRegionKHR & hitShaderBindingTable, const StridedBufferRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void traceRaysIndirectKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
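// Hedged sketch of the reworked ray tracing dispatch above: the shader binding tables are now
// described by StridedDeviceAddressRegionKHR (device address, stride, size) instead of
// StridedBufferRegionKHR, and the indirect variant takes a DeviceAddress rather than a
// Buffer/offset pair. Assumes <vulkan/vulkan.hpp>; addresses and sizes are placeholders.
void dispatchRays( vk::CommandBuffer cmd, vk::DeviceAddress raygenAddr, vk::DeviceAddress missAddr,
                   vk::DeviceAddress hitAddr, vk::DeviceSize handleSizeAligned )
{
  vk::StridedDeviceAddressRegionKHR raygen( raygenAddr, handleSizeAligned, handleSizeAligned );
  vk::StridedDeviceAddressRegionKHR miss( missAddr, handleSizeAligned, handleSizeAligned );
  vk::StridedDeviceAddressRegionKHR hit( hitAddr, handleSizeAligned, handleSizeAligned );
  vk::StridedDeviceAddressRegionKHR callable{};   // an unused table can stay zero-initialized
  cmd.traceRaysKHR( raygen, miss, hit, callable, 1920, 1080, 1 );
}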
VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysKHR( const StridedBufferRegionKHR & raygenShaderBindingTable, const StridedBufferRegionKHR & missShaderBindingTable, const StridedBufferRegionKHR & hitShaderBindingTable, const StridedBufferRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, 
VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy<const T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy<const T> const & data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void 
waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void waitEvents( ArrayProxy<const VULKAN_HPP_NAMESPACE::Event> const &events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const &memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const &bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const &imageMemoryBarriers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void waitEvents( ArrayProxy<const VULKAN_HPP_NAMESPACE::Event> const & events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const & memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const & bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const & imageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeAccelerationStructuresPropertiesNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeAccelerationStructuresPropertiesNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureNV> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer 
dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result end(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result end( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type end(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type end( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandBuffer() const VULKAN_HPP_NOEXCEPT { return m_commandBuffer; @@ -35830,6 +35863,100 @@ namespace VULKAN_HPP_NAMESPACE using Type = MemoryAllocateInfo; }; + class DeferredOperationKHR + { + public: + using CType = VkDeferredOperationKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + VULKAN_HPP_CONSTEXPR DeferredOperationKHR() VULKAN_HPP_NOEXCEPT + : m_deferredOperationKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DeferredOperationKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_deferredOperationKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DeferredOperationKHR( VkDeferredOperationKHR deferredOperationKHR ) VULKAN_HPP_NOEXCEPT + : m_deferredOperationKHR( deferredOperationKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DeferredOperationKHR & 
operator=(VkDeferredOperationKHR deferredOperationKHR) VULKAN_HPP_NOEXCEPT + { + m_deferredOperationKHR = deferredOperationKHR; + return *this; + } +#endif + + DeferredOperationKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_deferredOperationKHR = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeferredOperationKHR const& ) const = default; +#else + bool operator==( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR == rhs.m_deferredOperationKHR; + } + + bool operator!=(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR != rhs.m_deferredOperationKHR; + } + + bool operator<(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR < rhs.m_deferredOperationKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeferredOperationKHR() const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_deferredOperationKHR == VK_NULL_HANDLE; + } + + private: + VkDeferredOperationKHR m_deferredOperationKHR; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DeferredOperationKHR ) == sizeof( VkDeferredOperationKHR ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type<ObjectType::eDeferredOperationKHR> + { + using type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; + }; + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR> + { + using Type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; + }; + + + + template <> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PipelineCache { public: @@ -39446,7 +39573,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = QueryPoolCreateInfo; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS struct RayTracingShaderGroupCreateInfoKHR { static const bool allowDuplicate = false; @@ -39573,9 +39699,7 @@ namespace VULKAN_HPP_NAMESPACE { using Type = RayTracingShaderGroupCreateInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct PipelineLibraryCreateInfoKHR { static const bool allowDuplicate = false; @@ -39685,17 +39809,15 @@ namespace VULKAN_HPP_NAMESPACE { using Type = PipelineLibraryCreateInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct RayTracingPipelineInterfaceCreateInfoKHR { static const bool allowDuplicate = false; static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingPipelineInterfaceCreateInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR(uint32_t maxPayloadSize_ = {}, uint32_t maxAttributeSize_ = {}, uint32_t maxCallableSize_ = {}) VULKAN_HPP_NOEXCEPT - : maxPayloadSize( maxPayloadSize_ ), maxAttributeSize( maxAttributeSize_ ), maxCallableSize( maxCallableSize_ ) + VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR(uint32_t maxPipelineRayPayloadSize_ = {}, uint32_t maxPipelineRayHitAttributeSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxPipelineRayPayloadSize( maxPipelineRayPayloadSize_ ), maxPipelineRayHitAttributeSize( 
maxPipelineRayHitAttributeSize_ ) {} VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR( RayTracingPipelineInterfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -39724,21 +39846,15 @@ namespace VULKAN_HPP_NAMESPACE return *this; } - RayTracingPipelineInterfaceCreateInfoKHR & setMaxPayloadSize( uint32_t maxPayloadSize_ ) VULKAN_HPP_NOEXCEPT + RayTracingPipelineInterfaceCreateInfoKHR & setMaxPipelineRayPayloadSize( uint32_t maxPipelineRayPayloadSize_ ) VULKAN_HPP_NOEXCEPT { - maxPayloadSize = maxPayloadSize_; + maxPipelineRayPayloadSize = maxPipelineRayPayloadSize_; return *this; } - RayTracingPipelineInterfaceCreateInfoKHR & setMaxAttributeSize( uint32_t maxAttributeSize_ ) VULKAN_HPP_NOEXCEPT + RayTracingPipelineInterfaceCreateInfoKHR & setMaxPipelineRayHitAttributeSize( uint32_t maxPipelineRayHitAttributeSize_ ) VULKAN_HPP_NOEXCEPT { - maxAttributeSize = maxAttributeSize_; - return *this; - } - - RayTracingPipelineInterfaceCreateInfoKHR & setMaxCallableSize( uint32_t maxCallableSize_ ) VULKAN_HPP_NOEXCEPT - { - maxCallableSize = maxCallableSize_; + maxPipelineRayHitAttributeSize = maxPipelineRayHitAttributeSize_; return *this; } @@ -39761,9 +39877,8 @@ namespace VULKAN_HPP_NAMESPACE { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) - && ( maxPayloadSize == rhs.maxPayloadSize ) - && ( maxAttributeSize == rhs.maxAttributeSize ) - && ( maxCallableSize == rhs.maxCallableSize ); + && ( maxPipelineRayPayloadSize == rhs.maxPipelineRayPayloadSize ) + && ( maxPipelineRayHitAttributeSize == rhs.maxPipelineRayHitAttributeSize ); } bool operator!=( RayTracingPipelineInterfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT @@ -39777,9 +39892,8 @@ namespace VULKAN_HPP_NAMESPACE public: const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRayTracingPipelineInterfaceCreateInfoKHR; const void* pNext = {}; - uint32_t maxPayloadSize = {}; - uint32_t maxAttributeSize = {}; - uint32_t maxCallableSize = {}; + uint32_t maxPipelineRayPayloadSize = {}; + uint32_t maxPipelineRayHitAttributeSize = {}; }; static_assert( sizeof( RayTracingPipelineInterfaceCreateInfoKHR ) == sizeof( VkRayTracingPipelineInterfaceCreateInfoKHR ), "struct and wrapper have different size!" 
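// Hedged sketch of the renamed interface fields above (maxPayloadSize -> maxPipelineRayPayloadSize,
// maxAttributeSize -> maxPipelineRayHitAttributeSize; the separate callable size is dropped),
// together with the RayTracingPipelineCreateInfoKHR members reworked further down in this hunk
// (pLibraryInfo becomes a pointer, pDynamicState is new). Assumes <vulkan/vulkan.hpp>; the sizes
// and recursion depth are placeholders.
vk::RayTracingPipelineInterfaceCreateInfoKHR interfaceInfo =
    vk::RayTracingPipelineInterfaceCreateInfoKHR()
        .setMaxPipelineRayPayloadSize( 32 )
        .setMaxPipelineRayHitAttributeSize( 8 );

vk::PipelineLibraryCreateInfoKHR libraryInfo{};
vk::RayTracingPipelineCreateInfoKHR pipelineInfo =
    vk::RayTracingPipelineCreateInfoKHR()
        .setMaxPipelineRayRecursionDepth( 2 )
        .setPLibraryInfo( &libraryInfo )
        .setPDynamicState( nullptr );              // optional dynamic state, new member
pipelineInfo.pLibraryInterface = &interfaceInfo;   // struct members remain publicly assignable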
); @@ -39790,17 +39904,15 @@ namespace VULKAN_HPP_NAMESPACE { using Type = RayTracingPipelineInterfaceCreateInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS struct RayTracingPipelineCreateInfoKHR { static const bool allowDuplicate = false; static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingPipelineCreateInfoKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoKHR(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, uint32_t groupCount_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups_ = {}, uint32_t maxRecursionDepth_ = {}, VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR libraries_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT - : flags( flags_ ), stageCount( stageCount_ ), pStages( pStages_ ), groupCount( groupCount_ ), pGroups( pGroups_ ), maxRecursionDepth( maxRecursionDepth_ ), libraries( libraries_ ), pLibraryInterface( pLibraryInterface_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoKHR(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, uint32_t groupCount_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups_ = {}, uint32_t maxPipelineRayRecursionDepth_ = {}, const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stageCount( stageCount_ ), pStages( pStages_ ), groupCount( groupCount_ ), pGroups( pGroups_ ), maxPipelineRayRecursionDepth( maxPipelineRayRecursionDepth_ ), pLibraryInfo( pLibraryInfo_ ), pLibraryInterface( pLibraryInterface_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) {} VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoKHR( RayTracingPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -39811,8 +39923,8 @@ namespace VULKAN_HPP_NAMESPACE } #if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - RayTracingPipelineCreateInfoKHR( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo> const & stages_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR> const & groups_ = {}, uint32_t maxRecursionDepth_ = {}, VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR libraries_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {} ) - : flags( flags_ ), stageCount( 
static_cast<uint32_t>( stages_.size() ) ), pStages( stages_.data() ), groupCount( static_cast<uint32_t>( groups_.size() ) ), pGroups( groups_.data() ), maxRecursionDepth( maxRecursionDepth_ ), libraries( libraries_ ), pLibraryInterface( pLibraryInterface_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + RayTracingPipelineCreateInfoKHR( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo> const & stages_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR> const & groups_ = {}, uint32_t maxPipelineRayRecursionDepth_ = {}, const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {} ) + : flags( flags_ ), stageCount( static_cast<uint32_t>( stages_.size() ) ), pStages( stages_.data() ), groupCount( static_cast<uint32_t>( groups_.size() ) ), pGroups( groups_.data() ), maxPipelineRayRecursionDepth( maxPipelineRayRecursionDepth_ ), pLibraryInfo( pLibraryInfo_ ), pLibraryInterface( pLibraryInterface_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) {} #endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) @@ -39883,15 +39995,15 @@ namespace VULKAN_HPP_NAMESPACE } #endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) - RayTracingPipelineCreateInfoKHR & setMaxRecursionDepth( uint32_t maxRecursionDepth_ ) VULKAN_HPP_NOEXCEPT + RayTracingPipelineCreateInfoKHR & setMaxPipelineRayRecursionDepth( uint32_t maxPipelineRayRecursionDepth_ ) VULKAN_HPP_NOEXCEPT { - maxRecursionDepth = maxRecursionDepth_; + maxPipelineRayRecursionDepth = maxPipelineRayRecursionDepth_; return *this; } - RayTracingPipelineCreateInfoKHR & setLibraries( VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR const & libraries_ ) VULKAN_HPP_NOEXCEPT + RayTracingPipelineCreateInfoKHR & setPLibraryInfo( const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ ) VULKAN_HPP_NOEXCEPT { - libraries = libraries_; + pLibraryInfo = pLibraryInfo_; return *this; } @@ -39901,6 +40013,12 @@ namespace VULKAN_HPP_NAMESPACE return *this; } + RayTracingPipelineCreateInfoKHR & setPDynamicState( const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ ) VULKAN_HPP_NOEXCEPT + { + pDynamicState = pDynamicState_; + return *this; + } + RayTracingPipelineCreateInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT { layout = layout_; @@ -39943,9 +40061,10 @@ namespace VULKAN_HPP_NAMESPACE && ( pStages == rhs.pStages ) && ( groupCount == rhs.groupCount ) && ( pGroups == rhs.pGroups ) - && ( maxRecursionDepth == rhs.maxRecursionDepth ) - && ( libraries == rhs.libraries ) + && ( maxPipelineRayRecursionDepth == rhs.maxPipelineRayRecursionDepth ) + && ( pLibraryInfo == rhs.pLibraryInfo ) && ( pLibraryInterface == rhs.pLibraryInterface ) + && ( pDynamicState == rhs.pDynamicState ) && ( layout == rhs.layout ) && ( basePipelineHandle == rhs.basePipelineHandle ) && ( basePipelineIndex == rhs.basePipelineIndex ); @@ -39967,9 
+40086,10 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages = {}; uint32_t groupCount = {}; const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups = {}; - uint32_t maxRecursionDepth = {}; - VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR libraries = {}; + uint32_t maxPipelineRayRecursionDepth = {}; + const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo = {}; const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface = {}; + const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState = {}; VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle = {}; int32_t basePipelineIndex = {}; @@ -39983,7 +40103,6 @@ namespace VULKAN_HPP_NAMESPACE { using Type = RayTracingPipelineCreateInfoKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct RayTracingShaderGroupCreateInfoNV { @@ -43528,74 +43647,80 @@ namespace VULKAN_HPP_NAMESPACE } #endif - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<CheckpointDataNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<CheckpointDataNV,Allocator> getCheckpointDataNV(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<CheckpointDataNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, CheckpointDataNV>::value, int>::type = 0> - std::vector<CheckpointDataNV,Allocator> getCheckpointDataNV(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename CheckpointDataNVAllocator = std::allocator<CheckpointDataNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<CheckpointDataNV, CheckpointDataNVAllocator> getCheckpointDataNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename CheckpointDataNVAllocator = std::allocator<CheckpointDataNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CheckpointDataNVAllocator, typename std::enable_if<std::is_same<typename B::value_type, CheckpointDataNV>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<CheckpointDataNV, CheckpointDataNVAllocator> getCheckpointDataNV( CheckpointDataNVAllocator & checkpointDataNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindSparse( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindSparseInfo> const &bindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindSparse( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindSparseInfo> const & bindInfo, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endDebugUtilsLabelEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void endDebugUtilsLabelEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void endDebugUtilsLabelEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void insertDebugUtilsLabelEXT( const 
DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result presentKHR( const PresentInfoKHR & presentInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result presentKHR( const PresentInfoKHR & presentInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type submit( ArrayProxy<const VULKAN_HPP_NAMESPACE::SubmitInfo> const &submits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type submit( ArrayProxy<const VULKAN_HPP_NAMESPACE::SubmitInfo> const & submits, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitIdle(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type waitIdle(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueue() const VULKAN_HPP_NOEXCEPT { return m_queue; @@ -47345,7 +47470,8 @@ namespace VULKAN_HPP_NAMESPACE class Device; template <typename Dispatch> class UniqueHandleTraits<AccelerationStructureKHR, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; using UniqueAccelerationStructureKHR = UniqueHandle<AccelerationStructureKHR, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; - using UniqueAccelerationStructureNV = UniqueHandle<AccelerationStructureKHR, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; + template <typename Dispatch> class UniqueHandleTraits<AccelerationStructureNV, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; + using UniqueAccelerationStructureNV = UniqueHandle<AccelerationStructureNV, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; template <typename Dispatch> class UniqueHandleTraits<Buffer, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; using UniqueBuffer = UniqueHandle<Buffer, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; template <typename Dispatch> class UniqueHandleTraits<BufferView, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; @@ -47354,10 +47480,8 @@ namespace VULKAN_HPP_NAMESPACE using UniqueCommandBuffer = UniqueHandle<CommandBuffer, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; template <typename Dispatch> class UniqueHandleTraits<CommandPool, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; using UniqueCommandPool = UniqueHandle<CommandPool, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; -#ifdef VK_ENABLE_BETA_EXTENSIONS template <typename Dispatch> class UniqueHandleTraits<DeferredOperationKHR, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; using UniqueDeferredOperationKHR = UniqueHandle<DeferredOperationKHR, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <typename Dispatch> class UniqueHandleTraits<DescriptorPool, Dispatch> { public: using deleter = ObjectDestroy<Device, Dispatch>; }; using UniqueDescriptorPool = UniqueHandle<DescriptorPool, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; template <typename Dispatch> class UniqueHandleTraits<DescriptorSet, Dispatch> { public: using deleter = PoolFree<Device, DescriptorPool, Dispatch>; }; @@ -47462,2035 
+47586,2186 @@ namespace VULKAN_HPP_NAMESPACE } #endif + #ifdef VK_USE_PLATFORM_WIN32_KHR #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<uint32_t> acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<uint32_t> acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<uint32_t> acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, 
VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL>::type acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<uint32_t> acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL>::type acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL, Dispatch>>::type acquirePerformanceConfigurationINTELUnique( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result allocateCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<CommandBuffer>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<CommandBuffer>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, CommandBuffer>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<CommandBuffer, Dispatch>>> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<CommandBuffer,Dispatch>,Allocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<CommandBuffer, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<CommandBuffer, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<CommandBuffer,Dispatch>,Allocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result allocateCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DescriptorSet>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
ResultValueType<std::vector<DescriptorSet,Allocator>>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DescriptorSet>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DescriptorSet>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<DescriptorSet,Allocator>>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<DescriptorSet, Dispatch>>> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<DescriptorSet,Dispatch>,Allocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<DescriptorSet, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<DescriptorSet, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<DescriptorSet,Dispatch>,Allocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename CommandBufferAllocator = std::allocator<CommandBuffer>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<CommandBuffer, CommandBufferAllocator>>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename CommandBufferAllocator = std::allocator<CommandBuffer>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CommandBufferAllocator, typename std::enable_if<std::is_same<typename B::value_type, CommandBuffer>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<CommandBuffer, CommandBufferAllocator>>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename CommandBufferAllocator = std::allocator<UniqueHandle<CommandBuffer, Dispatch>>> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<CommandBuffer, Dispatch>, CommandBufferAllocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename CommandBufferAllocator = std::allocator<UniqueHandle<CommandBuffer, Dispatch>>, typename B = CommandBufferAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<CommandBuffer, Dispatch>>::value, int>::type = 0> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<CommandBuffer, Dispatch>, CommandBufferAllocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceMemory>::type allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<DeviceMemory,Dispatch>>::type allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename DescriptorSetAllocator = std::allocator<DescriptorSet>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<DescriptorSet, DescriptorSetAllocator>>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DescriptorSetAllocator = std::allocator<DescriptorSet>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DescriptorSetAllocator, typename std::enable_if<std::is_same<typename B::value_type, DescriptorSet>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<DescriptorSet, DescriptorSetAllocator>>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename DescriptorSetAllocator = std::allocator<UniqueHandle<DescriptorSet, Dispatch>>> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename DescriptorSetAllocator = std::allocator<UniqueHandle<DescriptorSet, Dispatch>>, typename B = DescriptorSetAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<DescriptorSet, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindAccelerationStructureMemoryKHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindAccelerationStructureMemoryKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR> const &bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceMemory>::type allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DeviceMemory, Dispatch>>::type allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindAccelerationStructureMemoryNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR> const &bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindAccelerationStructureMemoryNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV> const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const &bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> 
- VULKAN_HPP_NODISCARD Result bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const &bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindBufferMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const 
&bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result bindImageMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result bindImageMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const &bindInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type bindImageMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result buildAccelerationStructureKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result buildAccelerationStructureKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const &infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const > const &pOffsetInfos, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Result buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const > const & pBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( const 
CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR>::type createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<AccelerationStructureKHR,Dispatch>>::type createAccelerationStructureKHRUnique( 
const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR>::type createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR, Dispatch>>::type createAccelerationStructureKHRUnique( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureNV>::type createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<AccelerationStructureNV,Dispatch>>::type createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureNV>::type createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureNV, Dispatch>>::type createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Buffer>::type createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Buffer,Dispatch>>::type createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Buffer>::type createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Buffer, Dispatch>>::type createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createBufferView( const VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createBufferView( const 
VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::BufferView>::type createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<BufferView,Dispatch>>::type createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::BufferView>::type createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::BufferView, Dispatch>>::type createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::CommandPool>::type createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<CommandPool,Dispatch>>::type 
createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::CommandPool>::type createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::CommandPool, Dispatch>>::type createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<Pipeline> createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator 
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline,Dispatch>> createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<Pipeline> createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = 
std::allocator<UniqueHandle<Pipeline, Dispatch>>> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline, Dispatch>> createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR>::type createDeferredOperationKHR( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<DeferredOperationKHR,Dispatch>>::type createDeferredOperationKHRUnique( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR>::type createDeferredOperationKHR( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef 
VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DeferredOperationKHR, Dispatch>>::type createDeferredOperationKHRUnique( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorPool>::type createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<DescriptorPool,Dispatch>>::type createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorPool>::type createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorPool, Dispatch>>::type createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, 
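// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] Building a
// batch of compute pipelines with the enhanced-mode createComputePipelines
// overload declared a few lines above. Pipeline creation keeps its
// ResultValue return because it has additional success codes; a custom
// vector allocator could be supplied through the new PipelineAllocator
// template parameter. All identifiers are hypothetical; exceptions enabled.
#include <cassert>
#include <vector>
#include <vulkan/vulkan.hpp>

std::vector<vk::Pipeline> buildComputePipelines( vk::Device device,
                                                 vk::PipelineCache cache,
                                                 std::vector<vk::ComputePipelineCreateInfo> const & createInfos )
{
  vk::ResultValue<std::vector<vk::Pipeline>> rv = device.createComputePipelines( cache, createInfos );
  assert( rv.result == vk::Result::eSuccess );   // error codes would already have thrown
  return rv.value;
}
// ===========================================================================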
Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorSetLayout>::type createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<DescriptorSetLayout,Dispatch>>::type createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorSetLayout>::type createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorSetLayout, Dispatch>>::type createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<DescriptorUpdateTemplate,Dispatch>>::type createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>>::type createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<DescriptorUpdateTemplate,Dispatch>>::type createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, 
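// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] Creating a
// descriptor set layout and a matching pool with the Unique overloads
// declared above (exceptions enabled; all identifiers hypothetical).
#include <vulkan/vulkan.hpp>

struct DescriptorObjects
{
  vk::UniqueDescriptorSetLayout layout;
  vk::UniqueDescriptorPool      pool;
};

DescriptorObjects makeDescriptorObjects( vk::Device device )
{
  vk::DescriptorSetLayoutBinding binding( 0, vk::DescriptorType::eStorageBuffer, 1,
                                          vk::ShaderStageFlagBits::eCompute );
  vk::DescriptorSetLayoutCreateInfo layoutInfo( {}, 1, &binding );

  vk::DescriptorPoolSize       poolSize( vk::DescriptorType::eStorageBuffer, 1 );
  vk::DescriptorPoolCreateInfo poolInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, 1, &poolSize );

  return { device.createDescriptorSetLayoutUnique( layoutInfo ),
           device.createDescriptorPoolUnique( poolInfo ) };
}
// ===========================================================================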
Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>>::type createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Event>::type createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Event,Dispatch>>::type createEventUnique( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Event>::type createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Event, Dispatch>>::type createEventUnique( const EventCreateInfo & createInfo, Optional<const 
AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Framebuffer>::type createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Framebuffer,Dispatch>>::type createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Framebuffer>::type createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Framebuffer, Dispatch>>::type createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, 
Pipeline>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<Pipeline> createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline,Dispatch>> createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, 
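// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] The
// single-pipeline convenience overload createGraphicsPipelineUnique returns a
// ResultValue so that additional success codes can be inspected; error codes
// still throw when exceptions are enabled. `createInfo` is assumed to be
// fully populated elsewhere (hypothetical names).
#include <stdexcept>
#include <utility>
#include <vulkan/vulkan.hpp>

vk::UniquePipeline buildGraphicsPipeline( vk::Device device,
                                          vk::PipelineCache cache,
                                          vk::GraphicsPipelineCreateInfo const & createInfo )
{
  vk::ResultValue<vk::UniquePipeline> rv = device.createGraphicsPipelineUnique( cache, createInfo );
  if ( rv.result != vk::Result::eSuccess )
  {
    throw std::runtime_error( "graphics pipeline creation returned a non-eSuccess code" );
  }
  return std::move( rv.value );
}
// ===========================================================================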
PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<Pipeline> createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline, Dispatch>> createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Image>::type createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Image,Dispatch>>::type createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Image>::type createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Image, Dispatch>>::type createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageView>::type createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<ImageView,Dispatch>>::type createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageView>::type createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ImageView, Dispatch>>::type createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>::type createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<IndirectCommandsLayoutNV,Dispatch>>::type createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>::type createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV, Dispatch>>::type createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, 
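// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] createImage /
// createImageView through the Unique overloads above. Memory allocation and
// binding are omitted; an image must have memory bound before a view is
// created for it. Identifiers are hypothetical; exceptions enabled.
#include <vulkan/vulkan.hpp>

vk::UniqueImage makeSampledImage( vk::Device device, vk::Format format, vk::Extent2D extent )
{
  vk::ImageCreateInfo imageInfo( {}, vk::ImageType::e2D, format,
                                 vk::Extent3D( extent.width, extent.height, 1 ), 1, 1,
                                 vk::SampleCountFlagBits::e1, vk::ImageTiling::eOptimal,
                                 vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst );
  return device.createImageUnique( imageInfo );
}

// `image` must already have device memory bound before the view is created.
vk::UniqueImageView makeColorView( vk::Device device, vk::Image image, vk::Format format )
{
  vk::ImageViewCreateInfo viewInfo( {}, image, vk::ImageViewType::e2D, format, {},
                                    vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) );
  return device.createImageViewUnique( viewInfo );
}
// ===========================================================================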
VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineCache>::type createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<PipelineCache,Dispatch>>::type createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineCache>::type createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PipelineCache, Dispatch>>::type createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineLayout>::type createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> 
allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<PipelineLayout,Dispatch>>::type createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineLayout>::type createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PipelineLayout, Dispatch>>::type createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT>::type createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<PrivateDataSlotEXT,Dispatch>>::type createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT>::type createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator 
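// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] An empty
// pipeline cache plus a pipeline layout over a single descriptor set layout,
// using the Unique overloads above (identifiers hypothetical; exceptions
// enabled).
#include <utility>
#include <vulkan/vulkan.hpp>

std::pair<vk::UniquePipelineCache, vk::UniquePipelineLayout>
makePipelineCacheAndLayout( vk::Device device, vk::DescriptorSetLayout setLayout )
{
  vk::PipelineCacheCreateInfo  cacheInfo;                        // no initial cache data
  vk::PipelineLayoutCreateInfo layoutInfo( {}, 1, &setLayout );  // no push-constant ranges
  return { device.createPipelineCacheUnique( cacheInfo ),
           device.createPipelineLayoutUnique( layoutInfo ) };
}
// ===========================================================================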
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT, Dispatch>>::type createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::QueryPool>::type createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<QueryPool,Dispatch>>::type createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::QueryPool>::type createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::QueryPool, Dispatch>>::type createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const 
VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<Pipeline> createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline,Dispatch>> createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + 
template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<Pipeline> createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, 
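// ===========================================================================
// [Editor's sketch -- not part of vulkan.hpp or of this patch] The
// createRayTracingPipeline*KHR entry points now take a DeferredOperationKHR
// as their first argument; passing a null handle builds the pipeline
// synchronously. Assumes VK_KHR_ray_tracing_pipeline is enabled and
// `createInfo` is populated elsewhere (hypothetical names).
#include <vulkan/vulkan.hpp>

vk::Pipeline buildRayTracingPipeline( vk::Device device,
                                      vk::PipelineCache cache,
                                      vk::RayTracingPipelineCreateInfoKHR const & createInfo )
{
  vk::DeferredOperationKHR noDeferredOperation{};   // null handle: no host deferral
  vk::ResultValue<vk::Pipeline> rv =
      device.createRayTracingPipelineKHR( noDeferredOperation, cache, createInfo );
  // With a null deferred operation the expected success code is eSuccess;
  // the caller owns the returned pipeline and must destroy it.
  return ( rv.result == vk::Result::eSuccess ) ? rv.value : vk::Pipeline{};
}
// ===========================================================================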
VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline, Dispatch>> createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline,Allocator>> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<Pipeline> createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = 
std::allocator<UniqueHandle<Pipeline, Dispatch>>> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline,Dispatch>> createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineAllocator = std::allocator<Pipeline>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<Pipeline, PipelineAllocator>> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<Pipeline> createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createRayTracingPipelinesNVUnique( 
VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename PipelineAllocator = std::allocator<UniqueHandle<Pipeline, Dispatch>>, typename B = PipelineAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue<UniqueHandle<Pipeline, Dispatch>> createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, 
Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* 
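// Illustrative sketch of the smart-handle path declared above (assumes enhanced mode
// with exceptions; `device` and the attachment/subpass descriptions are hypothetical
// and set up elsewhere): createRenderPassUnique returns a vk::UniqueRenderPass, so the
// render pass is destroyed automatically when the handle goes out of scope.
vk::UniqueRenderPass makeRenderPass( vk::Device device,
                                     vk::AttachmentDescription const & colorAttachment,
                                     vk::SubpassDescription const & subpass )
{
  vk::RenderPassCreateInfo createInfo;
  createInfo.attachmentCount = 1;
  createInfo.pAttachments    = &colorAttachment;
  createInfo.subpassCount    = 1;
  createInfo.pSubpasses      = &subpass;
  return device.createRenderPassUnique( createInfo );  // throws vk::SystemError on failure
}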
pRenderPass, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Sampler>::type createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef 
VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Sampler,Dispatch>>::type createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Sampler>::type createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Sampler, Dispatch>>::type createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SamplerYcbcrConversion,Dispatch>>::type createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator 
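// Illustrative sketch for the createSampler overloads declared above (assumes enhanced
// mode with exceptions; `device` is a valid vk::Device created elsewhere):
vk::Sampler makeLinearRepeatSampler( vk::Device device )
{
  vk::SamplerCreateInfo samplerInfo;
  samplerInfo.magFilter    = vk::Filter::eLinear;
  samplerInfo.minFilter    = vk::Filter::eLinear;
  samplerInfo.addressModeU = vk::SamplerAddressMode::eRepeat;
  samplerInfo.addressModeV = vk::SamplerAddressMode::eRepeat;
  samplerInfo.addressModeW = vk::SamplerAddressMode::eRepeat;
  return device.createSampler( samplerInfo );  // returns the handle directly, throws on failure
}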
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>>::type createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SamplerYcbcrConversion,Dispatch>>::type createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>>::type createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Semaphore>::type createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Semaphore,Dispatch>>::type createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Semaphore>::type createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Semaphore, Dispatch>>::type createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ShaderModule>::type createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<ShaderModule,Dispatch>>::type createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ShaderModule>::type createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ShaderModule, Dispatch>>::type createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SwapchainKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SwapchainKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SwapchainKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type createSharedSwapchainsKHR( ArrayProxy<const 
VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<SwapchainKHR>::type createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<SwapchainKHR, Dispatch>>> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR,Dispatch>,Allocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename Allocator = std::allocator<UniqueHandle<SwapchainKHR, Dispatch>>, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<SwapchainKHR, Dispatch>>::value, int>::type = 0> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR,Dispatch>,Allocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SwapchainKHR,Dispatch>>::type createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename SwapchainKHRAllocator = std::allocator<SwapchainKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<SwapchainKHR, SwapchainKHRAllocator>>::type createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SwapchainKHRAllocator = std::allocator<SwapchainKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SwapchainKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, SwapchainKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<SwapchainKHR, SwapchainKHRAllocator>>::type createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
typename ResultValueType<SwapchainKHR>::type createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename SwapchainKHRAllocator = std::allocator<UniqueHandle<SwapchainKHR, Dispatch>>> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename SwapchainKHRAllocator = std::allocator<UniqueHandle<SwapchainKHR, Dispatch>>, typename B = SwapchainKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<SwapchainKHR, Dispatch>>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SwapchainKHR, Dispatch>>::type createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SwapchainKHR>::type createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SwapchainKHR,Dispatch>>::type 
createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SwapchainKHR>::type createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SwapchainKHR, Dispatch>>::type createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::ValidationCacheEXT>::type createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<ValidationCacheEXT,Dispatch>>::type createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::ValidationCacheEXT>::type createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ValidationCacheEXT, Dispatch>>::type createValidationCacheEXTUnique( const 
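// Illustrative sketch for the swapchain creation declared above (assumptions: the
// surface, format, extent and image count were queried from the physical device
// beforehand; the helper name is hypothetical). The *Unique variant ties the
// swapchain's lifetime to the returned handle.
vk::UniqueSwapchainKHR makeSwapchain( vk::Device device, vk::SurfaceKHR surface,
                                      vk::SurfaceFormatKHR surfaceFormat,
                                      vk::Extent2D extent, uint32_t minImageCount )
{
  vk::SwapchainCreateInfoKHR createInfo;
  createInfo.surface          = surface;
  createInfo.minImageCount    = minImageCount;
  createInfo.imageFormat      = surfaceFormat.format;
  createInfo.imageColorSpace  = surfaceFormat.colorSpace;
  createInfo.imageExtent      = extent;
  createInfo.imageArrayLayers = 1;
  createInfo.imageUsage       = vk::ImageUsageFlagBits::eColorAttachment;
  createInfo.preTransform     = vk::SurfaceTransformFlagBitsKHR::eIdentity;
  createInfo.compositeAlpha   = vk::CompositeAlphaFlagBitsKHR::eOpaque;
  createInfo.presentMode      = vk::PresentModeKHR::eFifo;
  createInfo.clipped          = VK_TRUE;
  return device.createSwapchainKHRUnique( createInfo );
}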
ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
 #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD Result debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ VULKAN_HPP_NODISCARD Result debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
 #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD Result debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ VULKAN_HPP_NODISCARD Result debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
 #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#ifdef VK_ENABLE_BETA_EXTENSIONS
+
 #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
 #else
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const
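// Illustrative sketch for the VK_EXT_debug_marker entry points declared above (assumes
// the extension is enabled on the device; the helper name is hypothetical):
void nameBuffer( vk::Device device, vk::Buffer buffer, const char * name )
{
  vk::DebugMarkerObjectNameInfoEXT nameInfo;
  nameInfo.objectType  = vk::DebugReportObjectTypeEXT::eBuffer;
  nameInfo.object      = uint64_t( static_cast<VkBuffer>( buffer ) );
  nameInfo.pObjectName = name;
  device.debugMarkerSetObjectNameEXT( nameInfo );  // enhanced mode: throws on failure
}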
&d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const 
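// Illustrative sketch for deferredOperationJoinKHR as declared above, now outside the
// VK_ENABLE_BETA_EXTENSIONS guard (assumption: a worker thread helps complete a deferred
// operation; eThreadDoneKHR and eThreadIdleKHR are ordinary success codes here):
void joinDeferredOperation( vk::Device device, vk::DeferredOperationKHR op )
{
  vk::Result r = vk::Result::eThreadIdleKHR;
  while ( ( r != vk::Result::eSuccess ) && ( r != vk::Result::eThreadDoneKHR ) )
  {
    r = device.deferredOperationJoinKHR( op );  // error codes throw in enhanced mode
  }
}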
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> 
+ void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename 
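// Illustrative sketch for the destroy overloads declared above: the named form, the
// type-dispatched destroy() form, and -- because the handle parameter now carries
// VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT -- a call with a default-constructed (null)
// handle all compile; destroying a VK_NULL_HANDLE buffer is a no-op. `device` and
// `buffer` are hypothetical, assumed-valid objects.
void releaseBuffer( vk::Device device, vk::Buffer buffer )
{
  device.destroyBuffer( buffer );   // named overload
  // equivalently: device.destroy( buffer );  (generic overload selected by handle type)
}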
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#ifdef VK_ENABLE_BETA_EXTENSIONS
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/
-#ifdef VK_ENABLE_BETA_EXTENSIONS
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+ void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
- void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT;
+
+ template
<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator 
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyEvent( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyEvent( VULKAN_HPP_NAMESPACE::Event event VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) 
const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( 
VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void 
destroyImage( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyImage( VULKAN_HPP_NAMESPACE::Image image VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( 
VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const 
AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache 
pipelineCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const 
VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, 
Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyRenderPass( 
VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void 
destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion 
VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const 
VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, 
Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitIdle(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type waitIdle(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD Result flushMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type flushMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const & memoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result flushMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type flushMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const &memoryRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Result freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Result free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const 
VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR* pSizeInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const AccelerationStructureBuildGeometryInfoKHR & buildInfo, ArrayProxy<const uint32_t> const & maxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getAccelerationStructureAddressKHR( const 
VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getAccelerationStructureAddressKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, ArrayProxy<T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, ArrayProxy<T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, size_t dataSize, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Dispatch 
const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getAccelerationStructureMemoryRequirementsKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoKHR* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getAccelerationStructureMemoryRequirementsKHR( const AccelerationStructureMemoryRequirementsInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getAccelerationStructureMemoryRequirementsKHR( const AccelerationStructureMemoryRequirementsInfoKHR & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - DeviceAddress getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceAddress getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - 
template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const &timestampInfos, ArrayProxy<uint64_t> const &timestamps, Dispatch const &d
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const &timestampInfos, ArrayProxy<uint64_t> const &timestamps, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Uint64_tAllocator = std::allocator<uint64_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::pair<std::vector<uint64_t, Uint64_tAllocator>, uint64_t>>::type getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const & timestampInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Uint64_tAllocator = std::allocator<uint64_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint64_tAllocator, typename std::enable_if<std::is_same<typename B::value_type, uint64_t>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::pair<std::vector<uint64_t, Uint64_tAllocator>, uint64_t>>::type getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const & timestampInfos, Uint64_tAllocator & uint64_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint32_t getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint32_t getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint32_t getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport 
getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionKHR* version, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionInfoKHR* pVersionInfo, VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR* pCompatibility, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionKHR & version, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, 
VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR>::type getGroupPresentCapabilitiesKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR>::type getGroupPresentCapabilitiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type getGroupSurfacePresentModes2EXT( const 
PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::DeviceSize getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceSize getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getMemoryOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getMemoryOpaqueCaptureAddress( const 
VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint64_t getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::Queue getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Queue getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueue2( const VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueue2( const VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::Queue getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Queue getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const & 
d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch 
= VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT>::type getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT>::type getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageMemoryRequirements>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageMemoryRequirements,Allocator> getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageMemoryRequirements>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements>::value, int>::type = 0> - std::vector<SparseImageMemoryRequirements,Allocator> getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageMemoryRequirementsAllocator = std::allocator<SparseImageMemoryRequirements>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageMemoryRequirementsAllocator = std::allocator<SparseImageMemoryRequirements>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirementsAllocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, SparseImageMemoryRequirementsAllocator & sparseImageMemoryRequirementsAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageSparseMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageSparseMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* 
pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageMemoryRequirements2,Allocator> getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type = 0> - std::vector<SparseImageMemoryRequirements2,Allocator> getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageMemoryRequirements2Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageMemoryRequirements2Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirements2Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageMemoryRequirements2,Allocator> getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, 
typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type = 0> - std::vector<SparseImageMemoryRequirements2,Allocator> getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageMemoryRequirements2Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageMemoryRequirements2Allocator = std::allocator<SparseImageMemoryRequirements2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirements2Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::SubresourceLayout getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX>::type getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX>::type getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint32_t getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint32_t getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint32_t getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + uint32_t getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<struct AHardwareBuffer*>::type getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<struct AHardwareBuffer*>::type getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD Result getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR>::type getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR>::type getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT>::type getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; 
+ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT>::type getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits 
handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR>::type getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR>::type getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PastPresentationTimingGOOGLE>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PastPresentationTimingGOOGLE>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PastPresentationTimingGOOGLE>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PastPresentationTimingGOOGLEAllocator = std::allocator<PastPresentationTimingGOOGLE>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator>>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch 
const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PastPresentationTimingGOOGLEAllocator = std::allocator<PastPresentationTimingGOOGLE>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PastPresentationTimingGOOGLEAllocator, typename std::enable_if<std::is_same<typename B::value_type, PastPresentationTimingGOOGLE>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator>>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, PastPresentationTimingGOOGLEAllocator & pastPresentationTimingGOOGLEAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceValueINTEL>::type getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceValueINTEL>::type getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getPipelineCacheData( 
VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PipelineExecutableInternalRepresentationKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR,Allocator>>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PipelineExecutableInternalRepresentationKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableInternalRepresentationKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR,Allocator>>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PipelineExecutableInternalRepresentationKHRAllocator = std::allocator<PipelineExecutableInternalRepresentationKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator>>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename 
PipelineExecutableInternalRepresentationKHRAllocator = std::allocator<PipelineExecutableInternalRepresentationKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutableInternalRepresentationKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableInternalRepresentationKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator>>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableInternalRepresentationKHRAllocator & pipelineExecutableInternalRepresentationKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PipelineExecutablePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR,Allocator>>::type getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PipelineExecutablePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutablePropertiesKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR,Allocator>>::type getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PipelineExecutablePropertiesKHRAllocator = std::allocator<PipelineExecutablePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator>>::type getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineExecutablePropertiesKHRAllocator = std::allocator<PipelineExecutablePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutablePropertiesKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutablePropertiesKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator>>::type getPipelineExecutablePropertiesKHR( const 
PipelineInfoKHR & pipelineInfo, PipelineExecutablePropertiesKHRAllocator & pipelineExecutablePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPipelineExecutableStatisticsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPipelineExecutableStatisticsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PipelineExecutableStatisticKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableStatisticKHR,Allocator>>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PipelineExecutableStatisticKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableStatisticKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableStatisticKHR,Allocator>>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PipelineExecutableStatisticKHRAllocator = std::allocator<PipelineExecutableStatisticKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator>>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PipelineExecutableStatisticKHRAllocator = std::allocator<PipelineExecutableStatisticKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutableStatisticKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableStatisticKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator>>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableStatisticKHRAllocator & pipelineExecutableStatisticKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint64_t getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD uint64_t getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD Result getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy<T> const &data, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<std::vector<T,Allocator>> getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD ResultValue<std::vector<T,Allocator>> getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD ResultValue<T> getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD ResultValue<T> getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, 
uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - -#ifdef VK_ENABLE_BETA_EXTENSIONS template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy<T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - -#ifdef VK_ENABLE_BETA_EXTENSIONS template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy<T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy<T> const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + DeviceSize getRayTracingShaderGroupStackSizeKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE>::type getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE>::type getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::Extent2D getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<int>::type getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<HANDLE>::type getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, 
VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<uint64_t>::type getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Image>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Image,Allocator>>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Image>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, Image>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename 
ResultValueType<std::vector<Image,Allocator>>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename ImageAllocator = std::allocator<Image>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Image, ImageAllocator>>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename ImageAllocator = std::allocator<Image>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ImageAllocator, typename std::enable_if<std::is_same<typename B::value_type, Image>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Image, ImageAllocator>>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, ImageAllocator & imageAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t,Allocator>>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Allocator 
const& vectorAllocator, Dispatch const &d ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Uint8_tAllocator = std::allocator<uint8_t>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result importFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result importFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif 
/*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & 
initializeInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type invalidateMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const &memoryRanges, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type invalidateMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const & memoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void*>::type mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void*>::type mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> 
- VULKAN_HPP_NODISCARD Result mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::PipelineCache> const &srcCaches, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::PipelineCache> const & srcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ValidationCacheEXT> const &srcCaches, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ValidationCacheEXT> const & srcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & 
d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) 
const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void releaseProfilingLockKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void releaseProfilingLockKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void releaseProfilingLockKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Result resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type resetFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const &fences, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type resetFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const & fences, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void resetQueryPoolEXT( 
VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD Result setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setHdrMetadataEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainKHR> const &swapchains, ArrayProxy<const VULKAN_HPP_NAMESPACE::HdrMetadataEXT> const &metadata, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setHdrMetadataEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainKHR> const & swapchains, ArrayProxy<const VULKAN_HPP_NAMESPACE::HdrMetadataEXT> const & metadata, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType 
objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void uninitializePerformanceApiINTEL(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void uninitializePerformanceApiINTEL(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void uninitializePerformanceApiINTEL( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void updateDescriptorSets( ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const &descriptorWrites, ArrayProxy<const VULKAN_HPP_NAMESPACE::CopyDescriptorSet> const &descriptorCopies, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void updateDescriptorSets( ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const & descriptorWrites, ArrayProxy<const VULKAN_HPP_NAMESPACE::CopyDescriptorSet> const & descriptorCopies, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitForFences( uint32_t fenceCount, const 
VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitForFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitForFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const &fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitForFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const & fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD 
Result writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy<T> const &data, size_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy<T> const &data, size_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename T, typename Allocator = std::allocator<T>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<std::vector<T,Allocator>>::type writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<T>::type writeAccelerationStructuresPropertyKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDevice() const VULKAN_HPP_NOEXCEPT { @@ -51751,6 +52026,83 @@ namespace VULKAN_HPP_NAMESPACE }; using FormatProperties2KHR = FormatProperties2; + struct PhysicalDeviceFragmentShadingRateKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateKHR(VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts_ = {}, VULKAN_HPP_NAMESPACE::Extent2D fragmentSize_ = {}) VULKAN_HPP_NOEXCEPT + : sampleCounts( sampleCounts_ ), fragmentSize( fragmentSize_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateKHR( PhysicalDeviceFragmentShadingRateKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateKHR( VkPhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateKHR & operator=( VkPhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateKHR & operator=( PhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateKHR*>( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceFragmentShadingRateKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleCounts == rhs.sampleCounts ) + && ( fragmentSize == rhs.fragmentSize ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts = {}; + VULKAN_HPP_NAMESPACE::Extent2D fragmentSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRateKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceFragmentShadingRateKHR>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentShadingRateKHR> + { + using Type = PhysicalDeviceFragmentShadingRateKHR; + }; + struct PhysicalDeviceImageFormatInfo2 { static const bool allowDuplicate = false; @@ -53878,572 +54230,636 @@ namespace VULKAN_HPP_NAMESPACE } #endif + #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Device>::type createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Device,Dispatch>>::type createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Device>::type createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Device, Dispatch>>::type createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayModeKHR>::type createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayModeKHR>::type createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DisplayModeKHR, Dispatch>>::type createDisplayModeKHRUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, 
VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateDeviceExtensionProperties( Optional<const std::string> layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateDeviceExtensionProperties( Optional<const std::string> layerName, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename ExtensionPropertiesAllocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateDeviceExtensionProperties( Optional<const std::string> layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename ExtensionPropertiesAllocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ExtensionPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateDeviceExtensionProperties( Optional<const std::string> layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateDeviceLayerProperties(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateDeviceLayerProperties(Allocator const& 
vectorAllocator, Dispatch const &d ) const; + template <typename LayerPropertiesAllocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type enumerateDeviceLayerProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename LayerPropertiesAllocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = LayerPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type enumerateDeviceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template <typename Allocator = std::allocator<PerformanceCounterDescriptionKHR>, - typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + template <typename Allocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PerformanceCounterDescriptionKHR,Allocator>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy<VULKAN_HPP_NAMESPACE::PerformanceCounterKHR> const &counters, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template <typename Allocator = std::allocator<PerformanceCounterDescriptionKHR>, - typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, - typename B = Allocator, - typename std::enable_if<std::is_same<typename B::value_type, PerformanceCounterDescriptionKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PerformanceCounterDescriptionKHR,Allocator>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy<VULKAN_HPP_NAMESPACE::PerformanceCounterKHR> const &counters, Allocator const& vectorAllocator, Dispatch const &d ) const; - template <typename PerformanceCounterKHRAllocator = std::allocator<PerformanceCounterKHR>, typename PerformanceCounterDescriptionKHRAllocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + template <typename Allocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, 
PerformanceCounterDescriptionKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PerformanceCounterDescriptionKHR,Allocator>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy<VULKAN_HPP_NAMESPACE::PerformanceCounterKHR> const &counters, Allocator const& vectorAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PerformanceCounterKHRAllocator = std::allocator<PerformanceCounterKHR>, typename PerformanceCounterDescriptionKHRAllocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> VULKAN_HPP_NODISCARD typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template <typename PerformanceCounterKHRAllocator = std::allocator<PerformanceCounterKHR>, typename PerformanceCounterDescriptionKHRAllocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B1 = PerformanceCounterKHRAllocator, typename B2 = PerformanceCounterDescriptionKHRAllocator, typename std::enable_if < std::is_same<typename B1::value_type, PerformanceCounterKHR>::value && std::is_same<typename B2::value_type, PerformanceCounterDescriptionKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & countersAllocator, PerformanceCounterDescriptionKHRAllocator & counterDescriptionsAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PerformanceCounterKHRAllocator = std::allocator<PerformanceCounterKHR>, typename PerformanceCounterDescriptionKHRAllocator = std::allocator<PerformanceCounterDescriptionKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B1 = PerformanceCounterKHRAllocator, typename B2 = PerformanceCounterDescriptionKHRAllocator, typename std::enable_if<std::is_same<typename B1::value_type, PerformanceCounterKHR>::value && std::is_same<typename B2::value_type, PerformanceCounterDescriptionKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & performanceCounterKHRAllocator, PerformanceCounterDescriptionKHRAllocator & performanceCounterDescriptionKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template 
<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayModeProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModeProperties2KHR,Allocator>>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayModeProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayModeProperties2KHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModeProperties2KHR,Allocator>>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayModeProperties2KHRAllocator = std::allocator<DisplayModeProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator>>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayModeProperties2KHRAllocator = std::allocator<DisplayModeProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayModeProperties2KHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayModeProperties2KHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator>>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModeProperties2KHRAllocator & displayModeProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayModePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = 
std::allocator<DisplayModePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayModePropertiesKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayModePropertiesKHRAllocator = std::allocator<DisplayModePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator>>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayModePropertiesKHRAllocator = std::allocator<DisplayModePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayModePropertiesKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayModePropertiesKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator>>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModePropertiesKHRAllocator & displayModePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilities2KHR( const VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilities2KHR( const VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR>::type getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR>::type getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilitiesKHR( 
VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR>::type getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR>::type getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayKHRAllocator = std::allocator<DisplayKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayKHR, DisplayKHRAllocator>>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayKHRAllocator = std::allocator<DisplayKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayKHR, DisplayKHRAllocator>>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, DisplayKHRAllocator & displayKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, 
VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<TimeDomainEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<TimeDomainEXT,Allocator>>::type getCalibrateableTimeDomainsEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<TimeDomainEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, TimeDomainEXT>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<TimeDomainEXT,Allocator>>::type getCalibrateableTimeDomainsEXT(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename TimeDomainEXTAllocator = std::allocator<TimeDomainEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<TimeDomainEXT, TimeDomainEXTAllocator>>::type getCalibrateableTimeDomainsEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename TimeDomainEXTAllocator = std::allocator<TimeDomainEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = TimeDomainEXTAllocator, typename std::enable_if<std::is_same<typename B::value_type, TimeDomainEXT>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<TimeDomainEXT, TimeDomainEXTAllocator>>::type getCalibrateableTimeDomainsEXT( TimeDomainEXTAllocator & timeDomainEXTAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<CooperativeMatrixPropertiesNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV,Allocator>>::type getCooperativeMatrixPropertiesNV(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<CooperativeMatrixPropertiesNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, CooperativeMatrixPropertiesNV>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV,Allocator>>::type 
getCooperativeMatrixPropertiesNV(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename CooperativeMatrixPropertiesNVAllocator = std::allocator<CooperativeMatrixPropertiesNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator>>::type getCooperativeMatrixPropertiesNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename CooperativeMatrixPropertiesNVAllocator = std::allocator<CooperativeMatrixPropertiesNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CooperativeMatrixPropertiesNVAllocator, typename std::enable_if<std::is_same<typename B::value_type, CooperativeMatrixPropertiesNV>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator>>::type getCooperativeMatrixPropertiesNV( CooperativeMatrixPropertiesNVAllocator & cooperativeMatrixPropertiesNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_DIRECTFB_EXT - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename DisplayPlaneProperties2KHRAllocator = std::allocator<DisplayPlaneProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator>>::type getDisplayPlaneProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayPlaneProperties2KHRAllocator = std::allocator<DisplayPlaneProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = 
DisplayPlaneProperties2KHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlaneProperties2KHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator>>::type getDisplayPlaneProperties2KHR( DisplayPlaneProperties2KHRAllocator & displayPlaneProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayPlaneProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlaneProperties2KHR,Allocator>>::type getDisplayPlaneProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayPlaneProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlaneProperties2KHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlaneProperties2KHR,Allocator>>::type getDisplayPlaneProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayPlanePropertiesKHRAllocator = std::allocator<DisplayPlanePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator>>::type getDisplayPlanePropertiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayPlanePropertiesKHRAllocator = std::allocator<DisplayPlanePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayPlanePropertiesKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlanePropertiesKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator>>::type getDisplayPlanePropertiesKHR( DisplayPlanePropertiesKHRAllocator & displayPlanePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayPlanePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename 
ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type getDisplayPlanePropertiesKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayPlanePropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlanePropertiesKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type getDisplayPlanePropertiesKHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayProperties2KHRAllocator = std::allocator<DisplayProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator>>::type getDisplayProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayProperties2KHRAllocator = std::allocator<DisplayProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayProperties2KHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayProperties2KHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator>>::type getDisplayProperties2KHR( DisplayProperties2KHRAllocator & displayProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayProperties2KHR,Allocator>>::type getDisplayProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayProperties2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayProperties2KHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayProperties2KHR,Allocator>>::type getDisplayProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename DisplayPropertiesKHRAllocator = std::allocator<DisplayPropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator>>::type getDisplayPropertiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename DisplayPropertiesKHRAllocator = std::allocator<DisplayPropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = 
DisplayPropertiesKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPropertiesKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator>>::type getDisplayPropertiesKHR( DisplayPropertiesKHRAllocator & displayPropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<DisplayPropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type getDisplayPropertiesKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<DisplayPropertiesKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, DisplayPropertiesKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type getDisplayPropertiesKHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, 
VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV>::type getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV>::type getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* 
pExternalSemaphoreProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures getFeatures( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures getFeatures(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getFeatures2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getFeatures2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getFeatures2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getFeatures2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, 
VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::FormatProperties getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getFragmentShadingRatesKHR( uint32_t* pFragmentShadingRateCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename PhysicalDeviceFragmentShadingRateKHRAllocator = std::allocator<PhysicalDeviceFragmentShadingRateKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator>>::type getFragmentShadingRatesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PhysicalDeviceFragmentShadingRateKHRAllocator = std::allocator<PhysicalDeviceFragmentShadingRateKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceFragmentShadingRateKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceFragmentShadingRateKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator>>::type getFragmentShadingRatesKHR( PhysicalDeviceFragmentShadingRateKHRAllocator & physicalDeviceFragmentShadingRateKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* pImageFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* 
pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties>::type getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties>::type getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties getMemoryProperties(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties getMemoryProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getMemoryProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getMemoryProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getMemoryProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getMemoryProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<Rect2D>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Rect2D,Allocator>>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<Rect2D>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, Rect2D>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Rect2D,Allocator>>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename Rect2DAllocator = std::allocator<Rect2D>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Rect2D, Rect2DAllocator>>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Rect2DAllocator = std::allocator<Rect2D>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Rect2DAllocator, typename std::enable_if<std::is_same<typename B::value_type, Rect2D>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<Rect2D, Rect2DAllocator>>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Rect2DAllocator & rect2DAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties getProperties(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties getProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - 
template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - StructureChain<X, Y, Z...> getProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename X, typename Y, typename... 
Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD StructureChain<X, Y, Z...> getProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - uint32_t getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD uint32_t getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<QueueFamilyProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<QueueFamilyProperties,Allocator> getQueueFamilyProperties(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<QueueFamilyProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties>::value, int>::type = 0> - std::vector<QueueFamilyProperties,Allocator> getQueueFamilyProperties(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename QueueFamilyPropertiesAllocator = std::allocator<QueueFamilyProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties, QueueFamilyPropertiesAllocator> getQueueFamilyProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename QueueFamilyPropertiesAllocator = std::allocator<QueueFamilyProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties, 
QueueFamilyPropertiesAllocator> getQueueFamilyProperties( QueueFamilyPropertiesAllocator & queueFamilyPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<QueueFamilyProperties2,Allocator> getQueueFamilyProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type = 0> - std::vector<QueueFamilyProperties2,Allocator> getQueueFamilyProperties2(Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename StructureChain, typename Allocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<StructureChain,Allocator> getQueueFamilyProperties2(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename StructureChain, typename Allocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type = 0> - std::vector<StructureChain,Allocator> getQueueFamilyProperties2(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename QueueFamilyProperties2Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> getQueueFamilyProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename QueueFamilyProperties2Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyProperties2Allocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> getQueueFamilyProperties2( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename StructureChain, typename StructureChainAllocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<StructureChain, StructureChainAllocator> getQueueFamilyProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename StructureChain, typename StructureChainAllocator = std::allocator<StructureChain>, 
typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = StructureChainAllocator, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<StructureChain, StructureChainAllocator> getQueueFamilyProperties2( StructureChainAllocator & structureChainAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<QueueFamilyProperties2,Allocator> getQueueFamilyProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type = 0> - std::vector<QueueFamilyProperties2,Allocator> getQueueFamilyProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const; - template<typename StructureChain, typename Allocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<StructureChain,Allocator> getQueueFamilyProperties2KHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename StructureChain, typename Allocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type = 0> - std::vector<StructureChain,Allocator> getQueueFamilyProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename QueueFamilyProperties2Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> getQueueFamilyProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename QueueFamilyProperties2Allocator = std::allocator<QueueFamilyProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyProperties2Allocator, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> getQueueFamilyProperties2KHR( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename StructureChain, typename StructureChainAllocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD 
std::vector<StructureChain, StructureChainAllocator> getQueueFamilyProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename StructureChain, typename StructureChainAllocator = std::allocator<StructureChain>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = StructureChainAllocator, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<StructureChain, StructureChainAllocator> getQueueFamilyProperties2KHR( StructureChainAllocator & structureChainAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageFormatProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageFormatProperties,Allocator> getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageFormatProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties>::value, int>::type = 0> - std::vector<SparseImageFormatProperties,Allocator> getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageFormatPropertiesAllocator = std::allocator<SparseImageFormatProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageFormatPropertiesAllocator = 
std::allocator<SparseImageFormatProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, SparseImageFormatPropertiesAllocator & sparseImageFormatPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageFormatProperties2,Allocator> getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type = 0> - std::vector<SparseImageFormatProperties2,Allocator> getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageFormatProperties2Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageFormatProperties2Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatProperties2Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - 
template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - std::vector<SparseImageFormatProperties2,Allocator> getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type = 0> - std::vector<SparseImageFormatProperties2,Allocator> getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SparseImageFormatProperties2Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SparseImageFormatProperties2Allocator = std::allocator<SparseImageFormatProperties2>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatProperties2Allocator, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = 
std::allocator<FramebufferMixedSamplesCombinationNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV,Allocator>>::type getSupportedFramebufferMixedSamplesCombinationsNV(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<FramebufferMixedSamplesCombinationNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, FramebufferMixedSamplesCombinationNV>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV,Allocator>>::type getSupportedFramebufferMixedSamplesCombinationsNV(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename FramebufferMixedSamplesCombinationNVAllocator = std::allocator<FramebufferMixedSamplesCombinationNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator>>::type getSupportedFramebufferMixedSamplesCombinationsNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename FramebufferMixedSamplesCombinationNVAllocator = std::allocator<FramebufferMixedSamplesCombinationNV>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = FramebufferMixedSamplesCombinationNVAllocator, typename std::enable_if<std::is_same<typename B::value_type, FramebufferMixedSamplesCombinationNV>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator>>::type getSupportedFramebufferMixedSamplesCombinationsNV( FramebufferMixedSamplesCombinationNVAllocator & framebufferMixedSamplesCombinationNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT>::type getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT>::type getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - 
VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename X, typename Y, typename ...Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename X, typename Y, typename... Z, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<StructureChain<X, Y, Z...>>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR>::type getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR>::type getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SurfaceFormat2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SurfaceFormat2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormat2KHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SurfaceFormat2KHRAllocator = std::allocator<SurfaceFormat2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator>>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SurfaceFormat2KHRAllocator = std::allocator<SurfaceFormat2KHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SurfaceFormat2KHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormat2KHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator>>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, SurfaceFormat2KHRAllocator & surfaceFormat2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* pSurfaceFormats, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* pSurfaceFormats, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<SurfaceFormatKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename 
ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<SurfaceFormatKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormatKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename SurfaceFormatKHRAllocator = std::allocator<SurfaceFormatKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator>>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename SurfaceFormatKHRAllocator = std::allocator<SurfaceFormatKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SurfaceFormatKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormatKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator>>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, SurfaceFormatKHRAllocator & surfaceFormatKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PresentModeKHRAllocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR, 
PresentModeKHRAllocator>>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PresentModeKHRAllocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PresentModeKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PresentModeKHRAllocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PresentModeKHRAllocator = std::allocator<PresentModeKHR>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PresentModeKHRAllocator, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR 
surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Bool32>::type getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Bool32>::type getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PhysicalDeviceToolPropertiesEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT,Allocator>>::type getToolPropertiesEXT(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PhysicalDeviceToolPropertiesEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceToolPropertiesEXT>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT,Allocator>>::type getToolPropertiesEXT(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PhysicalDeviceToolPropertiesEXTAllocator = std::allocator<PhysicalDeviceToolPropertiesEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type getToolPropertiesEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PhysicalDeviceToolPropertiesEXTAllocator = std::allocator<PhysicalDeviceToolPropertiesEXT>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceToolPropertiesEXTAllocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceToolPropertiesEXT>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type 
getToolPropertiesEXT( PhysicalDeviceToolPropertiesEXTAllocator & physicalDeviceToolPropertiesEXTAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WAYLAND_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; -#else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_XCB_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XCB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_KHR - template<typename 
Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayKHR>::type getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayKHR>::type getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DisplayKHR, Dispatch>>::type getRandROutputDisplayEXTUnique( Display & dpy, RROutput rrOutput, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + Result releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #else - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<void>::type releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<void>::type releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPhysicalDevice() const VULKAN_HPP_NOEXCEPT { return m_physicalDevice; @@ -55173,6 +55589,93 @@ namespace VULKAN_HPP_NAMESPACE using Type = DeviceMemoryOverallocationCreateInfoAMD; }; + struct DeviceMemoryReportCallbackDataEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceMemoryReportCallbackDataEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceMemoryReportCallbackDataEXT(VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT type_ = VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT::eAllocate, uint64_t memoryObjectId_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::ObjectType objectType_ = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown, uint64_t objectHandle_ = {}, uint32_t heapIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), type( type_ ), memoryObjectId( memoryObjectId_ ), size( size_ ), objectType( objectType_ ), objectHandle( objectHandle_ ), heapIndex( heapIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemoryReportCallbackDataEXT( DeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceMemoryReportCallbackDataEXT( VkDeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceMemoryReportCallbackDataEXT & operator=( VkDeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DeviceMemoryReportCallbackDataEXT const *>( &rhs ); + return *this; + } + + DeviceMemoryReportCallbackDataEXT & operator=( DeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( DeviceMemoryReportCallbackDataEXT ) ); + return *this; + } + + + operator VkDeviceMemoryReportCallbackDataEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkDeviceMemoryReportCallbackDataEXT*>( this ); + } + + operator VkDeviceMemoryReportCallbackDataEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkDeviceMemoryReportCallbackDataEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceMemoryReportCallbackDataEXT const& ) const = default; +#else + bool operator==( DeviceMemoryReportCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( type == rhs.type ) + && ( memoryObjectId == rhs.memoryObjectId ) + && ( size == rhs.size ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( heapIndex == rhs.heapIndex ); + } + + bool operator!=( DeviceMemoryReportCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceMemoryReportCallbackDataEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT type = VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT::eAllocate; + 
uint64_t memoryObjectId = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown; + uint64_t objectHandle = {}; + uint32_t heapIndex = {}; + + }; + static_assert( sizeof( DeviceMemoryReportCallbackDataEXT ) == sizeof( VkDeviceMemoryReportCallbackDataEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<DeviceMemoryReportCallbackDataEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eDeviceMemoryReportCallbackDataEXT> + { + using Type = DeviceMemoryReportCallbackDataEXT; + }; + struct DevicePrivateDataCreateInfoEXT { static const bool allowDuplicate = true; @@ -57552,6 +58055,101 @@ namespace VULKAN_HPP_NAMESPACE using Type = FilterCubicImageViewImageFormatPropertiesEXT; }; + struct FragmentShadingRateAttachmentInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFragmentShadingRateAttachmentInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FragmentShadingRateAttachmentInfoKHR(const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment_ = {}, VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize_ = {}) VULKAN_HPP_NOEXCEPT + : pFragmentShadingRateAttachment( pFragmentShadingRateAttachment_ ), shadingRateAttachmentTexelSize( shadingRateAttachmentTexelSize_ ) + {} + + VULKAN_HPP_CONSTEXPR FragmentShadingRateAttachmentInfoKHR( FragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FragmentShadingRateAttachmentInfoKHR( VkFragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FragmentShadingRateAttachmentInfoKHR & operator=( VkFragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::FragmentShadingRateAttachmentInfoKHR const *>( &rhs ); + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & operator=( FragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( FragmentShadingRateAttachmentInfoKHR ) ); + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setPFragmentShadingRateAttachment( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment_ ) VULKAN_HPP_NOEXCEPT + { + pFragmentShadingRateAttachment = pFragmentShadingRateAttachment_; + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setShadingRateAttachmentTexelSize( VULKAN_HPP_NAMESPACE::Extent2D const & shadingRateAttachmentTexelSize_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateAttachmentTexelSize = shadingRateAttachmentTexelSize_; + return *this; + } + + + operator VkFragmentShadingRateAttachmentInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkFragmentShadingRateAttachmentInfoKHR*>( this ); + } + + operator VkFragmentShadingRateAttachmentInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FragmentShadingRateAttachmentInfoKHR const& ) const = default; +#else + bool operator==( 
FragmentShadingRateAttachmentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pFragmentShadingRateAttachment == rhs.pFragmentShadingRateAttachment ) + && ( shadingRateAttachmentTexelSize == rhs.shadingRateAttachmentTexelSize ); + } + + bool operator!=( FragmentShadingRateAttachmentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFragmentShadingRateAttachmentInfoKHR; + const void* pNext = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment = {}; + VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize = {}; + + }; + static_assert( sizeof( FragmentShadingRateAttachmentInfoKHR ) == sizeof( VkFragmentShadingRateAttachmentInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<FragmentShadingRateAttachmentInfoKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eFragmentShadingRateAttachmentInfoKHR> + { + using Type = FragmentShadingRateAttachmentInfoKHR; + }; + struct FramebufferAttachmentImageInfo { static const bool allowDuplicate = false; @@ -61067,6 +61665,214 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceASTCDecodeFeaturesEXT; }; + struct PhysicalDeviceAccelerationStructureFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructureFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructure( accelerationStructure_ ), accelerationStructureCaptureReplay( accelerationStructureCaptureReplay_ ), accelerationStructureIndirectBuild( accelerationStructureIndirectBuild_ ), accelerationStructureHostCommands( accelerationStructureHostCommands_ ), descriptorBindingAccelerationStructureUpdateAfterBind( descriptorBindingAccelerationStructureUpdateAfterBind_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructureFeaturesKHR( PhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceAccelerationStructureFeaturesKHR( VkPhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceAccelerationStructureFeaturesKHR & operator=( VkPhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceAccelerationStructureFeaturesKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & operator=( PhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceAccelerationStructureFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setPNext( void* 
pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructure( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructure = accelerationStructure_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCaptureReplay = accelerationStructureCaptureReplay_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureIndirectBuild( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureIndirectBuild = accelerationStructureIndirectBuild_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureHostCommands( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureHostCommands = accelerationStructureHostCommands_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setDescriptorBindingAccelerationStructureUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingAccelerationStructureUpdateAfterBind = descriptorBindingAccelerationStructureUpdateAfterBind_; + return *this; + } + + + operator VkPhysicalDeviceAccelerationStructureFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceAccelerationStructureFeaturesKHR*>( this ); + } + + operator VkPhysicalDeviceAccelerationStructureFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceAccelerationStructureFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceAccelerationStructureFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ) + && ( accelerationStructureCaptureReplay == rhs.accelerationStructureCaptureReplay ) + && ( accelerationStructureIndirectBuild == rhs.accelerationStructureIndirectBuild ) + && ( accelerationStructureHostCommands == rhs.accelerationStructureHostCommands ) + && ( descriptorBindingAccelerationStructureUpdateAfterBind == rhs.descriptorBindingAccelerationStructureUpdateAfterBind ); + } + + bool operator!=( PhysicalDeviceAccelerationStructureFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind = {}; + + }; + static_assert( sizeof( PhysicalDeviceAccelerationStructureFeaturesKHR ) == sizeof( VkPhysicalDeviceAccelerationStructureFeaturesKHR ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout<PhysicalDeviceAccelerationStructureFeaturesKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR> + { + using Type = PhysicalDeviceAccelerationStructureFeaturesKHR; + }; + + struct PhysicalDeviceAccelerationStructurePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructurePropertiesKHR(uint64_t maxGeometryCount_ = {}, uint64_t maxInstanceCount_ = {}, uint64_t maxPrimitiveCount_ = {}, uint32_t maxPerStageDescriptorAccelerationStructures_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ = {}, uint32_t maxDescriptorSetAccelerationStructures_ = {}, uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures_ = {}, uint32_t minAccelerationStructureScratchOffsetAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : maxGeometryCount( maxGeometryCount_ ), maxInstanceCount( maxInstanceCount_ ), maxPrimitiveCount( maxPrimitiveCount_ ), maxPerStageDescriptorAccelerationStructures( maxPerStageDescriptorAccelerationStructures_ ), maxPerStageDescriptorUpdateAfterBindAccelerationStructures( maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ ), maxDescriptorSetAccelerationStructures( maxDescriptorSetAccelerationStructures_ ), maxDescriptorSetUpdateAfterBindAccelerationStructures( maxDescriptorSetUpdateAfterBindAccelerationStructures_ ), minAccelerationStructureScratchOffsetAlignment( minAccelerationStructureScratchOffsetAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructurePropertiesKHR( PhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceAccelerationStructurePropertiesKHR( VkPhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceAccelerationStructurePropertiesKHR & operator=( VkPhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceAccelerationStructurePropertiesKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceAccelerationStructurePropertiesKHR & operator=( PhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceAccelerationStructurePropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceAccelerationStructurePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceAccelerationStructurePropertiesKHR*>( this ); + } + + operator VkPhysicalDeviceAccelerationStructurePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceAccelerationStructurePropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceAccelerationStructurePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxGeometryCount == rhs.maxGeometryCount ) + && ( maxInstanceCount == rhs.maxInstanceCount ) + 
&& ( maxPrimitiveCount == rhs.maxPrimitiveCount ) + && ( maxPerStageDescriptorAccelerationStructures == rhs.maxPerStageDescriptorAccelerationStructures ) + && ( maxPerStageDescriptorUpdateAfterBindAccelerationStructures == rhs.maxPerStageDescriptorUpdateAfterBindAccelerationStructures ) + && ( maxDescriptorSetAccelerationStructures == rhs.maxDescriptorSetAccelerationStructures ) + && ( maxDescriptorSetUpdateAfterBindAccelerationStructures == rhs.maxDescriptorSetUpdateAfterBindAccelerationStructures ) + && ( minAccelerationStructureScratchOffsetAlignment == rhs.minAccelerationStructureScratchOffsetAlignment ); + } + + bool operator!=( PhysicalDeviceAccelerationStructurePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR; + void* pNext = {}; + uint64_t maxGeometryCount = {}; + uint64_t maxInstanceCount = {}; + uint64_t maxPrimitiveCount = {}; + uint32_t maxPerStageDescriptorAccelerationStructures = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures = {}; + uint32_t maxDescriptorSetAccelerationStructures = {}; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures = {}; + uint32_t minAccelerationStructureScratchOffsetAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceAccelerationStructurePropertiesKHR ) == sizeof( VkPhysicalDeviceAccelerationStructurePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceAccelerationStructurePropertiesKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR> + { + using Type = PhysicalDeviceAccelerationStructurePropertiesKHR; + }; + struct PhysicalDeviceBlendOperationAdvancedFeaturesEXT { static const bool allowDuplicate = false; @@ -63123,6 +63929,93 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceDeviceGeneratedCommandsPropertiesNV; }; + struct PhysicalDeviceDeviceMemoryReportFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceMemoryReportFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport_ = {}) VULKAN_HPP_NOEXCEPT + : deviceMemoryReport( deviceMemoryReport_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceMemoryReportFeaturesEXT( PhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDeviceMemoryReportFeaturesEXT( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDeviceMemoryReportFeaturesEXT & operator=( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceDeviceMemoryReportFeaturesEXT const *>( &rhs ); + return *this; + } + + PhysicalDeviceDeviceMemoryReportFeaturesEXT & operator=( PhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceDeviceMemoryReportFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceDeviceMemoryReportFeaturesEXT & 
setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDeviceMemoryReportFeaturesEXT & setDeviceMemoryReport( VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport_ ) VULKAN_HPP_NOEXCEPT + { + deviceMemoryReport = deviceMemoryReport_; + return *this; + } + + + operator VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>( this ); + } + + operator VkPhysicalDeviceDeviceMemoryReportFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceMemoryReport == rhs.deviceMemoryReport ); + } + + bool operator!=( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport = {}; + + }; + static_assert( sizeof( PhysicalDeviceDeviceMemoryReportFeaturesEXT ) == sizeof( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceDeviceMemoryReportFeaturesEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT> + { + using Type = PhysicalDeviceDeviceMemoryReportFeaturesEXT; + }; + struct PhysicalDeviceDiagnosticsConfigFeaturesNV { static const bool allowDuplicate = false; @@ -64352,6 +65245,406 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceFragmentShaderInterlockFeaturesEXT; }; + struct PhysicalDeviceFragmentShadingRateEnumsFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums_ = {}, VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates_ = {}, VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentShadingRateEnums( fragmentShadingRateEnums_ ), supersampleFragmentShadingRates( supersampleFragmentShadingRates_ ), noInvocationFragmentShadingRates( noInvocationFragmentShadingRates_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsFeaturesNV( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & operator=( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const *>( 
&rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & operator=( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setFragmentShadingRateEnums( VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums_ ) VULKAN_HPP_NOEXCEPT + { + fragmentShadingRateEnums = fragmentShadingRateEnums_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setSupersampleFragmentShadingRates( VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates_ ) VULKAN_HPP_NOEXCEPT + { + supersampleFragmentShadingRates = supersampleFragmentShadingRates_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setNoInvocationFragmentShadingRates( VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates_ ) VULKAN_HPP_NOEXCEPT + { + noInvocationFragmentShadingRates = noInvocationFragmentShadingRates_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentShadingRateEnums == rhs.fragmentShadingRateEnums ) + && ( supersampleFragmentShadingRates == rhs.supersampleFragmentShadingRates ) + && ( noInvocationFragmentShadingRates == rhs.noInvocationFragmentShadingRates ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums = {}; + VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates = {}; + VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV ) == sizeof( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceFragmentShadingRateEnumsFeaturesNV>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV> + { + using Type = PhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + }; + + struct PhysicalDeviceFragmentShadingRateEnumsPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsPropertiesNV(VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1) VULKAN_HPP_NOEXCEPT + : maxFragmentShadingRateInvocationCount( maxFragmentShadingRateInvocationCount_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsPropertiesNV( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & operator=( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const *>( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & operator=( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & setMaxFragmentShadingRateInvocationCount( VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount_ ) VULKAN_HPP_NOEXCEPT + { + maxFragmentShadingRateInvocationCount = maxFragmentShadingRateInvocationCount_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxFragmentShadingRateInvocationCount == rhs.maxFragmentShadingRateInvocationCount ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + + }; + static_assert( sizeof( 
PhysicalDeviceFragmentShadingRateEnumsPropertiesNV ) == sizeof( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceFragmentShadingRateEnumsPropertiesNV>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV> + { + using Type = PhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + }; + + struct PhysicalDeviceFragmentShadingRateFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineFragmentShadingRate( pipelineFragmentShadingRate_ ), primitiveFragmentShadingRate( primitiveFragmentShadingRate_ ), attachmentFragmentShadingRate( attachmentFragmentShadingRate_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateFeaturesKHR( PhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateFeaturesKHR( VkPhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateFeaturesKHR & operator=( VkPhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateFeaturesKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & operator=( PhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setPipelineFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + pipelineFragmentShadingRate = pipelineFragmentShadingRate_; + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setPrimitiveFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + primitiveFragmentShadingRate = primitiveFragmentShadingRate_; + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setAttachmentFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + attachmentFragmentShadingRate = attachmentFragmentShadingRate_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
PhysicalDeviceFragmentShadingRateFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineFragmentShadingRate == rhs.pipelineFragmentShadingRate ) + && ( primitiveFragmentShadingRate == rhs.primitiveFragmentShadingRate ) + && ( attachmentFragmentShadingRate == rhs.attachmentFragmentShadingRate ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate = {}; + VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate = {}; + VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateFeaturesKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRateFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceFragmentShadingRateFeaturesKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR> + { + using Type = PhysicalDeviceFragmentShadingRateFeaturesKHR; + }; + + struct PhysicalDeviceFragmentShadingRatePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRatePropertiesKHR(VULKAN_HPP_NAMESPACE::Extent2D minFragmentShadingRateAttachmentTexelSize_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxFragmentShadingRateAttachmentTexelSize_ = {}, uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ = {}, VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRateWithMultipleViewports_ = {}, VULKAN_HPP_NAMESPACE::Bool32 layeredShadingRateAttachments_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateNonTrivialCombinerOps_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxFragmentSize_ = {}, uint32_t maxFragmentSizeAspectRatio_ = {}, uint32_t maxFragmentShadingRateCoverageSamples_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateRasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderDepthStencilWrites_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithSampleMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderSampleMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithConservativeRasterization_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithFragmentShaderInterlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithCustomSampleLocations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateStrictMultiplyCombiner_ = {}) VULKAN_HPP_NOEXCEPT + : minFragmentShadingRateAttachmentTexelSize( minFragmentShadingRateAttachmentTexelSize_ ), maxFragmentShadingRateAttachmentTexelSize( maxFragmentShadingRateAttachmentTexelSize_ ), maxFragmentShadingRateAttachmentTexelSizeAspectRatio( maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ ), primitiveFragmentShadingRateWithMultipleViewports( primitiveFragmentShadingRateWithMultipleViewports_ 
), layeredShadingRateAttachments( layeredShadingRateAttachments_ ), fragmentShadingRateNonTrivialCombinerOps( fragmentShadingRateNonTrivialCombinerOps_ ), maxFragmentSize( maxFragmentSize_ ), maxFragmentSizeAspectRatio( maxFragmentSizeAspectRatio_ ), maxFragmentShadingRateCoverageSamples( maxFragmentShadingRateCoverageSamples_ ), maxFragmentShadingRateRasterizationSamples( maxFragmentShadingRateRasterizationSamples_ ), fragmentShadingRateWithShaderDepthStencilWrites( fragmentShadingRateWithShaderDepthStencilWrites_ ), fragmentShadingRateWithSampleMask( fragmentShadingRateWithSampleMask_ ), fragmentShadingRateWithShaderSampleMask( fragmentShadingRateWithShaderSampleMask_ ), fragmentShadingRateWithConservativeRasterization( fragmentShadingRateWithConservativeRasterization_ ), fragmentShadingRateWithFragmentShaderInterlock( fragmentShadingRateWithFragmentShaderInterlock_ ), fragmentShadingRateWithCustomSampleLocations( fragmentShadingRateWithCustomSampleLocations_ ), fragmentShadingRateStrictMultiplyCombiner( fragmentShadingRateStrictMultiplyCombiner_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRatePropertiesKHR( PhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRatePropertiesKHR( VkPhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRatePropertiesKHR & operator=( VkPhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRatePropertiesKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRatePropertiesKHR & operator=( PhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRatePropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRatePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>( this ); + } + + operator VkPhysicalDeviceFragmentShadingRatePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRatePropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRatePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minFragmentShadingRateAttachmentTexelSize == rhs.minFragmentShadingRateAttachmentTexelSize ) + && ( maxFragmentShadingRateAttachmentTexelSize == rhs.maxFragmentShadingRateAttachmentTexelSize ) + && ( maxFragmentShadingRateAttachmentTexelSizeAspectRatio == rhs.maxFragmentShadingRateAttachmentTexelSizeAspectRatio ) + && ( primitiveFragmentShadingRateWithMultipleViewports == rhs.primitiveFragmentShadingRateWithMultipleViewports ) + && ( layeredShadingRateAttachments == rhs.layeredShadingRateAttachments ) + && ( fragmentShadingRateNonTrivialCombinerOps == rhs.fragmentShadingRateNonTrivialCombinerOps ) + && ( maxFragmentSize == rhs.maxFragmentSize ) + && ( maxFragmentSizeAspectRatio == rhs.maxFragmentSizeAspectRatio ) + && ( maxFragmentShadingRateCoverageSamples == rhs.maxFragmentShadingRateCoverageSamples ) + && ( 
maxFragmentShadingRateRasterizationSamples == rhs.maxFragmentShadingRateRasterizationSamples ) + && ( fragmentShadingRateWithShaderDepthStencilWrites == rhs.fragmentShadingRateWithShaderDepthStencilWrites ) + && ( fragmentShadingRateWithSampleMask == rhs.fragmentShadingRateWithSampleMask ) + && ( fragmentShadingRateWithShaderSampleMask == rhs.fragmentShadingRateWithShaderSampleMask ) + && ( fragmentShadingRateWithConservativeRasterization == rhs.fragmentShadingRateWithConservativeRasterization ) + && ( fragmentShadingRateWithFragmentShaderInterlock == rhs.fragmentShadingRateWithFragmentShaderInterlock ) + && ( fragmentShadingRateWithCustomSampleLocations == rhs.fragmentShadingRateWithCustomSampleLocations ) + && ( fragmentShadingRateStrictMultiplyCombiner == rhs.fragmentShadingRateStrictMultiplyCombiner ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRatePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D minFragmentShadingRateAttachmentTexelSize = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxFragmentShadingRateAttachmentTexelSize = {}; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio = {}; + VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRateWithMultipleViewports = {}; + VULKAN_HPP_NAMESPACE::Bool32 layeredShadingRateAttachments = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateNonTrivialCombinerOps = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxFragmentSize = {}; + uint32_t maxFragmentSizeAspectRatio = {}; + uint32_t maxFragmentShadingRateCoverageSamples = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateRasterizationSamples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderDepthStencilWrites = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithSampleMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderSampleMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithConservativeRasterization = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithFragmentShaderInterlock = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithCustomSampleLocations = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateStrictMultiplyCombiner = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRatePropertiesKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRatePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceFragmentShadingRatePropertiesKHR>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR> + { + using Type = PhysicalDeviceFragmentShadingRatePropertiesKHR; + }; + struct PhysicalDeviceGroupProperties { static const bool allowDuplicate = false; @@ -67267,128 +68560,186 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDevicePushDescriptorPropertiesKHR; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct PhysicalDeviceRayTracingFeaturesKHR + struct PhysicalDeviceRayQueryFeaturesKHR { static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingFeaturesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayQueryFeaturesKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 rayTracing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplayMixed_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingAccelerationStructureCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingIndirectTraceRays_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingIndirectAccelerationStructureBuild_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingHostAccelerationStructureCommands_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPrimitiveCulling_ = {}) VULKAN_HPP_NOEXCEPT - : rayTracing( rayTracing_ ), rayTracingShaderGroupHandleCaptureReplay( rayTracingShaderGroupHandleCaptureReplay_ ), rayTracingShaderGroupHandleCaptureReplayMixed( rayTracingShaderGroupHandleCaptureReplayMixed_ ), rayTracingAccelerationStructureCaptureReplay( rayTracingAccelerationStructureCaptureReplay_ ), rayTracingIndirectTraceRays( rayTracingIndirectTraceRays_ ), rayTracingIndirectAccelerationStructureBuild( rayTracingIndirectAccelerationStructureBuild_ ), rayTracingHostAccelerationStructureCommands( rayTracingHostAccelerationStructureCommands_ ), rayQuery( rayQuery_ ), rayTracingPrimitiveCulling( rayTracingPrimitiveCulling_ ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayQueryFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ = {}) VULKAN_HPP_NOEXCEPT + : rayQuery( rayQuery_ ) {} - VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingFeaturesKHR( PhysicalDeviceRayTracingFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayQueryFeaturesKHR( PhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceRayTracingFeaturesKHR( VkPhysicalDeviceRayTracingFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayQueryFeaturesKHR( VkPhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - PhysicalDeviceRayTracingFeaturesKHR & operator=( VkPhysicalDeviceRayTracingFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayQueryFeaturesKHR & operator=( VkPhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingFeaturesKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayQueryFeaturesKHR const *>( &rhs ); return *this; } - PhysicalDeviceRayTracingFeaturesKHR & operator=( PhysicalDeviceRayTracingFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayQueryFeaturesKHR & operator=( 
PhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayTracingFeaturesKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayQueryFeaturesKHR ) ); return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayQueryFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracing( VULKAN_HPP_NAMESPACE::Bool32 rayTracing_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayQueryFeaturesKHR & setRayQuery( VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ ) VULKAN_HPP_NOEXCEPT { - rayTracing = rayTracing_; + rayQuery = rayQuery_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingShaderGroupHandleCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + + operator VkPhysicalDeviceRayQueryFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceRayQueryFeaturesKHR*>( this ); + } + + operator VkPhysicalDeviceRayQueryFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRayQueryFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceRayQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rayQuery == rhs.rayQuery ); + } + + bool operator!=( PhysicalDeviceRayQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayQueryFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayQuery = {}; + + }; + static_assert( sizeof( PhysicalDeviceRayQueryFeaturesKHR ) == sizeof( VkPhysicalDeviceRayQueryFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceRayQueryFeaturesKHR>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceRayQueryFeaturesKHR> + { + using Type = PhysicalDeviceRayQueryFeaturesKHR; + }; + + struct PhysicalDeviceRayTracingPipelineFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelineFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling_ = {}) VULKAN_HPP_NOEXCEPT + : rayTracingPipeline( rayTracingPipeline_ ), rayTracingPipelineShaderGroupHandleCaptureReplay( rayTracingPipelineShaderGroupHandleCaptureReplay_ ), rayTracingPipelineShaderGroupHandleCaptureReplayMixed( rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ ), rayTracingPipelineTraceRaysIndirect( rayTracingPipelineTraceRaysIndirect_ ), rayTraversalPrimitiveCulling( rayTraversalPrimitiveCulling_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelineFeaturesKHR( PhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRayTracingPipelineFeaturesKHR( VkPhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRayTracingPipelineFeaturesKHR & operator=( VkPhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - rayTracingShaderGroupHandleCaptureReplay = rayTracingShaderGroupHandleCaptureReplay_; + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPipelineFeaturesKHR const *>( &rhs ); return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingShaderGroupHandleCaptureReplayMixed( VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplayMixed_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & operator=( PhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - rayTracingShaderGroupHandleCaptureReplayMixed = rayTracingShaderGroupHandleCaptureReplayMixed_; + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayTracingPipelineFeaturesKHR ) ); return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingAccelerationStructureCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 rayTracingAccelerationStructureCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT { - rayTracingAccelerationStructureCaptureReplay = rayTracingAccelerationStructureCaptureReplay_; + pNext = pNext_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingIndirectTraceRays( VULKAN_HPP_NAMESPACE::Bool32 rayTracingIndirectTraceRays_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipeline( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline_ ) VULKAN_HPP_NOEXCEPT { - rayTracingIndirectTraceRays = rayTracingIndirectTraceRays_; + rayTracingPipeline = rayTracingPipeline_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingIndirectAccelerationStructureBuild( VULKAN_HPP_NAMESPACE::Bool32 
rayTracingIndirectAccelerationStructureBuild_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineShaderGroupHandleCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay_ ) VULKAN_HPP_NOEXCEPT { - rayTracingIndirectAccelerationStructureBuild = rayTracingIndirectAccelerationStructureBuild_; + rayTracingPipelineShaderGroupHandleCaptureReplay = rayTracingPipelineShaderGroupHandleCaptureReplay_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingHostAccelerationStructureCommands( VULKAN_HPP_NAMESPACE::Bool32 rayTracingHostAccelerationStructureCommands_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineShaderGroupHandleCaptureReplayMixed( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ ) VULKAN_HPP_NOEXCEPT { - rayTracingHostAccelerationStructureCommands = rayTracingHostAccelerationStructureCommands_; + rayTracingPipelineShaderGroupHandleCaptureReplayMixed = rayTracingPipelineShaderGroupHandleCaptureReplayMixed_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayQuery( VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineTraceRaysIndirect( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect_ ) VULKAN_HPP_NOEXCEPT { - rayQuery = rayQuery_; + rayTracingPipelineTraceRaysIndirect = rayTracingPipelineTraceRaysIndirect_; return *this; } - PhysicalDeviceRayTracingFeaturesKHR & setRayTracingPrimitiveCulling( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPrimitiveCulling_ ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTraversalPrimitiveCulling( VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling_ ) VULKAN_HPP_NOEXCEPT { - rayTracingPrimitiveCulling = rayTracingPrimitiveCulling_; + rayTraversalPrimitiveCulling = rayTraversalPrimitiveCulling_; return *this; } - operator VkPhysicalDeviceRayTracingFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceRayTracingPipelineFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkPhysicalDeviceRayTracingFeaturesKHR*>( this ); + return *reinterpret_cast<const VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>( this ); } - operator VkPhysicalDeviceRayTracingFeaturesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceRayTracingPipelineFeaturesKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkPhysicalDeviceRayTracingFeaturesKHR*>( this ); + return *reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( PhysicalDeviceRayTracingFeaturesKHR const& ) const = default; + auto operator<=>( PhysicalDeviceRayTracingPipelineFeaturesKHR const& ) const = default; #else - bool operator==( PhysicalDeviceRayTracingFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceRayTracingPipelineFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) - && ( rayTracing == rhs.rayTracing ) - && ( rayTracingShaderGroupHandleCaptureReplay == rhs.rayTracingShaderGroupHandleCaptureReplay ) - && ( rayTracingShaderGroupHandleCaptureReplayMixed == rhs.rayTracingShaderGroupHandleCaptureReplayMixed ) - && ( rayTracingAccelerationStructureCaptureReplay == rhs.rayTracingAccelerationStructureCaptureReplay ) - && ( rayTracingIndirectTraceRays == rhs.rayTracingIndirectTraceRays ) - && ( 
rayTracingIndirectAccelerationStructureBuild == rhs.rayTracingIndirectAccelerationStructureBuild ) - && ( rayTracingHostAccelerationStructureCommands == rhs.rayTracingHostAccelerationStructureCommands ) - && ( rayQuery == rhs.rayQuery ) - && ( rayTracingPrimitiveCulling == rhs.rayTracingPrimitiveCulling ); + && ( rayTracingPipeline == rhs.rayTracingPipeline ) + && ( rayTracingPipelineShaderGroupHandleCaptureReplay == rhs.rayTracingPipelineShaderGroupHandleCaptureReplay ) + && ( rayTracingPipelineShaderGroupHandleCaptureReplayMixed == rhs.rayTracingPipelineShaderGroupHandleCaptureReplayMixed ) + && ( rayTracingPipelineTraceRaysIndirect == rhs.rayTracingPipelineTraceRaysIndirect ) + && ( rayTraversalPrimitiveCulling == rhs.rayTraversalPrimitiveCulling ); } - bool operator!=( PhysicalDeviceRayTracingFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceRayTracingPipelineFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -67397,91 +68748,84 @@ namespace VULKAN_HPP_NAMESPACE public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingFeaturesKHR; + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR; void* pNext = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracing = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplay = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingShaderGroupHandleCaptureReplayMixed = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingAccelerationStructureCaptureReplay = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingIndirectTraceRays = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingIndirectAccelerationStructureBuild = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingHostAccelerationStructureCommands = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayQuery = {}; - VULKAN_HPP_NAMESPACE::Bool32 rayTracingPrimitiveCulling = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling = {}; }; - static_assert( sizeof( PhysicalDeviceRayTracingFeaturesKHR ) == sizeof( VkPhysicalDeviceRayTracingFeaturesKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<PhysicalDeviceRayTracingFeaturesKHR>::value, "struct wrapper is not a standard layout!" ); + static_assert( sizeof( PhysicalDeviceRayTracingPipelineFeaturesKHR ) == sizeof( VkPhysicalDeviceRayTracingPipelineFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceRayTracingPipelineFeaturesKHR>::value, "struct wrapper is not a standard layout!" 
); template <> - struct CppType<StructureType, StructureType::ePhysicalDeviceRayTracingFeaturesKHR> + struct CppType<StructureType, StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR> { - using Type = PhysicalDeviceRayTracingFeaturesKHR; + using Type = PhysicalDeviceRayTracingPipelineFeaturesKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - struct PhysicalDeviceRayTracingPropertiesKHR + struct PhysicalDeviceRayTracingPipelinePropertiesKHR { static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPropertiesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPropertiesKHR(uint32_t shaderGroupHandleSize_ = {}, uint32_t maxRecursionDepth_ = {}, uint32_t maxShaderGroupStride_ = {}, uint32_t shaderGroupBaseAlignment_ = {}, uint64_t maxGeometryCount_ = {}, uint64_t maxInstanceCount_ = {}, uint64_t maxPrimitiveCount_ = {}, uint32_t maxDescriptorSetAccelerationStructures_ = {}, uint32_t shaderGroupHandleCaptureReplaySize_ = {}) VULKAN_HPP_NOEXCEPT - : shaderGroupHandleSize( shaderGroupHandleSize_ ), maxRecursionDepth( maxRecursionDepth_ ), maxShaderGroupStride( maxShaderGroupStride_ ), shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ), maxGeometryCount( maxGeometryCount_ ), maxInstanceCount( maxInstanceCount_ ), maxPrimitiveCount( maxPrimitiveCount_ ), maxDescriptorSetAccelerationStructures( maxDescriptorSetAccelerationStructures_ ), shaderGroupHandleCaptureReplaySize( shaderGroupHandleCaptureReplaySize_ ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelinePropertiesKHR(uint32_t shaderGroupHandleSize_ = {}, uint32_t maxRayRecursionDepth_ = {}, uint32_t maxShaderGroupStride_ = {}, uint32_t shaderGroupBaseAlignment_ = {}, uint32_t shaderGroupHandleCaptureReplaySize_ = {}, uint32_t maxRayDispatchInvocationCount_ = {}, uint32_t shaderGroupHandleAlignment_ = {}, uint32_t maxRayHitAttributeSize_ = {}) VULKAN_HPP_NOEXCEPT + : shaderGroupHandleSize( shaderGroupHandleSize_ ), maxRayRecursionDepth( maxRayRecursionDepth_ ), maxShaderGroupStride( maxShaderGroupStride_ ), shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ), shaderGroupHandleCaptureReplaySize( shaderGroupHandleCaptureReplaySize_ ), maxRayDispatchInvocationCount( maxRayDispatchInvocationCount_ ), shaderGroupHandleAlignment( shaderGroupHandleAlignment_ ), maxRayHitAttributeSize( maxRayHitAttributeSize_ ) {} - VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPropertiesKHR( PhysicalDeviceRayTracingPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelinePropertiesKHR( PhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceRayTracingPropertiesKHR( VkPhysicalDeviceRayTracingPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelinePropertiesKHR( VkPhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { *this = rhs; } #endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - PhysicalDeviceRayTracingPropertiesKHR & operator=( VkPhysicalDeviceRayTracingPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelinePropertiesKHR & operator=( VkPhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = 
*reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPropertiesKHR const *>( &rhs ); + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPipelinePropertiesKHR const *>( &rhs ); return *this; } - PhysicalDeviceRayTracingPropertiesKHR & operator=( PhysicalDeviceRayTracingPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceRayTracingPipelinePropertiesKHR & operator=( PhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayTracingPropertiesKHR ) ); + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayTracingPipelinePropertiesKHR ) ); return *this; } - operator VkPhysicalDeviceRayTracingPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceRayTracingPipelinePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesKHR*>( this ); + return *reinterpret_cast<const VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>( this ); } - operator VkPhysicalDeviceRayTracingPropertiesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceRayTracingPipelinePropertiesKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesKHR*>( this ); + return *reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>( this ); } #if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) - auto operator<=>( PhysicalDeviceRayTracingPropertiesKHR const& ) const = default; + auto operator<=>( PhysicalDeviceRayTracingPipelinePropertiesKHR const& ) const = default; #else - bool operator==( PhysicalDeviceRayTracingPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceRayTracingPipelinePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( shaderGroupHandleSize == rhs.shaderGroupHandleSize ) - && ( maxRecursionDepth == rhs.maxRecursionDepth ) + && ( maxRayRecursionDepth == rhs.maxRayRecursionDepth ) && ( maxShaderGroupStride == rhs.maxShaderGroupStride ) && ( shaderGroupBaseAlignment == rhs.shaderGroupBaseAlignment ) - && ( maxGeometryCount == rhs.maxGeometryCount ) - && ( maxInstanceCount == rhs.maxInstanceCount ) - && ( maxPrimitiveCount == rhs.maxPrimitiveCount ) - && ( maxDescriptorSetAccelerationStructures == rhs.maxDescriptorSetAccelerationStructures ) - && ( shaderGroupHandleCaptureReplaySize == rhs.shaderGroupHandleCaptureReplaySize ); + && ( shaderGroupHandleCaptureReplaySize == rhs.shaderGroupHandleCaptureReplaySize ) + && ( maxRayDispatchInvocationCount == rhs.maxRayDispatchInvocationCount ) + && ( shaderGroupHandleAlignment == rhs.shaderGroupHandleAlignment ) + && ( maxRayHitAttributeSize == rhs.maxRayHitAttributeSize ); } - bool operator!=( PhysicalDeviceRayTracingPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceRayTracingPipelinePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } @@ -67490,28 +68834,26 @@ namespace VULKAN_HPP_NAMESPACE public: - const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPropertiesKHR; + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR; void* pNext = {}; uint32_t shaderGroupHandleSize = {}; - uint32_t maxRecursionDepth = {}; + uint32_t maxRayRecursionDepth = {}; uint32_t maxShaderGroupStride = {}; uint32_t shaderGroupBaseAlignment = {}; - uint64_t maxGeometryCount = 
{}; - uint64_t maxInstanceCount = {}; - uint64_t maxPrimitiveCount = {}; - uint32_t maxDescriptorSetAccelerationStructures = {}; uint32_t shaderGroupHandleCaptureReplaySize = {}; + uint32_t maxRayDispatchInvocationCount = {}; + uint32_t shaderGroupHandleAlignment = {}; + uint32_t maxRayHitAttributeSize = {}; }; - static_assert( sizeof( PhysicalDeviceRayTracingPropertiesKHR ) == sizeof( VkPhysicalDeviceRayTracingPropertiesKHR ), "struct and wrapper have different size!" ); - static_assert( std::is_standard_layout<PhysicalDeviceRayTracingPropertiesKHR>::value, "struct wrapper is not a standard layout!" ); + static_assert( sizeof( PhysicalDeviceRayTracingPipelinePropertiesKHR ) == sizeof( VkPhysicalDeviceRayTracingPipelinePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceRayTracingPipelinePropertiesKHR>::value, "struct wrapper is not a standard layout!" ); template <> - struct CppType<StructureType, StructureType::ePhysicalDeviceRayTracingPropertiesKHR> + struct CppType<StructureType, StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR> { - using Type = PhysicalDeviceRayTracingPropertiesKHR; + using Type = PhysicalDeviceRayTracingPipelinePropertiesKHR; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct PhysicalDeviceRayTracingPropertiesNV { @@ -69110,6 +70452,101 @@ namespace VULKAN_HPP_NAMESPACE using PhysicalDeviceFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; using PhysicalDeviceShaderFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; + struct PhysicalDeviceShaderImageAtomicInt64FeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageAtomicInt64FeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics_ = {}) VULKAN_HPP_NOEXCEPT + : shaderImageInt64Atomics( shaderImageInt64Atomics_ ), sparseImageInt64Atomics( sparseImageInt64Atomics_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageAtomicInt64FeaturesEXT( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & operator=( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const *>( &rhs ); + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & operator=( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT ) ); + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setShaderImageInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderImageInt64Atomics = shaderImageInt64Atomics_; + return *this; + } + + 
PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setSparseImageInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + sparseImageInt64Atomics = sparseImageInt64Atomics_; + return *this; + } + + + operator VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>( this ); + } + + operator VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderImageInt64Atomics == rhs.shaderImageInt64Atomics ) + && ( sparseImageInt64Atomics == rhs.sparseImageInt64Atomics ); + } + + bool operator!=( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT ) == sizeof( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceShaderImageAtomicInt64FeaturesEXT>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT> + { + using Type = PhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + }; + struct PhysicalDeviceShaderImageFootprintFeaturesNV { static const bool allowDuplicate = false; @@ -69536,6 +70973,93 @@ namespace VULKAN_HPP_NAMESPACE }; using PhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR = PhysicalDeviceShaderSubgroupExtendedTypesFeatures; + struct PhysicalDeviceShaderTerminateInvocationFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderTerminateInvocationFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation_ = {}) VULKAN_HPP_NOEXCEPT + : shaderTerminateInvocation( shaderTerminateInvocation_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderTerminateInvocationFeaturesKHR( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & operator=( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderTerminateInvocationFeaturesKHR const *>( &rhs ); + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & operator=( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceShaderTerminateInvocationFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & setShaderTerminateInvocation( VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation_ ) VULKAN_HPP_NOEXCEPT + { + shaderTerminateInvocation = shaderTerminateInvocation_; + return *this; + } + + + operator VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>( this ); + } + + operator VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderTerminateInvocation == rhs.shaderTerminateInvocation ); + } + + bool operator!=( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation = {}; + + }; + static_assert( sizeof( 
PhysicalDeviceShaderTerminateInvocationFeaturesKHR ) == sizeof( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceShaderTerminateInvocationFeaturesKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR> + { + using Type = PhysicalDeviceShaderTerminateInvocationFeaturesKHR; + }; + struct PhysicalDeviceShadingRateImageFeaturesNV { static const bool allowDuplicate = false; @@ -72863,6 +74387,204 @@ namespace VULKAN_HPP_NAMESPACE using Type = PipelineDiscardRectangleStateCreateInfoEXT; }; + struct PipelineFragmentShadingRateEnumStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateEnumStateCreateInfoNV(VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType_ = VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV::eFragmentSize, VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate_ = VULKAN_HPP_NAMESPACE::FragmentShadingRateNV::e1InvocationPerPixel, std::array<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR,2> const& combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }) VULKAN_HPP_NOEXCEPT + : shadingRateType( shadingRateType_ ), shadingRate( shadingRate_ ), combinerOps( combinerOps_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateEnumStateCreateInfoNV( PipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineFragmentShadingRateEnumStateCreateInfoNV( VkPipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineFragmentShadingRateEnumStateCreateInfoNV & operator=( VkPipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PipelineFragmentShadingRateEnumStateCreateInfoNV const *>( &rhs ); + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & operator=( PipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PipelineFragmentShadingRateEnumStateCreateInfoNV ) ); + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setShadingRateType( VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateType = shadingRateType_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setShadingRate( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate_ ) VULKAN_HPP_NOEXCEPT + { + shadingRate = shadingRate_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setCombinerOps( std::array<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR,2> combinerOps_ ) VULKAN_HPP_NOEXCEPT + { + combinerOps = combinerOps_; + return *this; + } + + + operator VkPipelineFragmentShadingRateEnumStateCreateInfoNV const&() const 
VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>( this ); + } + + operator VkPipelineFragmentShadingRateEnumStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineFragmentShadingRateEnumStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineFragmentShadingRateEnumStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateType == rhs.shadingRateType ) + && ( shadingRate == rhs.shadingRate ) + && ( combinerOps == rhs.combinerOps ); + } + + bool operator!=( PipelineFragmentShadingRateEnumStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType = VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV::eFragmentSize; + VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate = VULKAN_HPP_NAMESPACE::FragmentShadingRateNV::e1InvocationPerPixel; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR, 2> combinerOps = {}; + + }; + static_assert( sizeof( PipelineFragmentShadingRateEnumStateCreateInfoNV ) == sizeof( VkPipelineFragmentShadingRateEnumStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PipelineFragmentShadingRateEnumStateCreateInfoNV>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV> + { + using Type = PipelineFragmentShadingRateEnumStateCreateInfoNV; + }; + + struct PipelineFragmentShadingRateStateCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateStateCreateInfoKHR(VULKAN_HPP_NAMESPACE::Extent2D fragmentSize_ = {}, std::array<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR,2> const& combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }) VULKAN_HPP_NOEXCEPT + : fragmentSize( fragmentSize_ ), combinerOps( combinerOps_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateStateCreateInfoKHR( PipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineFragmentShadingRateStateCreateInfoKHR( VkPipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineFragmentShadingRateStateCreateInfoKHR & operator=( VkPipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PipelineFragmentShadingRateStateCreateInfoKHR const *>( &rhs ); + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & operator=( PipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PipelineFragmentShadingRateStateCreateInfoKHR ) ); + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setFragmentSize( VULKAN_HPP_NAMESPACE::Extent2D const & fragmentSize_ ) VULKAN_HPP_NOEXCEPT + { + fragmentSize = fragmentSize_; + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setCombinerOps( std::array<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR,2> combinerOps_ ) VULKAN_HPP_NOEXCEPT + { + combinerOps = combinerOps_; + return *this; + } + + + operator VkPipelineFragmentShadingRateStateCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPipelineFragmentShadingRateStateCreateInfoKHR*>( this ); + } + + operator VkPipelineFragmentShadingRateStateCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineFragmentShadingRateStateCreateInfoKHR const& ) const = default; +#else + bool operator==( PipelineFragmentShadingRateStateCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentSize == rhs.fragmentSize ) + && ( combinerOps == rhs.combinerOps ); + } + + bool operator!=( PipelineFragmentShadingRateStateCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D 
fragmentSize = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D<VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR, 2> combinerOps = {}; + + }; + static_assert( sizeof( PipelineFragmentShadingRateStateCreateInfoKHR ) == sizeof( VkPipelineFragmentShadingRateStateCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PipelineFragmentShadingRateStateCreateInfoKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR> + { + using Type = PipelineFragmentShadingRateStateCreateInfoKHR; + }; + struct PipelineRasterizationConservativeStateCreateInfoEXT { static const bool allowDuplicate = false; @@ -77901,7 +79623,6 @@ namespace VULKAN_HPP_NAMESPACE }; using TimelineSemaphoreSubmitInfoKHR = TimelineSemaphoreSubmitInfo; -#ifdef VK_ENABLE_BETA_EXTENSIONS struct TraceRaysIndirectCommandKHR { @@ -77993,7 +79714,6 @@ namespace VULKAN_HPP_NAMESPACE }; static_assert( sizeof( TraceRaysIndirectCommandKHR ) == sizeof( VkTraceRaysIndirectCommandKHR ), "struct and wrapper have different size!" ); static_assert( std::is_standard_layout<TraceRaysIndirectCommandKHR>::value, "struct wrapper is not a standard layout!" ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ struct ValidationFeaturesEXT { @@ -79088,7 +80808,116 @@ namespace VULKAN_HPP_NAMESPACE { using Type = WriteDescriptorSetAccelerationStructureKHR; }; - using WriteDescriptorSetAccelerationStructureNV = WriteDescriptorSetAccelerationStructureKHR; + + struct WriteDescriptorSetAccelerationStructureNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWriteDescriptorSetAccelerationStructureNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureNV(uint32_t accelerationStructureCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructureCount( accelerationStructureCount_ ), pAccelerationStructures( pAccelerationStructures_ ) + {} + + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureNV( WriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WriteDescriptorSetAccelerationStructureNV( VkWriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureNV> const & accelerationStructures_ ) + : accelerationStructureCount( static_cast<uint32_t>( accelerationStructures_.size() ) ), pAccelerationStructures( accelerationStructures_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WriteDescriptorSetAccelerationStructureNV & operator=( VkWriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::WriteDescriptorSetAccelerationStructureNV const *>( &rhs ); + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & operator=( WriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( WriteDescriptorSetAccelerationStructureNV ) ); + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setPNext( 
const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setAccelerationStructureCount( uint32_t accelerationStructureCount_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = accelerationStructureCount_; + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setPAccelerationStructures( const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + pAccelerationStructures = pAccelerationStructures_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureNV & setAccelerationStructures( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::AccelerationStructureNV> const & accelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = static_cast<uint32_t>( accelerationStructures_.size() ); + pAccelerationStructures = accelerationStructures_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWriteDescriptorSetAccelerationStructureNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureNV*>( this ); + } + + operator VkWriteDescriptorSetAccelerationStructureNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( WriteDescriptorSetAccelerationStructureNV const& ) const = default; +#else + bool operator==( WriteDescriptorSetAccelerationStructureNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructureCount == rhs.accelerationStructureCount ) + && ( pAccelerationStructures == rhs.pAccelerationStructures ); + } + + bool operator!=( WriteDescriptorSetAccelerationStructureNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWriteDescriptorSetAccelerationStructureNV; + const void* pNext = {}; + uint32_t accelerationStructureCount = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures = {}; + + }; + static_assert( sizeof( WriteDescriptorSetAccelerationStructureNV ) == sizeof( VkWriteDescriptorSetAccelerationStructureNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<WriteDescriptorSetAccelerationStructureNV>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eWriteDescriptorSetAccelerationStructureNV> + { + using Type = WriteDescriptorSetAccelerationStructureNV; + }; struct WriteDescriptorSetInlineUniformBlockEXT { @@ -79670,301 +81499,329 @@ namespace VULKAN_HPP_NAMESPACE } #endif + #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createDebugReportCallbackEXT( const VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD 
Result createDebugReportCallbackEXT( const VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT>::type createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<UniqueHandle<DebugReportCallbackEXT,Dispatch>>::type createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT>::type createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT, Dispatch>>::type createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT>::type createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename 
ResultValueType<UniqueHandle<DebugUtilsMessengerEXT,Dispatch>>::type createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT>::type createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT, Dispatch>>::type createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_DIRECTFB_EXT - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, 
VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_IOS_MVK - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const 
AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_IOS_MVK*/ + #ifdef VK_USE_PLATFORM_FUCHSIA - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_FUCHSIA*/ + #ifdef VK_USE_PLATFORM_MACOS_MVK - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) 
const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_MACOS_MVK*/ + #ifdef VK_USE_PLATFORM_METAL_EXT - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createMetalSurfaceEXTUnique( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createMetalSurfaceEXTUnique( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_METAL_EXT*/ + #ifdef VK_USE_PLATFORM_GGP - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_GGP*/ + #ifdef VK_USE_PLATFORM_VI_NN - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_VI_NN*/ + #ifdef VK_USE_PLATFORM_WAYLAND_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef 
VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_XCB_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createXcbSurfaceKHR( const 
VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createXcbSurfaceKHR( const VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XCB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_KHR - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createXlibSurfaceKHR( const VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createXlibSurfaceKHR( const VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_KHR*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void 
destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator 
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type enumeratePhysicalDeviceGroups(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type enumeratePhysicalDeviceGroups(Allocator const& vectorAllocator, Dispatch const &d ) const; + template 
<typename PhysicalDeviceGroupPropertiesAllocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type enumeratePhysicalDeviceGroups( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PhysicalDeviceGroupPropertiesAllocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceGroupPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type enumeratePhysicalDeviceGroups( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type enumeratePhysicalDeviceGroupsKHR(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type enumeratePhysicalDeviceGroupsKHR(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PhysicalDeviceGroupPropertiesAllocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type enumeratePhysicalDeviceGroupsKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PhysicalDeviceGroupPropertiesAllocator = std::allocator<PhysicalDeviceGroupProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceGroupPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type enumeratePhysicalDeviceGroupsKHR( 
PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<PhysicalDevice>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type enumeratePhysicalDevices(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; - template<typename Allocator = std::allocator<PhysicalDevice>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDevice>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type enumeratePhysicalDevices(Allocator const& vectorAllocator, Dispatch const &d ) const; + template <typename PhysicalDeviceAllocator = std::allocator<PhysicalDevice>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDevice, PhysicalDeviceAllocator>>::type enumeratePhysicalDevices( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template <typename PhysicalDeviceAllocator = std::allocator<PhysicalDevice>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceAllocator, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDevice>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<PhysicalDevice, PhysicalDeviceAllocator>>::type enumeratePhysicalDevices( PhysicalDeviceAllocator & physicalDeviceAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void submitDebugUtilsMessageEXT( 
VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) const VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - void submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + void submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ VULKAN_HPP_TYPESAFE_EXPLICIT operator VkInstance() const VULKAN_HPP_NOEXCEPT @@ -80018,78 +81875,86 @@ namespace VULKAN_HPP_NAMESPACE using UniqueInstance = UniqueHandle<Instance, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; #endif /*VULKAN_HPP_NO_SMART_HANDLE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<UniqueHandle<Instance,Dispatch>>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<VULKAN_HPP_NAMESPACE::Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Instance, Dispatch>>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); - template<typename Allocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, Allocator const& vectorAllocator, Dispatch const &d ); + template <typename ExtensionPropertiesAllocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); + template <typename ExtensionPropertiesAllocator = std::allocator<ExtensionProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ExtensionPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); - template<typename Allocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type = 0> - VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Allocator const& vectorAllocator, Dispatch const &d ); + template <typename LayerPropertiesAllocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type enumerateInstanceLayerProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); + template <typename LayerPropertiesAllocator = std::allocator<LayerProperties>, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = LayerPropertiesAllocator, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type enumerateInstanceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT; + + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> - typename ResultValueType<uint32_t>::type enumerateInstanceVersion(Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); + template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + typename ResultValueType<uint32_t>::type enumerateInstanceVersion( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const &d) VULKAN_HPP_NOEXCEPT + + 
template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const & d ) VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkInstance*>( pInstance ) ) ); + return static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkInstance *>( pInstance ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) { VULKAN_HPP_NAMESPACE::Instance instance; - Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkInstance*>( &instance ) ) ); - return createResultValue( result, instance, VULKAN_HPP_NAMESPACE_STRING"::createInstance" ); + Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkInstance *>( &instance ) ) ); + return createResultValue( result, instance, VULKAN_HPP_NAMESPACE_STRING "::createInstance" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Instance,Dispatch>>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Instance, Dispatch>>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) { VULKAN_HPP_NAMESPACE::Instance instance; - Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkInstance*>( &instance ) ) ); - - ObjectDestroy<NoParent,Dispatch> deleter( allocator, d ); - return createResultValue<Instance,Dispatch>( result, instance, VULKAN_HPP_NAMESPACE_STRING"::createInstanceUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkInstance *>( &instance ) ) ); + ObjectDestroy<NoParent, Dispatch> deleter( allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Instance, Dispatch>( result, instance, VULKAN_HPP_NAMESPACE_STRING "::createInstanceUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const &d) VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d ) VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, reinterpret_cast<VkExtensionProperties*>( pProperties ) ) ); + return static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, reinterpret_cast< VkExtensionProperties *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, Dispatch const &d ) + template <typename ExtensionPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, Dispatch const & d ) { - std::vector<ExtensionProperties,Allocator> properties; + std::vector<ExtensionProperties, ExtensionPropertiesAllocator> properties; uint32_t propertyCount; Result result; do @@ -80098,20 +81963,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceExtensionProperties" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, Allocator const& vectorAllocator, Dispatch const &d ) + + template <typename ExtensionPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d ) { - std::vector<ExtensionProperties,Allocator> properties( vectorAllocator ); + std::vector<ExtensionProperties, ExtensionPropertiesAllocator> properties( extensionPropertiesAllocator ); uint32_t propertyCount; Result result; do @@ -80120,28 +81986,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceExtensionProperties" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const &d) VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d ) VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumerateInstanceLayerProperties( pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) ); + return static_cast<Result>( d.vkEnumerateInstanceLayerProperties( pPropertyCount, reinterpret_cast< VkLayerProperties *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Dispatch const &d ) + template <typename LayerPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type enumerateInstanceLayerProperties( Dispatch const & d ) { - std::vector<LayerProperties,Allocator> properties; + std::vector<LayerProperties, LayerPropertiesAllocator> properties; uint32_t propertyCount; Result result; do @@ -80150,20 +82018,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceLayerProperties" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Allocator const& vectorAllocator, Dispatch const &d ) + + template <typename LayerPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties, 
LayerPropertiesAllocator>>::type enumerateInstanceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d ) { - std::vector<LayerProperties,Allocator> properties( vectorAllocator ); + std::vector<LayerProperties, LayerPropertiesAllocator> properties( layerPropertiesAllocator ); uint32_t propertyCount; Result result; do @@ -80172,4651 +82041,4598 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceLayerProperties" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const &d) VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const & d ) VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkEnumerateInstanceVersion( pApiVersion ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<uint32_t>::type enumerateInstanceVersion(Dispatch const &d ) + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<uint32_t>::type enumerateInstanceVersion( Dispatch const & d ) { uint32_t apiVersion; Result result = static_cast<Result>( d.vkEnumerateInstanceVersion( &apiVersion ) ); - return createResultValue( result, apiVersion, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceVersion" ); + return createResultValue( result, apiVersion, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceVersion" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( pBeginInfo ) ) ); + return static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo *>( pBeginInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo, Dispatch const & d ) const 
{ - Result result = static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( &beginInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::begin" ); + Result result = static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo *>( &beginInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast<const VkConditionalRenderingBeginInfoEXT*>( pConditionalRenderingBegin ) ); + d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast<const VkConditionalRenderingBeginInfoEXT *>( pConditionalRenderingBegin ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast<const VkConditionalRenderingBeginInfoEXT*>( &conditionalRenderingBegin ) ); + d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast<const VkConditionalRenderingBeginInfoEXT *>( &conditionalRenderingBegin ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT*>( pLabelInfo ) ); + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT*>( &labelInfo ) ); + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void 
CommandBuffer::beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBeginQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBeginQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBeginQueryIndexedEXT( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ), index ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBeginQueryIndexedEXT( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ), index ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( pRenderPassBegin ), static_cast<VkSubpassContents>( contents ) ); + d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), static_cast<VkSubpassContents>( contents ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const 
VkRenderPassBeginInfo*>( &renderPassBegin ), static_cast<VkSubpassContents>( contents ) ); + d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), static_cast<VkSubpassContents>( contents ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo*>( pSubpassBeginInfo ) ); + d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo*>( &subpassBeginInfo ) ); + d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo*>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo*>( pSubpassBeginInfo ) ); + d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginRenderPass2KHR( m_commandBuffer, 
reinterpret_cast<const VkRenderPassBeginInfo*>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo*>( &subpassBeginInfo ) ); + d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast<const VkBuffer*>( pCounterBuffers ), reinterpret_cast<const VkDeviceSize*>( pCounterBufferOffsets ) ); + d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast<const VkBuffer *>( pCounterBuffers ), reinterpret_cast<const VkDeviceSize *>( pCounterBufferOffsets ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &counterBufferOffsets, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & counterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( counterBuffers.size() == counterBufferOffsets.size() ); + VULKAN_HPP_ASSERT( counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); #else - if ( counterBuffers.size() != counterBufferOffsets.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::beginTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); - } + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::beginTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size() , reinterpret_cast<const VkBuffer*>( counterBuffers.data() ), reinterpret_cast<const VkDeviceSize*>( counterBufferOffsets.data() ) ); + + d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size(), reinterpret_cast<const VkBuffer *>( counterBuffers.data() ), reinterpret_cast<const VkDeviceSize *>( counterBufferOffsets.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, 
VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSetCount, reinterpret_cast<const VkDescriptorSet*>( pDescriptorSets ), dynamicOffsetCount, pDynamicOffsets ); + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSetCount, reinterpret_cast<const VkDescriptorSet *>( pDescriptorSets ), dynamicOffsetCount, pDynamicOffsets ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, ArrayProxy<const uint32_t> const &dynamicOffsets, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, ArrayProxy<const uint32_t> const & dynamicOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSets.size() , reinterpret_cast<const VkDescriptorSet*>( descriptorSets.data() ), dynamicOffsets.size() , dynamicOffsets.data() ); + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), firstSet, descriptorSets.size(), reinterpret_cast<const VkDescriptorSet *>( descriptorSets.data() ), dynamicOffsets.size(), dynamicOffsets.data() ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBindIndexBuffer( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkIndexType>( indexType ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize 
offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBindIndexBuffer( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkIndexType>( indexType ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBindPipeline( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBindPipeline( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBindPipelineShaderGroupNV( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ), groupIndex ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBindPipelineShaderGroupNV( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ), groupIndex ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdBindShadingRateImageNV( m_commandBuffer, static_cast<VkImageView>( imageView ), static_cast<VkImageLayout>( imageLayout ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdBindShadingRateImageNV( m_commandBuffer, static_cast<VkImageView>( imageView ), 
static_cast<VkImageLayout>( imageLayout ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer*>( pBuffers ), reinterpret_cast<const VkDeviceSize*>( pOffsets ), reinterpret_cast<const VkDeviceSize*>( pSizes ) ); + d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer *>( pBuffers ), reinterpret_cast<const VkDeviceSize *>( pOffsets ), reinterpret_cast<const VkDeviceSize *>( pSizes ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &sizes, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & sizes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + VULKAN_HPP_ASSERT( sizes.empty() || buffers.size() == sizes.size() ); #else if ( buffers.size() != offsets.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != offsets.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( buffers.size() == sizes.size() ); -#else - if ( buffers.size() != sizes.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != sizes.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( offsets.size() == sizes.size() ); -#else - if ( offsets.size() != sizes.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindTransformFeedbackBuffersEXT: offsets.size() != sizes.size()" ); - } + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != sizes.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, buffers.size() , reinterpret_cast<const VkBuffer*>( buffers.data() ), 
reinterpret_cast<const VkDeviceSize*>( offsets.data() ), reinterpret_cast<const VkDeviceSize*>( sizes.data() ) ); + + d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast<const VkBuffer *>( buffers.data() ), reinterpret_cast<const VkDeviceSize *>( offsets.data() ), reinterpret_cast<const VkDeviceSize *>( sizes.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer*>( pBuffers ), reinterpret_cast<const VkDeviceSize*>( pOffsets ) ); + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer *>( pBuffers ), reinterpret_cast<const VkDeviceSize *>( pOffsets ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); #else if ( buffers.size() != offsets.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" ); - } + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, buffers.size() , reinterpret_cast<const VkBuffer*>( buffers.data() ), reinterpret_cast<const VkDeviceSize*>( offsets.data() ) ); + + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast<const VkBuffer *>( buffers.data() ), reinterpret_cast<const VkDeviceSize *>( offsets.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { - d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer*>( pBuffers ), reinterpret_cast<const VkDeviceSize*>( pOffsets ), reinterpret_cast<const VkDeviceSize*>( pSizes ), reinterpret_cast<const VkDeviceSize*>( pStrides ) ); + d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast<const VkBuffer *>( pBuffers ), reinterpret_cast<const VkDeviceSize *>( pOffsets ), reinterpret_cast<const VkDeviceSize *>( pSizes ), reinterpret_cast<const VkDeviceSize *>( pStrides ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &sizes, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &strides, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & buffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & offsets, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & sizes, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & strides, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + VULKAN_HPP_ASSERT( sizes.empty() || buffers.size() == sizes.size() ); + VULKAN_HPP_ASSERT( strides.empty() || buffers.size() == strides.size() ); #else if ( buffers.size() != offsets.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: buffers.size() != offsets.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( buffers.size() == sizes.size() ); -#else - if ( buffers.size() != sizes.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: buffers.size() != sizes.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( buffers.size() == strides.size() ); -#else - if ( buffers.size() != strides.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: buffers.size() != strides.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( offsets.size() == sizes.size() ); -#else - if ( offsets.size() != sizes.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: offsets.size() != sizes.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( offsets.size() == strides.size() ); -#else - if ( offsets.size() != strides.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: offsets.size() != strides.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( sizes.size() == strides.size() ); -#else - if ( sizes.size() != strides.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::bindVertexBuffers2EXT: sizes.size() != strides.size()" ); - } + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != 
offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != sizes.size()" ); + } + if ( !strides.empty() && buffers.size() != strides.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != strides.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, buffers.size() , reinterpret_cast<const VkBuffer*>( buffers.data() ), reinterpret_cast<const VkDeviceSize*>( offsets.data() ), reinterpret_cast<const VkDeviceSize*>( sizes.data() ), reinterpret_cast<const VkDeviceSize*>( strides.data() ) ); + + d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast<const VkBuffer *>( buffers.data() ), reinterpret_cast<const VkDeviceSize *>( offsets.data() ), reinterpret_cast<const VkDeviceSize *>( sizes.data() ), reinterpret_cast<const VkDeviceSize *>( strides.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageBlit*>( pRegions ), static_cast<VkFilter>( filter ) ); + d.vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageBlit *>( pRegions ), static_cast<VkFilter>( filter ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageBlit> const ®ions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageBlit> const & regions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), 
static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageBlit*>( regions.data() ), static_cast<VkFilter>( filter ) ); + d.vkCmdBlitImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size(), reinterpret_cast<const VkImageBlit *>( regions.data() ), static_cast<VkFilter>( filter ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast<const VkBlitImageInfo2KHR*>( pBlitImageInfo ) ); + d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast<const VkBlitImageInfo2KHR *>( pBlitImageInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast<const VkBlitImageInfo2KHR*>( &blitImageInfo ) ); + d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast<const VkBlitImageInfo2KHR *>( &blitImageInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureIndirectKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfo, VULKAN_HPP_NAMESPACE::Buffer indirectBuffer, VULKAN_HPP_NAMESPACE::DeviceSize indirectOffset, uint32_t indirectStride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBuildAccelerationStructureIndirectKHR( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( pInfo ), static_cast<VkBuffer>( indirectBuffer ), static_cast<VkDeviceSize>( indirectOffset ), indirectStride ); + d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureInfoNV *>( pInfo ), static_cast<VkBuffer>( instanceData ), static_cast<VkDeviceSize>( instanceOffset ), static_cast<VkBool32>( update ), static_cast<VkAccelerationStructureNV>( dst ), static_cast<VkAccelerationStructureNV>( src ), static_cast<VkBuffer>( scratch ), static_cast<VkDeviceSize>( scratchOffset ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureIndirectKHR( const AccelerationStructureBuildGeometryInfoKHR & 
info, VULKAN_HPP_NAMESPACE::Buffer indirectBuffer, VULKAN_HPP_NAMESPACE::DeviceSize indirectOffset, uint32_t indirectStride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBuildAccelerationStructureIndirectKHR( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( &info ), static_cast<VkBuffer>( indirectBuffer ), static_cast<VkDeviceSize>( indirectOffset ), indirectStride ); + d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureInfoNV *>( &info ), static_cast<VkBuffer>( instanceData ), static_cast<VkDeviceSize>( instanceOffset ), static_cast<VkBool32>( update ), static_cast<VkAccelerationStructureNV>( dst ), static_cast<VkAccelerationStructureNV>( src ), static_cast<VkBuffer>( scratch ), static_cast<VkDeviceSize>( scratchOffset ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresIndirectKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::DeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBuildAccelerationStructureKHR( m_commandBuffer, infoCount, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( pInfos ), reinterpret_cast<const VkAccelerationStructureBuildOffsetInfoKHR* const*>( ppOffsetInfos ) ); + d.vkCmdBuildAccelerationStructuresIndirectKHR( m_commandBuffer, infoCount, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( pInfos ), reinterpret_cast<const VkDeviceAddress *>( pIndirectDeviceAddresses ), pIndirectStrides, ppMaxPrimitiveCounts ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const &infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const > const &pOffsetInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresIndirectKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceAddress> const & indirectDeviceAddresses, ArrayProxy<const uint32_t> const & indirectStrides, ArrayProxy<const uint32_t* const > const & pMaxPrimitiveCounts, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( infos.size() == pOffsetInfos.size() ); + VULKAN_HPP_ASSERT( infos.size() == indirectDeviceAddresses.size() ); + VULKAN_HPP_ASSERT( infos.size() == indirectStrides.size() ); + VULKAN_HPP_ASSERT( infos.size() == pMaxPrimitiveCounts.size() ); #else - if ( infos.size() != pOffsetInfos.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::buildAccelerationStructureKHR: infos.size() != pOffsetInfos.size()" ); - } + if ( infos.size() != indirectDeviceAddresses.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectDeviceAddresses.size()" ); + } + if ( infos.size() != indirectStrides.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectStrides.size()" ); + } + if ( infos.size() != pMaxPrimitiveCounts.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != pMaxPrimitiveCounts.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdBuildAccelerationStructureKHR( m_commandBuffer, infos.size() , reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( infos.data() ), reinterpret_cast<const VkAccelerationStructureBuildOffsetInfoKHR* const*>( pOffsetInfos.data() ) ); + + d.vkCmdBuildAccelerationStructuresIndirectKHR( m_commandBuffer, infos.size(), reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( infos.data() ), reinterpret_cast<const VkDeviceAddress *>( indirectDeviceAddresses.data() ), indirectStrides.data(), pMaxPrimitiveCounts.data() ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureInfoNV*>( pInfo ), static_cast<VkBuffer>( instanceData ), static_cast<VkDeviceSize>( instanceOffset ), static_cast<VkBool32>( update ), static_cast<VkAccelerationStructureKHR>( dst ), static_cast<VkAccelerationStructureKHR>( src ), static_cast<VkBuffer>( scratch ), static_cast<VkDeviceSize>( scratchOffset ) ); + d.vkCmdBuildAccelerationStructuresKHR( m_commandBuffer, infoCount, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( pInfos ), reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR * const *>( ppBuildRangeInfos ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const 
AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const > const & pBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == pBuildRangeInfos.size() ); +#else + if ( infos.size() != pBuildRangeInfos.size() ) { - d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast<const VkAccelerationStructureInfoNV*>( &info ), static_cast<VkBuffer>( instanceData ), static_cast<VkDeviceSize>( instanceOffset ), static_cast<VkBool32>( update ), static_cast<VkAccelerationStructureKHR>( dst ), static_cast<VkAccelerationStructureKHR>( src ), static_cast<VkBuffer>( scratch ), static_cast<VkDeviceSize>( scratchOffset ) ); + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBuildAccelerationStructuresKHR( m_commandBuffer, infos.size(), reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( infos.data() ), reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR * const *>( pBuildRangeInfos.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearAttachments( m_commandBuffer, attachmentCount, reinterpret_cast<const VkClearAttachment*>( pAttachments ), rectCount, reinterpret_cast<const VkClearRect*>( pRects ) ); + d.vkCmdClearAttachments( m_commandBuffer, attachmentCount, reinterpret_cast<const VkClearAttachment *>( pAttachments ), rectCount, reinterpret_cast<const VkClearRect *>( pRects ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearAttachment> const &attachments, ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearRect> const &rects, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearAttachment> const & attachments, ArrayProxy<const VULKAN_HPP_NAMESPACE::ClearRect> const & rects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearAttachments( m_commandBuffer, attachments.size() , reinterpret_cast<const VkClearAttachment*>( attachments.data() ), rects.size() , 
reinterpret_cast<const VkClearRect*>( rects.data() ) ); + d.vkCmdClearAttachments( m_commandBuffer, attachments.size(), reinterpret_cast<const VkClearAttachment *>( attachments.data() ), rects.size(), reinterpret_cast<const VkClearRect *>( rects.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue*>( pColor ), rangeCount, reinterpret_cast<const VkImageSubresourceRange*>( pRanges ) ); + d.vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue *>( pColor ), rangeCount, reinterpret_cast<const VkImageSubresourceRange *>( pRanges ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const &ranges, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const & ranges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue*>( &color ), ranges.size() , reinterpret_cast<const VkImageSubresourceRange*>( ranges.data() ) ); + d.vkCmdClearColorImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearColorValue *>( &color ), ranges.size(), reinterpret_cast<const VkImageSubresourceRange *>( ranges.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearDepthStencilImage( m_commandBuffer, 
static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue*>( pDepthStencil ), rangeCount, reinterpret_cast<const VkImageSubresourceRange*>( pRanges ) ); + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue *>( pDepthStencil ), rangeCount, reinterpret_cast<const VkImageSubresourceRange *>( pRanges ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const &ranges, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageSubresourceRange> const & ranges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue*>( &depthStencil ), ranges.size() , reinterpret_cast<const VkImageSubresourceRange*>( ranges.data() ) ); + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast<VkImage>( image ), static_cast<VkImageLayout>( imageLayout ), reinterpret_cast<const VkClearDepthStencilValue *>( &depthStencil ), ranges.size(), reinterpret_cast<const VkImageSubresourceRange *>( ranges.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR*>( pInfo ) ); + d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR*>( &info ) ); + d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, 
VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdCopyAccelerationStructureNV( m_commandBuffer, static_cast<VkAccelerationStructureKHR>( dst ), static_cast<VkAccelerationStructureKHR>( src ), static_cast<VkCopyAccelerationStructureModeKHR>( mode ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyAccelerationStructureNV( m_commandBuffer, static_cast<VkAccelerationStructureKHR>( dst ), static_cast<VkAccelerationStructureKHR>( src ), static_cast<VkCopyAccelerationStructureModeKHR>( mode ) ); + d.vkCmdCopyAccelerationStructureNV( m_commandBuffer, static_cast<VkAccelerationStructureNV>( dst ), static_cast<VkAccelerationStructureNV>( src ), static_cast<VkCopyAccelerationStructureModeKHR>( mode ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR*>( pInfo ) ); + d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR*>( &info ) ); + d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* pRegions, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t 
regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferCopy*>( pRegions ) ); + d.vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferCopy *>( pRegions ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferCopy> const &regions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferCopy> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regions.size() , reinterpret_cast<const VkBufferCopy*>( regions.data() ) ); + d.vkCmdCopyBuffer( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkBuffer>( dstBuffer ), regions.size(), reinterpret_cast<const VkBufferCopy *>( regions.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferInfo2KHR*>( pCopyBufferInfo ) ); + d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferInfo2KHR *>( pCopyBufferInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferInfo2KHR*>( &copyBufferInfo ) ); + d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferInfo2KHR *>( &copyBufferInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage 
), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkBufferImageCopy*>( pRegions ) ); + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkBufferImageCopy *>( pRegions ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const &regions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkBufferImageCopy*>( regions.data() ) ); + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast<VkBuffer>( srcBuffer ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size(), reinterpret_cast<const VkBufferImageCopy *>( regions.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferToImageInfo2KHR*>( pCopyBufferToImageInfo ) ); + d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferToImageInfo2KHR *>( pCopyBufferToImageInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferToImageInfo2KHR*>( &copyBufferToImageInfo ) ); + d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyBufferToImageInfo2KHR *>( &copyBufferToImageInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, 
VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageCopy*>( pRegions ) ); + d.vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageCopy *>( pRegions ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageCopy> const &regions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageCopy> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageCopy*>( regions.data() ) ); + d.vkCmdCopyImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size(), reinterpret_cast<const VkImageCopy *>( regions.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageInfo2KHR*>( pCopyImageInfo ) ); + d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageInfo2KHR *>( pCopyImageInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageInfo2KHR*>( &copyImageInfo ) ); + d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageInfo2KHR *>( &copyImageInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, 
VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferImageCopy*>( pRegions ) ); + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regionCount, reinterpret_cast<const VkBufferImageCopy *>( pRegions ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const &regions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferImageCopy> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regions.size() , reinterpret_cast<const VkBufferImageCopy*>( regions.data() ) ); + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkBuffer>( dstBuffer ), regions.size(), reinterpret_cast<const VkBufferImageCopy *>( regions.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageToBufferInfo2KHR*>( pCopyImageToBufferInfo ) ); + d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageToBufferInfo2KHR *>( pCopyImageToBufferInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageToBufferInfo2KHR*>( &copyImageToBufferInfo ) ); + 
d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast<const VkCopyImageToBufferInfo2KHR *>( &copyImageToBufferInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR*>( pInfo ) ); + d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR*>( &info ) ); + d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), static_cast<VkDeviceSize>( stride ), 
static_cast<VkQueryResultFlags>( flags ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>( pMarkerInfo ) ); + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT *>( pMarkerInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>( &markerInfo ) ); + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT *>( &markerInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDebugMarkerEndEXT( m_commandBuffer ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDebugMarkerEndEXT( m_commandBuffer ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>( pMarkerInfo ) ); + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT *>( pMarkerInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>( &markerInfo ) ); + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast<const VkDebugMarkerMarkerInfoEXT *>( &markerInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, 
uint32_t groupCountZ, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDispatchBase( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDispatchBase( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDispatchBaseKHR( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDispatchBaseKHR( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDispatchIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + 
VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDispatchIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void 
CommandBuffer::drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndexedIndirectCount( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndexedIndirectCount( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) 
const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndexedIndirectCountKHR( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndexedIndirectCountKHR( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndirect( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndirectByteCountEXT( m_commandBuffer, instanceCount, firstInstance, static_cast<VkBuffer>( counterBuffer ), static_cast<VkDeviceSize>( counterBufferOffset ), counterOffset, vertexStride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, 
uint32_t counterOffset, uint32_t vertexStride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
    d.vkCmdDrawIndirectByteCountEXT( m_commandBuffer, instanceCount, firstInstance, static_cast<VkBuffer>( counterBuffer ), static_cast<VkDeviceSize>( counterBufferOffset ), counterOffset, vertexStride );
  }
-#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT
- {
- d.vkCmdDrawIndirectCount( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride );
- }
-#else
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT
+
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
    d.vkCmdDrawIndirectCount( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride );
  }
-#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT
- {
- d.vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride );
- }
-#else
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
    d.vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride );
  }
-#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
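// Not part of the generated header: a minimal usage sketch of the single
// drawIndirectCount overload that remains once the duplicated
// VULKAN_HPP_DISABLE_ENHANCED_MODE branch above is dropped. Assumes Vulkan 1.2
// with the default dispatcher initialized; `cmd`, `argBuffer` and `countBuffer`
// are hypothetical application-owned handles.
#include <vulkan/vulkan.hpp>

inline void recordIndirectDraws( vk::CommandBuffer cmd, vk::Buffer argBuffer, vk::Buffer countBuffer, uint32_t maxDraws )
{
  // argBuffer holds up to maxDraws VkDrawIndirectCommand records; countBuffer
  // holds a single uint32_t with the actual draw count.
  cmd.drawIndirectCount( argBuffer, 0, countBuffer, 0, maxDraws, sizeof( VkDrawIndirectCommand ) );
}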
-#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawIndirectCountKHR( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawIndirectCountKHR( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawMeshTasksIndirectCountNV( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawMeshTasksIndirectCountNV( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), static_cast<VkBuffer>( countBuffer ), static_cast<VkDeviceSize>( countBufferOffset ), maxDrawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const 
VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawMeshTasksIndirectNV( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawMeshTasksIndirectNV( m_commandBuffer, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ), drawCount, stride ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdDrawMeshTasksNV( m_commandBuffer, taskCount, firstTask ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdDrawMeshTasksNV( m_commandBuffer, taskCount, firstTask ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdEndConditionalRenderingEXT( m_commandBuffer ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdEndConditionalRenderingEXT( m_commandBuffer ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdEndDebugUtilsLabelEXT( m_commandBuffer ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdEndDebugUtilsLabelEXT( m_commandBuffer ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdEndQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, 
uint32_t query, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdEndQuery( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdEndQueryIndexedEXT( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, index ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdEndQueryIndexedEXT( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, index ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdEndRenderPass( m_commandBuffer ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdEndRenderPass( m_commandBuffer ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo*>( pSubpassEndInfo ) ); + d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo *>( pSubpassEndInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo*>( &subpassEndInfo ) ); + d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo *>( &subpassEndInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo*>( pSubpassEndInfo ) ); + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast<const 
VkSubpassEndInfo *>( pSubpassEndInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo*>( &subpassEndInfo ) ); + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassEndInfo *>( &subpassEndInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast<const VkBuffer*>( pCounterBuffers ), reinterpret_cast<const VkDeviceSize*>( pCounterBufferOffsets ) ); + d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast<const VkBuffer *>( pCounterBuffers ), reinterpret_cast<const VkDeviceSize *>( pCounterBufferOffsets ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const &counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const &counterBufferOffsets, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & counterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( counterBuffers.size() == counterBufferOffsets.size() ); + VULKAN_HPP_ASSERT( counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); #else - if ( counterBuffers.size() != counterBufferOffsets.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkCommandBuffer::endTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); - } + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::endTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size() , reinterpret_cast<const VkBuffer*>( counterBuffers.data() ), reinterpret_cast<const VkDeviceSize*>( counterBufferOffsets.data() ) ); + + d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size(), reinterpret_cast<const VkBuffer *>( 
counterBuffers.data() ), reinterpret_cast<const VkDeviceSize *>( counterBufferOffsets.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdExecuteCommands( m_commandBuffer, commandBufferCount, reinterpret_cast<const VkCommandBuffer*>( pCommandBuffers ) ); + d.vkCmdExecuteCommands( m_commandBuffer, commandBufferCount, reinterpret_cast<const VkCommandBuffer *>( pCommandBuffers ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::executeCommands( ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdExecuteCommands( m_commandBuffer, commandBuffers.size() , reinterpret_cast<const VkCommandBuffer*>( commandBuffers.data() ) ); + d.vkCmdExecuteCommands( m_commandBuffer, commandBuffers.size(), reinterpret_cast<const VkCommandBuffer *>( commandBuffers.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast<VkBool32>( isPreprocessed ), reinterpret_cast<const VkGeneratedCommandsInfoNV*>( pGeneratedCommandsInfo ) ); + d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast<VkBool32>( isPreprocessed ), reinterpret_cast<const VkGeneratedCommandsInfoNV *>( pGeneratedCommandsInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast<VkBool32>( isPreprocessed ), reinterpret_cast<const VkGeneratedCommandsInfoNV*>( &generatedCommandsInfo ) ); + d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast<VkBool32>( isPreprocessed ), reinterpret_cast<const VkGeneratedCommandsInfoNV *>( &generatedCommandsInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void 
CommandBuffer::fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdFillBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), static_cast<VkDeviceSize>( size ), data ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdFillBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), static_cast<VkDeviceSize>( size ), data ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT*>( pLabelInfo ) ); + d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT*>( &labelInfo ) ); + d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdNextSubpass( m_commandBuffer, static_cast<VkSubpassContents>( contents ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdNextSubpass( m_commandBuffer, static_cast<VkSubpassContents>( contents ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + 
VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo*>( pSubpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo*>( pSubpassEndInfo ) ); + d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo *>( pSubpassEndInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo*>( &subpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo*>( &subpassEndInfo ) ); + d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo *>( &subpassEndInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo*>( pSubpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo*>( pSubpassEndInfo ) ); + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo *>( pSubpassEndInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo*>( &subpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo*>( &subpassEndInfo ) ); + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ), reinterpret_cast<const VkSubpassEndInfo *>( &subpassEndInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const 
VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier*>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier*>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier*>( pImageMemoryBarriers ) ); + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier *>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier *>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier *>( pImageMemoryBarriers ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const &memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const &bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const &imageMemoryBarriers, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const & memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const & bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const & imageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarriers.size() , reinterpret_cast<const VkMemoryBarrier*>( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast<const VkBufferMemoryBarrier*>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast<const VkImageMemoryBarrier*>( imageMemoryBarriers.data() ) ); + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast<VkPipelineStageFlags>( srcStageMask ), 
static_cast<VkPipelineStageFlags>( dstStageMask ), static_cast<VkDependencyFlags>( dependencyFlags ), memoryBarriers.size(), reinterpret_cast<const VkMemoryBarrier *>( memoryBarriers.data() ), bufferMemoryBarriers.size(), reinterpret_cast<const VkBufferMemoryBarrier *>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size(), reinterpret_cast<const VkImageMemoryBarrier *>( imageMemoryBarriers.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast<const VkGeneratedCommandsInfoNV*>( pGeneratedCommandsInfo ) ); + d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast<const VkGeneratedCommandsInfoNV *>( pGeneratedCommandsInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast<const VkGeneratedCommandsInfoNV*>( &generatedCommandsInfo ) ); + d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast<const VkGeneratedCommandsInfoNV *>( &generatedCommandsInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdPushConstants( m_commandBuffer, static_cast<VkPipelineLayout>( layout ), static_cast<VkShaderStageFlags>( stageFlags ), offset, size, pValues ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> const &values, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename T, typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy<const T> const & values, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPushConstants( m_commandBuffer, static_cast<VkPipelineLayout>( layout ), static_cast<VkShaderStageFlags>( stageFlags ), offset, values.size() * sizeof( T ) , reinterpret_cast<const void*>( values.data() ) ); + d.vkCmdPushConstants( 
m_commandBuffer, static_cast<VkPipelineLayout>( layout ), static_cast<VkShaderStageFlags>( stageFlags ), offset, values.size() * sizeof( T ), reinterpret_cast<const void *>( values.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet*>( pDescriptorWrites ) ); + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet *>( pDescriptorWrites ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const &descriptorWrites, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const & descriptorWrites, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWrites.size() , reinterpret_cast<const VkWriteDescriptorSet*>( descriptorWrites.data() ) ); + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipelineLayout>( layout ), set, descriptorWrites.size(), reinterpret_cast<const VkWriteDescriptorSet *>( descriptorWrites.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), static_cast<VkPipelineLayout>( layout ), set, pData ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT 
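// Not part of the generated header: a sketch of how the ArrayProxy-based
// pushConstants overload kept in the hunk above is called. The byte size is
// derived as values.size() * sizeof( T ), so a std::array can be passed
// directly. `cmd` and `pipelineLayout` are hypothetical application-owned
// handles; the default dispatcher is assumed to be initialized.
#include <array>
#include <vulkan/vulkan.hpp>

inline void pushTintColor( vk::CommandBuffer cmd, vk::PipelineLayout pipelineLayout )
{
  std::array<float, 4> tint = { 1.0f, 0.5f, 0.25f, 1.0f };
  // Equivalent to vkCmdPushConstants( ..., offset 0, size 16, tint.data() ).
  cmd.pushConstants<float>( pipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, tint );
}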
+ + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), static_cast<VkPipelineLayout>( layout ), set, pData ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdResetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdResetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdResetQueryPool( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdResetQueryPool( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageResolve*>( pRegions ) ); + d.vkCmdResolveImage( 
m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageResolve *>( pRegions ) );
  }
+
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageResolve> const &regions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageResolve> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
- d.vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size() , reinterpret_cast<const VkImageResolve*>( regions.data() ) );
+ d.vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size(), reinterpret_cast<const VkImageResolve *>( regions.data() ) );
  }
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT
+
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
- d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR*>( pResolveImageInfo ) );
+ d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR *>( pResolveImageInfo ) );
  }
+
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
- d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR*>( &resolveImageInfo ) );
+ d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR *>( &resolveImageInfo ) );
  }
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4], Dispatch const &d) const VULKAN_HPP_NOEXCEPT
- {
- d.vkCmdSetBlendConstants( m_commandBuffer, blendConstants );
- }
-#else
- template<typename Dispatch>
- VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4], Dispatch const &d ) const VULKAN_HPP_NOEXCEPT
+
+ template <typename Dispatch>
+ VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4],
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetBlendConstants( m_commandBuffer, blendConstants ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetCheckpointNV( m_commandBuffer, pCheckpointMarker ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetCheckpointNV( m_commandBuffer, pCheckpointMarker ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast<VkCoarseSampleOrderTypeNV>( sampleOrderType ), customSampleOrderCount, reinterpret_cast<const VkCoarseSampleOrderCustomNV*>( pCustomSampleOrders ) ); + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast<VkCoarseSampleOrderTypeNV>( sampleOrderType ), customSampleOrderCount, reinterpret_cast<const VkCoarseSampleOrderCustomNV *>( pCustomSampleOrders ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy<const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV> const &customSampleOrders, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy<const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV> const & customSampleOrders, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast<VkCoarseSampleOrderTypeNV>( sampleOrderType ), customSampleOrders.size() , reinterpret_cast<const VkCoarseSampleOrderCustomNV*>( customSampleOrders.data() ) ); + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast<VkCoarseSampleOrderTypeNV>( sampleOrderType ), customSampleOrders.size(), reinterpret_cast<const VkCoarseSampleOrderCustomNV *>( customSampleOrders.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetCullModeEXT( m_commandBuffer, static_cast<VkCullModeFlags>( cullMode ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const &d ) const 
VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetCullModeEXT( m_commandBuffer, static_cast<VkCullModeFlags>( cullMode ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthBoundsTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthBoundsTestEnable ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthBoundsTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthBoundsTestEnable ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthCompareOpEXT( m_commandBuffer, static_cast<VkCompareOp>( depthCompareOp ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE 
void CommandBuffer::setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthCompareOpEXT( m_commandBuffer, static_cast<VkCompareOp>( depthCompareOp ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthTestEnable ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthTestEnable ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDepthWriteEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthWriteEnable ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDepthWriteEnableEXT( m_commandBuffer, static_cast<VkBool32>( depthWriteEnable ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDeviceMask( m_commandBuffer, deviceMask ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDeviceMask( m_commandBuffer, deviceMask ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetDeviceMaskKHR( m_commandBuffer, deviceMask ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetDeviceMaskKHR( m_commandBuffer, deviceMask ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, 
Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangleCount, reinterpret_cast<const VkRect2D*>( pDiscardRectangles ) ); + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangleCount, reinterpret_cast<const VkRect2D *>( pDiscardRectangles ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &discardRectangles, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & discardRectangles, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangles.size() , reinterpret_cast<const VkRect2D*>( discardRectangles.data() ) ); + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangles.size(), reinterpret_cast<const VkRect2D *>( discardRectangles.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissorCount, reinterpret_cast<const VkRect2D*>( pExclusiveScissors ) ); + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissorCount, reinterpret_cast<const VkRect2D *>( pExclusiveScissors ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy<const 
VULKAN_HPP_NAMESPACE::Rect2D> const &exclusiveScissors, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & exclusiveScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissors.size() , reinterpret_cast<const VkRect2D*>( exclusiveScissors.data() ) ); + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissors.size(), reinterpret_cast<const VkRect2D *>( exclusiveScissors.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateEnumNV( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetFrontFaceEXT( m_commandBuffer, static_cast<VkFrontFace>( frontFace ) ); + d.vkCmdSetFragmentShadingRateEnumNV( m_commandBuffer, static_cast<VkFragmentShadingRateNV>( shadingRate ), reinterpret_cast<const VkFragmentShadingRateCombinerOpKHR*>( combinerOps ) ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateKHR( const VULKAN_HPP_NAMESPACE::Extent2D* pFragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetFrontFaceEXT( m_commandBuffer, static_cast<VkFrontFace>( frontFace ) ); + d.vkCmdSetFragmentShadingRateKHR( m_commandBuffer, reinterpret_cast<const VkExtent2D *>( pFragmentSize ), reinterpret_cast<const VkFragmentShadingRateCombinerOpKHR*>( combinerOps ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const &d) const VULKAN_HPP_NOEXCEPT +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateKHR( const Extent2D & fragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetLineStippleEXT( m_commandBuffer, lineStippleFactor, lineStipplePattern ); + d.vkCmdSetFragmentShadingRateKHR( m_commandBuffer, reinterpret_cast<const VkExtent2D *>( &fragmentSize ), reinterpret_cast<const VkFragmentShadingRateCombinerOpKHR *>( combinerOps ) ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetLineStippleEXT( 
m_commandBuffer, lineStippleFactor, lineStipplePattern ); + d.vkCmdSetFrontFaceEXT( m_commandBuffer, static_cast<VkFrontFace>( frontFace ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetLineWidth( m_commandBuffer, lineWidth ); + d.vkCmdSetLineStippleEXT( m_commandBuffer, lineStippleFactor, lineStipplePattern ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetLineWidth( m_commandBuffer, lineWidth ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceMarkerInfoINTEL*>( pMarkerInfo ) ) ); + return static_cast<Result>( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceMarkerInfoINTEL *>( pMarkerInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceMarkerInfoINTEL*>( &markerInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::setPerformanceMarkerINTEL" ); + Result result = static_cast<Result>( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceMarkerInfoINTEL *>( &markerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return 
static_cast<Result>( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceOverrideInfoINTEL*>( pOverrideInfo ) ) ); + return static_cast<Result>( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceOverrideInfoINTEL *>( pOverrideInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceOverrideInfoINTEL*>( &overrideInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::setPerformanceOverrideINTEL" ); + Result result = static_cast<Result>( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceOverrideInfoINTEL *>( &overrideInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceStreamMarkerInfoINTEL*>( pMarkerInfo ) ) ); + return static_cast<Result>( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceStreamMarkerInfoINTEL *>( pMarkerInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceStreamMarkerInfoINTEL*>( &markerInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::setPerformanceStreamMarkerINTEL" ); + Result result = static_cast<Result>( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast<const VkPerformanceStreamMarkerInfoINTEL *>( &markerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - 
template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetPrimitiveTopologyEXT( m_commandBuffer, static_cast<VkPrimitiveTopology>( primitiveTopology ) ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetPrimitiveTopologyEXT( m_commandBuffer, static_cast<VkPrimitiveTopology>( primitiveTopology ) ); + d.vkCmdSetRayTracingPipelineStackSizeKHR( m_commandBuffer, pipelineStackSize ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast<const VkSampleLocationsInfoEXT*>( pSampleLocationsInfo ) ); + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast<const VkSampleLocationsInfoEXT *>( pSampleLocationsInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast<const VkSampleLocationsInfoEXT*>( &sampleLocationsInfo ) ); + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast<const VkSampleLocationsInfoEXT *>( &sampleLocationsInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissorCount, reinterpret_cast<const VkRect2D*>( pScissors ) ); + d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissorCount, reinterpret_cast<const VkRect2D *>( pScissors ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &scissors, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void 
CommandBuffer::setScissor( uint32_t firstScissor, ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & scissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissors.size() , reinterpret_cast<const VkRect2D*>( scissors.data() ) ); + d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissors.size(), reinterpret_cast<const VkRect2D *>( scissors.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissorCount, reinterpret_cast<const VkRect2D*>( pScissors ) ); + d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissorCount, reinterpret_cast<const VkRect2D *>( pScissors ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const &scissors, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Rect2D> const & scissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissors.size() , reinterpret_cast<const VkRect2D*>( scissors.data() ) ); + d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissors.size(), reinterpret_cast<const VkRect2D *>( scissors.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetStencilCompareMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), compareMask ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetStencilCompareMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), compareMask ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetStencilOpEXT( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), static_cast<VkStencilOp>( failOp ), static_cast<VkStencilOp>( passOp ), static_cast<VkStencilOp>( depthFailOp ), static_cast<VkCompareOp>( compareOp ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void 
CommandBuffer::setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetStencilOpEXT( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), static_cast<VkStencilOp>( failOp ), static_cast<VkStencilOp>( passOp ), static_cast<VkStencilOp>( depthFailOp ), static_cast<VkCompareOp>( compareOp ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetStencilReference( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), reference ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetStencilReference( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), reference ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetStencilTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( stencilTestEnable ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetStencilTestEnableEXT( m_commandBuffer, static_cast<VkBool32>( stencilTestEnable ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdSetStencilWriteMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), writeMask ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdSetStencilWriteMask( 
m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), writeMask ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewport*>( pViewports ) ); + d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewport *>( pViewports ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const &viewports, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const & viewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewports.size() , reinterpret_cast<const VkViewport*>( viewports.data() ) ); + d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewports.size(), reinterpret_cast<const VkViewport *>( viewports.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkShadingRatePaletteNV*>( pShadingRatePalettes ) ); + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkShadingRatePaletteNV *>( pShadingRatePalettes ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV> const &shadingRatePalettes, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV> const & shadingRatePalettes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, shadingRatePalettes.size() , reinterpret_cast<const VkShadingRatePaletteNV*>( shadingRatePalettes.data() ) ); + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, shadingRatePalettes.size(), reinterpret_cast<const VkShadingRatePaletteNV *>( shadingRatePalettes.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void 
CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewportWScalingNV*>( pViewportWScalings ) ); + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewportWScalingNV *>( pViewportWScalings ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ViewportWScalingNV> const &viewportWScalings, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, ArrayProxy<const VULKAN_HPP_NAMESPACE::ViewportWScalingNV> const & viewportWScalings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportWScalings.size() , reinterpret_cast<const VkViewportWScalingNV*>( viewportWScalings.data() ) ); + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportWScalings.size(), reinterpret_cast<const VkViewportWScalingNV *>( viewportWScalings.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewportCount, reinterpret_cast<const VkViewport*>( pViewports ) ); + d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewportCount, reinterpret_cast<const VkViewport *>( pViewports ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const &viewports, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::Viewport> const & viewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewports.size() , reinterpret_cast<const VkViewport*>( viewports.data() ) ); + d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewports.size(), reinterpret_cast<const VkViewport *>( viewports.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* 
pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast<const VkStridedBufferRegionKHR*>( pRaygenShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pMissShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pHitShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pCallableShaderBindingTable ), static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ) ); + d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pRaygenShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pMissShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pHitShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pCallableShaderBindingTable ), static_cast<VkDeviceAddress>( indirectDeviceAddress ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const StridedBufferRegionKHR & raygenShaderBindingTable, const StridedBufferRegionKHR & missShaderBindingTable, const StridedBufferRegionKHR & hitShaderBindingTable, const StridedBufferRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast<const VkStridedBufferRegionKHR*>( &raygenShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &missShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &hitShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &callableShaderBindingTable ), static_cast<VkBuffer>( buffer ), static_cast<VkDeviceSize>( offset ) ); + d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &raygenShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &missShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &hitShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &callableShaderBindingTable ), static_cast<VkDeviceAddress>( indirectDeviceAddress ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - 
template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast<const VkStridedBufferRegionKHR*>( pRaygenShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pMissShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pHitShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( pCallableShaderBindingTable ), width, height, depth ); + d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pRaygenShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pMissShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pHitShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( pCallableShaderBindingTable ), width, height, depth ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const StridedBufferRegionKHR & raygenShaderBindingTable, const StridedBufferRegionKHR & missShaderBindingTable, const StridedBufferRegionKHR & hitShaderBindingTable, const StridedBufferRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast<const VkStridedBufferRegionKHR*>( &raygenShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &missShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &hitShaderBindingTable ), reinterpret_cast<const VkStridedBufferRegionKHR*>( &callableShaderBindingTable ), width, height, depth ); + d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &raygenShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &missShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &hitShaderBindingTable ), reinterpret_cast<const VkStridedDeviceAddressRegionKHR *>( &callableShaderBindingTable ), width, height, depth ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif 
/*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdTraceRaysNV( m_commandBuffer, static_cast<VkBuffer>( raygenShaderBindingTableBuffer ), static_cast<VkDeviceSize>( raygenShaderBindingOffset ), static_cast<VkBuffer>( missShaderBindingTableBuffer ), static_cast<VkDeviceSize>( missShaderBindingOffset ), static_cast<VkDeviceSize>( missShaderBindingStride ), static_cast<VkBuffer>( hitShaderBindingTableBuffer ), static_cast<VkDeviceSize>( hitShaderBindingOffset ), static_cast<VkDeviceSize>( hitShaderBindingStride ), static_cast<VkBuffer>( callableShaderBindingTableBuffer ), static_cast<VkDeviceSize>( callableShaderBindingOffset ), static_cast<VkDeviceSize>( callableShaderBindingStride ), width, height, depth ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdTraceRaysNV( m_commandBuffer, static_cast<VkBuffer>( raygenShaderBindingTableBuffer ), static_cast<VkDeviceSize>( raygenShaderBindingOffset ), static_cast<VkBuffer>( missShaderBindingTableBuffer ), static_cast<VkDeviceSize>( missShaderBindingOffset ), static_cast<VkDeviceSize>( 
missShaderBindingStride ), static_cast<VkBuffer>( hitShaderBindingTableBuffer ), static_cast<VkDeviceSize>( hitShaderBindingOffset ), static_cast<VkDeviceSize>( hitShaderBindingStride ), static_cast<VkBuffer>( callableShaderBindingTableBuffer ), static_cast<VkDeviceSize>( callableShaderBindingOffset ), static_cast<VkDeviceSize>( callableShaderBindingStride ), width, height, depth ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdUpdateBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), static_cast<VkDeviceSize>( dataSize ), pData ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy<const T> const &data, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename T, typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy<const T> const & data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdUpdateBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), data.size() * sizeof( T ) , reinterpret_cast<const void*>( data.data() ) ); + d.vkCmdUpdateBuffer( m_commandBuffer, static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), data.size() * sizeof( T ), reinterpret_cast<const void *>( data.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWaitEvents( m_commandBuffer, eventCount, reinterpret_cast<const VkEvent*>( pEvents ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask 
), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier*>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier*>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier*>( pImageMemoryBarriers ) ); + d.vkCmdWaitEvents( m_commandBuffer, eventCount, reinterpret_cast<const VkEvent *>( pEvents ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), memoryBarrierCount, reinterpret_cast<const VkMemoryBarrier *>( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast<const VkBufferMemoryBarrier *>( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast<const VkImageMemoryBarrier *>( pImageMemoryBarriers ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::waitEvents( ArrayProxy<const VULKAN_HPP_NAMESPACE::Event> const &events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const &memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const &bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const &imageMemoryBarriers, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( ArrayProxy<const VULKAN_HPP_NAMESPACE::Event> const & events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const & memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const & bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const & imageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWaitEvents( m_commandBuffer, events.size() , reinterpret_cast<const VkEvent*>( events.data() ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), memoryBarriers.size() , reinterpret_cast<const VkMemoryBarrier*>( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast<const VkBufferMemoryBarrier*>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast<const VkImageMemoryBarrier*>( imageMemoryBarriers.data() ) ); + d.vkCmdWaitEvents( m_commandBuffer, events.size(), reinterpret_cast<const VkEvent *>( events.data() ), static_cast<VkPipelineStageFlags>( srcStageMask ), static_cast<VkPipelineStageFlags>( dstStageMask ), memoryBarriers.size(), reinterpret_cast<const VkMemoryBarrier *>( memoryBarriers.data() ), bufferMemoryBarriers.size(), reinterpret_cast<const VkBufferMemoryBarrier *>( bufferMemoryBarriers.data() ), imageMemoryBarriers.size(), reinterpret_cast<const VkImageMemoryBarrier *>( imageMemoryBarriers.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( uint32_t 
accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureKHR*>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); + d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureKHR *>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructures.size() , reinterpret_cast<const VkAccelerationStructureKHR*>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); + d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructures.size(), reinterpret_cast<const VkAccelerationStructureKHR *>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureKHR*>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); + d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureNV *>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE 
- template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureNV> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructures.size() , reinterpret_cast<const VkAccelerationStructureKHR*>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); + d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructures.size(), reinterpret_cast<const VkAccelerationStructureNV *>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), static_cast<VkQueryPool>( queryPool ), firstQuery ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdWriteBufferMarkerAMD( m_commandBuffer, static_cast<VkPipelineStageFlagBits>( pipelineStage ), static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), marker ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdWriteBufferMarkerAMD( m_commandBuffer, static_cast<VkPipelineStageFlagBits>( pipelineStage ), static_cast<VkBuffer>( dstBuffer ), static_cast<VkDeviceSize>( dstOffset ), marker ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkCmdWriteTimestamp( m_commandBuffer, static_cast<VkPipelineStageFlagBits>( pipelineStage ), static_cast<VkQueryPool>( queryPool ), query ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, 
VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkCmdWriteTimestamp( m_commandBuffer, static_cast<VkPipelineStageFlagBits>( pipelineStage ), static_cast<VkQueryPool>( queryPool ), query ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::end(Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::end( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkEndCommandBuffer( m_commandBuffer ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::end(Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::end( Dispatch const & d ) const { Result result = static_cast<Result>( d.vkEndCommandBuffer( m_commandBuffer ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::end" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::reset" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VK_USE_PLATFORM_WIN32_KHR #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkAcquireFullScreenExclusiveModeEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template 
<typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkAcquireFullScreenExclusiveModeEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireFullScreenExclusiveModeEXT" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireFullScreenExclusiveModeEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHR*>( pAcquireInfo ), pImageIndex ) ); + return static_cast<Result>( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHR *>( pAcquireInfo ), pImageIndex ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const & d ) const { uint32_t imageIndex; - Result result = static_cast<Result>( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHR*>( &acquireInfo ), &imageIndex ) ); - return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireNextImage2KHR", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } ); + Result result = static_cast<Result>( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast<const VkAcquireNextImageInfoKHR *>( &acquireInfo ), &imageIndex ) ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eNotReady, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkAcquireNextImageKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), timeout, 
static_cast<VkSemaphore>( semaphore ), static_cast<VkFence>( fence ), pImageIndex ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<uint32_t> Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const { uint32_t imageIndex; Result result = static_cast<Result>( d.vkAcquireNextImageKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), timeout, static_cast<VkSemaphore>( semaphore ), static_cast<VkFence>( fence ), &imageIndex ) ); - return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireNextImageKHR", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImageKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eNotReady, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast<const VkPerformanceConfigurationAcquireInfoINTEL*>( pAcquireInfo ), reinterpret_cast<VkPerformanceConfigurationINTEL*>( pConfiguration ) ) ); + return static_cast<Result>( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast<const VkPerformanceConfigurationAcquireInfoINTEL *>( pAcquireInfo ), reinterpret_cast< VkPerformanceConfigurationINTEL *>( pConfiguration ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL>::type Device::acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL>::type Device::acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration; + Result result = static_cast<Result>( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast<const VkPerformanceConfigurationAcquireInfoINTEL *>( &acquireInfo ), 
reinterpret_cast<VkPerformanceConfigurationINTEL *>( &configuration ) ) ); + return createResultValue( result, configuration, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTEL" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL, Dispatch>>::type Device::acquirePerformanceConfigurationINTELUnique( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration; - Result result = static_cast<Result>( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast<const VkPerformanceConfigurationAcquireInfoINTEL*>( &acquireInfo ), reinterpret_cast<VkPerformanceConfigurationINTEL*>( &configuration ) ) ); - return createResultValue( result, configuration, VULKAN_HPP_NAMESPACE_STRING"::Device::acquirePerformanceConfigurationINTEL" ); + Result result = static_cast<Result>( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast<const VkPerformanceConfigurationAcquireInfoINTEL *>( &acquireInfo ), reinterpret_cast<VkPerformanceConfigurationINTEL *>( &configuration ) ) ); + ObjectRelease<Device, Dispatch> deleter( *this, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL, Dispatch>( result, configuration, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTELUnique", deleter ); } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast<const VkAcquireProfilingLockInfoKHR*>( pInfo ) ) ); + return static_cast<Result>( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast<const VkAcquireProfilingLockInfoKHR *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast<const VkAcquireProfilingLockInfoKHR*>( &info ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireProfilingLockKHR" ); + Result result = static_cast<Result>( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast<const VkAcquireProfilingLockInfoKHR *>( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateCommandBuffers( const 
VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( pAllocateInfo ), reinterpret_cast<VkCommandBuffer*>( pCommandBuffers ) ) ); + return static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo *>( pAllocateInfo ), reinterpret_cast< VkCommandBuffer *>( pCommandBuffers ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d ) const + template <typename CommandBufferAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<CommandBuffer, CommandBufferAllocator>>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d ) const { - std::vector<CommandBuffer,Allocator> commandBuffers( allocateInfo.commandBufferCount ); - Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkCommandBuffer*>( commandBuffers.data() ) ) ); - return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateCommandBuffers" ); + std::vector<CommandBuffer, CommandBufferAllocator> commandBuffers( allocateInfo.commandBufferCount ); + Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkCommandBuffer *>( commandBuffers.data() ) ) ); + return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CommandBuffer>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<CommandBuffer,Allocator>>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename CommandBufferAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CommandBuffer>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<CommandBuffer, CommandBufferAllocator>>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d ) const { - std::vector<CommandBuffer,Allocator> commandBuffers( allocateInfo.commandBufferCount, vectorAllocator ); - Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkCommandBuffer*>( 
commandBuffers.data() ) ) ); - return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateCommandBuffers" ); + std::vector<CommandBuffer, CommandBufferAllocator> commandBuffers( allocateInfo.commandBufferCount, commandBufferAllocator ); + Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkCommandBuffer *>( commandBuffers.data() ) ) ); + return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<CommandBuffer,Dispatch>,Allocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename CommandBufferAllocator> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<CommandBuffer, Dispatch>, CommandBufferAllocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d ) const { - std::vector<UniqueHandle<CommandBuffer, Dispatch>, Allocator> uniqueCommandBuffers; + std::vector<UniqueHandle<CommandBuffer, Dispatch>, CommandBufferAllocator> uniqueCommandBuffers; std::vector<CommandBuffer> commandBuffers( allocateInfo.commandBufferCount ); - Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkCommandBuffer*>(commandBuffers.data()) ) ); + Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkCommandBuffer *>( commandBuffers.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { uniqueCommandBuffers.reserve( allocateInfo.commandBufferCount ); - PoolFree<Device,CommandPool,Dispatch> deleter( *this, allocateInfo.commandPool, d ); - for ( size_t i=0 ; i<allocateInfo.commandBufferCount ; i++ ) + PoolFree<Device, CommandPool, Dispatch> deleter( *this, allocateInfo.commandPool, d ); + for ( size_t i=0; i < allocateInfo.commandBufferCount; i++ ) { uniqueCommandBuffers.push_back( UniqueHandle<CommandBuffer, Dispatch>( commandBuffers[i], deleter ) ); } } - return createResultValue( result, std::move( uniqueCommandBuffers ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<CommandBuffer, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<CommandBuffer,Dispatch>,Allocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename CommandBufferAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<CommandBuffer, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<CommandBuffer, Dispatch>, 
CommandBufferAllocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<CommandBuffer, Dispatch>, Allocator> uniqueCommandBuffers( vectorAllocator ); + std::vector<UniqueHandle<CommandBuffer, Dispatch>, CommandBufferAllocator> uniqueCommandBuffers( commandBufferAllocator ); std::vector<CommandBuffer> commandBuffers( allocateInfo.commandBufferCount ); - Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkCommandBuffer*>(commandBuffers.data()) ) ); + Result result = static_cast<Result>( d.vkAllocateCommandBuffers( m_device, reinterpret_cast<const VkCommandBufferAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkCommandBuffer *>( commandBuffers.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { uniqueCommandBuffers.reserve( allocateInfo.commandBufferCount ); - PoolFree<Device,CommandPool,Dispatch> deleter( *this, allocateInfo.commandPool, d ); - for ( size_t i=0 ; i<allocateInfo.commandBufferCount ; i++ ) + PoolFree<Device, CommandPool, Dispatch> deleter( *this, allocateInfo.commandPool, d ); + for ( size_t i=0; i < allocateInfo.commandBufferCount; i++ ) { uniqueCommandBuffers.push_back( UniqueHandle<CommandBuffer, Dispatch>( commandBuffers[i], deleter ) ); } } - return createResultValue( result, std::move( uniqueCommandBuffers ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo*>( pAllocateInfo ), reinterpret_cast<VkDescriptorSet*>( pDescriptorSets ) ) ); + return static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo *>( pAllocateInfo ), reinterpret_cast< VkDescriptorSet *>( pDescriptorSets ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<DescriptorSet,Allocator>>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d ) const + template <typename DescriptorSetAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<DescriptorSet, DescriptorSetAllocator>>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d ) const { - std::vector<DescriptorSet,Allocator> descriptorSets( allocateInfo.descriptorSetCount ); - Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const 
VkDescriptorSetAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkDescriptorSet*>( descriptorSets.data() ) ) ); - return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateDescriptorSets" ); + std::vector<DescriptorSet, DescriptorSetAllocator> descriptorSets( allocateInfo.descriptorSetCount ); + Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkDescriptorSet *>( descriptorSets.data() ) ) ); + return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DescriptorSet>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<DescriptorSet,Allocator>>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DescriptorSetAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DescriptorSet>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<DescriptorSet, DescriptorSetAllocator>>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d ) const { - std::vector<DescriptorSet,Allocator> descriptorSets( allocateInfo.descriptorSetCount, vectorAllocator ); - Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkDescriptorSet*>( descriptorSets.data() ) ) ); - return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateDescriptorSets" ); + std::vector<DescriptorSet, DescriptorSetAllocator> descriptorSets( allocateInfo.descriptorSetCount, descriptorSetAllocator ); + Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkDescriptorSet *>( descriptorSets.data() ) ) ); + return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<DescriptorSet,Dispatch>,Allocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename DescriptorSetAllocator> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d ) const { - std::vector<UniqueHandle<DescriptorSet, Dispatch>, Allocator> uniqueDescriptorSets; + std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator> uniqueDescriptorSets; std::vector<DescriptorSet> descriptorSets( allocateInfo.descriptorSetCount ); - Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, 
reinterpret_cast<const VkDescriptorSetAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkDescriptorSet*>(descriptorSets.data()) ) ); + Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkDescriptorSet *>( descriptorSets.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { uniqueDescriptorSets.reserve( allocateInfo.descriptorSetCount ); - PoolFree<Device,DescriptorPool,Dispatch> deleter( *this, allocateInfo.descriptorPool, d ); - for ( size_t i=0 ; i<allocateInfo.descriptorSetCount ; i++ ) + PoolFree<Device, DescriptorPool, Dispatch> deleter( *this, allocateInfo.descriptorPool, d ); + for ( size_t i=0; i < allocateInfo.descriptorSetCount; i++ ) { uniqueDescriptorSets.push_back( UniqueHandle<DescriptorSet, Dispatch>( descriptorSets[i], deleter ) ); } } - return createResultValue( result, std::move( uniqueDescriptorSets ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<DescriptorSet, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<DescriptorSet,Dispatch>,Allocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename DescriptorSetAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<DescriptorSet, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<DescriptorSet, Dispatch>, Allocator> uniqueDescriptorSets( vectorAllocator ); + std::vector<UniqueHandle<DescriptorSet, Dispatch>, DescriptorSetAllocator> uniqueDescriptorSets( descriptorSetAllocator ); std::vector<DescriptorSet> descriptorSets( allocateInfo.descriptorSetCount ); - Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo*>( &allocateInfo ), reinterpret_cast<VkDescriptorSet*>(descriptorSets.data()) ) ); + Result result = static_cast<Result>( d.vkAllocateDescriptorSets( m_device, reinterpret_cast<const VkDescriptorSetAllocateInfo *>( &allocateInfo ), reinterpret_cast<VkDescriptorSet *>( descriptorSets.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { uniqueDescriptorSets.reserve( allocateInfo.descriptorSetCount ); - PoolFree<Device,DescriptorPool,Dispatch> deleter( *this, allocateInfo.descriptorPool, d ); - for ( size_t i=0 ; i<allocateInfo.descriptorSetCount ; i++ ) + PoolFree<Device, DescriptorPool, Dispatch> deleter( *this, allocateInfo.descriptorPool, d ); + for ( size_t i=0; i < allocateInfo.descriptorSetCount; i++ ) { uniqueDescriptorSets.push_back( UniqueHandle<DescriptorSet, Dispatch>( descriptorSets[i], deleter ) ); } } - return createResultValue( result, std::move( uniqueDescriptorSets ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ 
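Note on the hunk above: the enhanced-mode vector overloads now name their allocator template parameter after the element type (CommandBufferAllocator, DescriptorSetAllocator) instead of a generic Allocator, and the *Unique variants hand the vector back through createResultValue with std::move while still wiring a PoolFree deleter to the originating pool. Call sites that rely on the default std::allocator should compile unchanged. The lines below are an illustrative usage sketch, not part of the generated header; device, commandPool, descriptorPool and layout are assumed to exist, and with the default configuration (exceptions enabled) the *Unique overloads return the vector of unique handles directly.
  vk::CommandBufferAllocateInfo cbInfo( commandPool, vk::CommandBufferLevel::ePrimary, 2 );
  auto cmdBufs = device.allocateCommandBuffersUnique( cbInfo );   // handles are freed back to commandPool via the PoolFree deleter
  vk::DescriptorSetAllocateInfo dsInfo( descriptorPool, 1, &layout );
  auto sets = device.allocateDescriptorSetsUnique( dsInfo );      // handles are freed back to descriptorPool via the PoolFree deleter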
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo*>( pAllocateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDeviceMemory*>( pMemory ) ) ); + return static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo *>( pAllocateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDeviceMemory *>( pMemory ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceMemory>::type Device::allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceMemory>::type Device::allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeviceMemory memory; - Result result = static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo*>( &allocateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeviceMemory*>( &memory ) ) ); - return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateMemory" ); + Result result = static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo *>( &allocateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeviceMemory *>( &memory ) ) ); + return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemory" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DeviceMemory,Dispatch>>::type Device::allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DeviceMemory, Dispatch>>::type Device::allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeviceMemory memory; - Result result = static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo*>( &allocateInfo ), reinterpret_cast<const 
VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeviceMemory*>( &memory ) ) ); - - ObjectFree<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DeviceMemory,Dispatch>( result, memory, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateMemoryUnique", deleter ); + Result result = static_cast<Result>( d.vkAllocateMemory( m_device, reinterpret_cast<const VkMemoryAllocateInfo *>( &allocateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeviceMemory *>( &memory ) ) ); + ObjectFree<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DeviceMemory, Dispatch>( result, memory, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemoryUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindAccelerationStructureMemoryKHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - return static_cast<Result>( d.vkBindAccelerationStructureMemoryKHR( m_device, bindInfoCount, reinterpret_cast<const VkBindAccelerationStructureMemoryInfoKHR*>( pBindInfos ) ) ); - } -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindAccelerationStructureMemoryKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR> const &bindInfos, Dispatch const &d ) const - { - Result result = static_cast<Result>( d.vkBindAccelerationStructureMemoryKHR( m_device, bindInfos.size() , reinterpret_cast<const VkBindAccelerationStructureMemoryInfoKHR*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindAccelerationStructureMemoryKHR" ); - } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfoCount, reinterpret_cast<const VkBindAccelerationStructureMemoryInfoKHR*>( pBindInfos ) ) ); + return static_cast<Result>( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfoCount, reinterpret_cast<const VkBindAccelerationStructureMemoryInfoNV *>( pBindInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindAccelerationStructureMemoryNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoKHR> const &bindInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindAccelerationStructureMemoryNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV> const & bindInfos, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfos.size() , reinterpret_cast<const VkBindAccelerationStructureMemoryInfoKHR*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindAccelerationStructureMemoryNV" ); + Result result = static_cast<Result>( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfos.size(), reinterpret_cast<const VkBindAccelerationStructureMemoryInfoNV *>( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkBindBufferMemory( m_device, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceMemory>( memory ), static_cast<VkDeviceSize>( memoryOffset ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkBindBufferMemory( m_device, static_cast<VkBuffer>( buffer ), static_cast<VkDeviceMemory>( memory ), static_cast<VkDeviceSize>( memoryOffset ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBindBufferMemory2( m_device, bindInfoCount, reinterpret_cast<const VkBindBufferMemoryInfo*>( pBindInfos ) ) ); + return static_cast<Result>( d.vkBindBufferMemory2( m_device, bindInfoCount, reinterpret_cast<const VkBindBufferMemoryInfo *>( pBindInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename 
Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const &bindInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const & bindInfos, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkBindBufferMemory2( m_device, bindInfos.size() , reinterpret_cast<const VkBindBufferMemoryInfo*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory2" ); + Result result = static_cast<Result>( d.vkBindBufferMemory2( m_device, bindInfos.size(), reinterpret_cast<const VkBindBufferMemoryInfo *>( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBindBufferMemory2KHR( m_device, bindInfoCount, reinterpret_cast<const VkBindBufferMemoryInfo*>( pBindInfos ) ) ); + return static_cast<Result>( d.vkBindBufferMemory2KHR( m_device, bindInfoCount, reinterpret_cast<const VkBindBufferMemoryInfo *>( pBindInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const &bindInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindBufferMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo> const & bindInfos, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkBindBufferMemory2KHR( m_device, bindInfos.size() , reinterpret_cast<const VkBindBufferMemoryInfo*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory2KHR" ); + Result result = static_cast<Result>( d.vkBindBufferMemory2KHR( m_device, bindInfos.size(), reinterpret_cast<const VkBindBufferMemoryInfo *>( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, 
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkBindImageMemory( m_device, static_cast<VkImage>( image ), static_cast<VkDeviceMemory>( memory ), static_cast<VkDeviceSize>( memoryOffset ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkBindImageMemory( m_device, static_cast<VkImage>( image ), static_cast<VkDeviceMemory>( memory ), static_cast<VkDeviceSize>( memoryOffset ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBindImageMemory2( m_device, bindInfoCount, reinterpret_cast<const VkBindImageMemoryInfo*>( pBindInfos ) ) ); + return static_cast<Result>( d.vkBindImageMemory2( m_device, bindInfoCount, reinterpret_cast<const VkBindImageMemoryInfo *>( pBindInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const &bindInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory2( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const & bindInfos, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkBindImageMemory2( m_device, bindInfos.size() , reinterpret_cast<const VkBindImageMemoryInfo*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory2" ); + Result result = static_cast<Result>( d.vkBindImageMemory2( m_device, bindInfos.size(), reinterpret_cast<const VkBindImageMemoryInfo *>( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2KHR( uint32_t bindInfoCount, const 
VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBindImageMemory2KHR( m_device, bindInfoCount, reinterpret_cast<const VkBindImageMemoryInfo*>( pBindInfos ) ) ); + return static_cast<Result>( d.vkBindImageMemory2KHR( m_device, bindInfoCount, reinterpret_cast<const VkBindImageMemoryInfo *>( pBindInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const &bindInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::bindImageMemory2KHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo> const & bindInfos, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkBindImageMemory2KHR( m_device, bindInfos.size() , reinterpret_cast<const VkBindImageMemoryInfo*>( bindInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory2KHR" ); + Result result = static_cast<Result>( d.vkBindImageMemory2KHR( m_device, bindInfos.size(), reinterpret_cast<const VkBindImageMemoryInfo *>( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::buildAccelerationStructureKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const * ppOffsetInfos, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkBuildAccelerationStructureKHR( m_device, infoCount, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( pInfos ), reinterpret_cast<const VkAccelerationStructureBuildOffsetInfoKHR* const*>( ppOffsetInfos ) ) ); + return static_cast<Result>( d.vkBuildAccelerationStructuresKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), infoCount, reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( pInfos ), reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR * const *>( ppBuildRangeInfos ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::buildAccelerationStructureKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const &infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildOffsetInfoKHR* const > const &pOffsetInfos, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE Result Device::buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, ArrayProxy<const 
VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR> const & infos, ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const > const & pBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( infos.size() == pOffsetInfos.size() ); + VULKAN_HPP_ASSERT( infos.size() == pBuildRangeInfos.size() ); #else - if ( infos.size() != pOffsetInfos.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkDevice::buildAccelerationStructureKHR: infos.size() != pOffsetInfos.size()" ); - } + if ( infos.size() != pBuildRangeInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - Result result = static_cast<Result>( d.vkBuildAccelerationStructureKHR( m_device, infos.size() , reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR*>( infos.data() ), reinterpret_cast<const VkAccelerationStructureBuildOffsetInfoKHR* const*>( pOffsetInfos.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::buildAccelerationStructureKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR } ); + + Result result = static_cast<Result>( d.vkBuildAccelerationStructuresKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), infos.size(), reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( infos.data() ), reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR * const *>( pBuildRangeInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkCompileDeferredNV( m_device, static_cast<VkPipeline>( pipeline ), shader ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkCompileDeferredNV( m_device, static_cast<VkPipeline>( pipeline ), shader ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::compileDeferredNV" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::compileDeferredNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCopyAccelerationStructureKHR( m_device, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR*>( pInfo ) ) ); + return static_cast<Result>( d.vkCopyAccelerationStructureKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyAccelerationStructureInfoKHR *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureInfoKHR & info, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCopyAccelerationStructureKHR( m_device, reinterpret_cast<const VkCopyAccelerationStructureInfoKHR*>( &info ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::copyAccelerationStructureKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR } ); + Result result = static_cast<Result>( d.vkCopyAccelerationStructureKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyAccelerationStructureInfoKHR *>( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCopyAccelerationStructureToMemoryKHR( m_device, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR*>( pInfo ) ) ); + return static_cast<Result>( d.vkCopyAccelerationStructureToMemoryKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCopyAccelerationStructureToMemoryKHR( m_device, reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR*>( &info ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::copyAccelerationStructureToMemoryKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR } ); + Result result = static_cast<Result>( d.vkCopyAccelerationStructureToMemoryKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyAccelerationStructureToMemoryInfoKHR *>( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCopyMemoryToAccelerationStructureKHR( m_device, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR*>( pInfo ) ) ); + return static_cast<Result>( d.vkCopyMemoryToAccelerationStructureKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkCopyMemoryToAccelerationStructureKHR( m_device, reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR*>( &info ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::copyMemoryToAccelerationStructureKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR } ); + Result result = static_cast<Result>( d.vkCopyMemoryToAccelerationStructureKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), reinterpret_cast<const VkCopyMemoryToAccelerationStructureInfoKHR *>( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); } #endif 
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-  template<typename Dispatch>
-  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const &d) const VULKAN_HPP_NOEXCEPT
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
-    return static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkAccelerationStructureKHR*>( pAccelerationStructure ) ) );
+    return static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkAccelerationStructureKHR *>( pAccelerationStructure ) ) );
  }
+
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
-  template<typename Dispatch>
-  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR>::type Device::createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR>::type Device::createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const
  {
    VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure;
-    Result result = static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureKHR*>( &accelerationStructure ) ) );
-    return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureKHR" );
+    Result result = static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureKHR *>( &accelerationStructure ) ) );
+    return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHR" );
  }
-#ifndef VULKAN_HPP_NO_SMART_HANDLE
-  template<typename Dispatch>
-  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<AccelerationStructureKHR,Dispatch>>::type Device::createAccelerationStructureKHRUnique( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+
+# ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR, Dispatch>>::type Device::createAccelerationStructureKHRUnique( const AccelerationStructureCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const
  {
    VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure;
-    Result result = static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureKHR*>( &accelerationStructure ) ) );
-
-    ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d );
-    return createResultValue<AccelerationStructureKHR,Dispatch>( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureKHRUnique", deleter );
+    Result result = static_cast<Result>( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureKHR *>( &accelerationStructure ) ) );
+    ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d );
+    return createResultValue<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR, Dispatch>( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHRUnique", deleter );
  }
-#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/
-  template<typename Dispatch>
-  VULKAN_HPP_INLINE Result Device::createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const &d) const VULKAN_HPP_NOEXCEPT
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
  {
-    return static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkAccelerationStructureNV*>( pAccelerationStructure ) ) );
+    return static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkAccelerationStructureNV *>( pAccelerationStructure ) ) );
  }
+
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
-  template<typename Dispatch>
-  VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureNV>::type Device::createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks>
allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AccelerationStructureNV>::type Device::createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure; - Result result = static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureNV*>( &accelerationStructure ) ) ); - return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureNV" ); + Result result = static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureNV *>( &accelerationStructure ) ) ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNV" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<AccelerationStructureNV,Dispatch>>::type Device::createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureNV, Dispatch>>::type Device::createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure; - Result result = static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureNV*>( &accelerationStructure ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<AccelerationStructureNV,Dispatch>( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureNVUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast<const VkAccelerationStructureCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkAccelerationStructureNV *>( &accelerationStructure ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::AccelerationStructureNV, Dispatch>( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNVUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE Result Device::createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkBuffer*>( pBuffer ) ) ); + return static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkBuffer *>( pBuffer ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Buffer>::type Device::createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Buffer>::type Device::createBuffer( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Buffer buffer; - Result result = static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBuffer*>( &buffer ) ) ); - return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createBuffer" ); + Result result = static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBuffer *>( &buffer ) ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createBuffer" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Buffer,Dispatch>>::type Device::createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Buffer, Dispatch>>::type Device::createBufferUnique( const BufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Buffer buffer; - Result result = static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBuffer*>( &buffer ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Buffer,Dispatch>( 
result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateBuffer( m_device, reinterpret_cast<const VkBufferCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBuffer *>( &buffer ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Buffer, Dispatch>( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createBufferView( const VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createBufferView( const VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkBufferView*>( pView ) ) ); + return static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkBufferView *>( pView ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::BufferView>::type Device::createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::BufferView>::type Device::createBufferView( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::BufferView view; - Result result = static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBufferView*>( &view ) ) ); - return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferView" ); + Result result = static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBufferView *>( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferView" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<BufferView,Dispatch>>::type Device::createBufferViewUnique( const 
BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::BufferView, Dispatch>>::type Device::createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::BufferView view; - Result result = static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBufferView*>( &view ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<BufferView,Dispatch>( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferViewUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateBufferView( m_device, reinterpret_cast<const VkBufferViewCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkBufferView *>( &view ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::BufferView, Dispatch>( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferViewUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkCommandPool*>( pCommandPool ) ) ); + return static_cast<Result>( d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkCommandPool *>( pCommandPool ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::CommandPool>::type Device::createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::CommandPool>::type Device::createCommandPool( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::CommandPool commandPool; - Result result = static_cast<Result>( 
d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkCommandPool*>( &commandPool ) ) ); - return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createCommandPool" ); + Result result = static_cast<Result>( d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkCommandPool *>( &commandPool ) ) ); + return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPool" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<CommandPool,Dispatch>>::type Device::createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::CommandPool, Dispatch>>::type Device::createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::CommandPool commandPool; - Result result = static_cast<Result>( d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkCommandPool*>( &commandPool ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<CommandPool,Dispatch>( result, commandPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createCommandPoolUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateCommandPool( m_device, reinterpret_cast<const VkCommandPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkCommandPool *>( &commandPool ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::CommandPool, Dispatch>( result, commandPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPoolUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkComputePipelineCreateInfo*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) ); + return static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkComputePipelineCreateInfo *>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename PipelineAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkComputePipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipelines", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkComputePipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, 
PipelineAllocator>> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size(), vectorAllocator ); - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkComputePipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipelines", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkComputePipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkComputePipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipeline", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkComputePipelineCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipeline", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename PipelineAllocator> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines; + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines; std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkComputePipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkComputePipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename PipelineAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> 
Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines( vectorAllocator ); + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines( pipelineAllocator ); std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkComputePipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkComputePipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline,Dispatch>> Device::createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline, Dispatch>> Device::createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkComputePipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Pipeline,Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipelineUnique", { Result::eSuccess, Result::ePipelineCompileRequiredEXT }, deleter ); + Result result = static_cast<Result>( 
d.vkCreateComputePipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkComputePipelineCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<Pipeline, Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelineUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDeferredOperationKHR*>( pDeferredOperation ) ) ); + return static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDeferredOperationKHR *>( pDeferredOperation ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR>::type Device::createDeferredOperationKHR( Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeferredOperationKHR>::type Device::createDeferredOperationKHR( Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation; - Result result = static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeferredOperationKHR*>( &deferredOperation ) ) ); - return createResultValue( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING"::Device::createDeferredOperationKHR" ); + Result result = static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeferredOperationKHR *>( &deferredOperation ) ) ); + return createResultValue( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DeferredOperationKHR,Dispatch>>::type Device::createDeferredOperationKHRUnique( Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename 
ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DeferredOperationKHR, Dispatch>>::type Device::createDeferredOperationKHRUnique( Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation; - Result result = static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeferredOperationKHR*>( &deferredOperation ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DeferredOperationKHR,Dispatch>( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING"::Device::createDeferredOperationKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDeferredOperationKHR *>( &deferredOperation ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DeferredOperationKHR, Dispatch>( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorPool*>( pDescriptorPool ) ) ); + return static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDescriptorPool *>( pDescriptorPool ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorPool>::type Device::createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorPool>::type Device::createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool; - Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo*>( &createInfo ), reinterpret_cast<const 
VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorPool*>( &descriptorPool ) ) ); - return createResultValue( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorPool" ); + Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorPool *>( &descriptorPool ) ) ); + return createResultValue( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPool" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorPool,Dispatch>>::type Device::createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorPool, Dispatch>>::type Device::createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool; - Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorPool*>( &descriptorPool ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DescriptorPool,Dispatch>( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorPoolUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorPool *>( &descriptorPool ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DescriptorPool, Dispatch>( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPoolUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( 
pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorSetLayout*>( pSetLayout ) ) ); + return static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDescriptorSetLayout *>( pSetLayout ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorSetLayout>::type Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorSetLayout>::type Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorSetLayout setLayout; - Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout*>( &setLayout ) ) ); - return createResultValue( result, setLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorSetLayout" ); + Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout *>( &setLayout ) ) ); + return createResultValue( result, setLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayout" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorSetLayout,Dispatch>>::type Device::createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorSetLayout, Dispatch>>::type Device::createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorSetLayout setLayout; - Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout*>( &setLayout ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DescriptorSetLayout,Dispatch>( result, setLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorSetLayoutUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo 
), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout *>( &setLayout ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DescriptorSetLayout, Dispatch>( result, setLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayoutUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorUpdateTemplate*>( pDescriptorUpdateTemplate ) ) ); + return static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDescriptorUpdateTemplate *>( pDescriptorUpdateTemplate ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type Device::createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type Device::createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; - Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate*>( &descriptorUpdateTemplate ) ) ); - return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplate" ); + Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate *>( 
&descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplate" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorUpdateTemplate,Dispatch>>::type Device::createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>>::type Device::createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; - Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate*>( &descriptorUpdateTemplate ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DescriptorUpdateTemplate,Dispatch>( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate *>( &descriptorUpdateTemplate ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDescriptorUpdateTemplate*>( pDescriptorUpdateTemplate ) ) ); + return static_cast<Result>( d.vkCreateDescriptorUpdateTemplateKHR( m_device, 
reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDescriptorUpdateTemplate *>( pDescriptorUpdateTemplate ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate>::type Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; - Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate*>( &descriptorUpdateTemplate ) ) ); - return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateKHR" ); + Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate *>( &descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorUpdateTemplate,Dispatch>>::type Device::createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>>::type Device::createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; - Result result = static_cast<Result>( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate*>( &descriptorUpdateTemplate ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DescriptorUpdateTemplate,Dispatch>( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateKHRUnique", deleter ); + Result result = static_cast<Result>( 
d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast<const VkDescriptorUpdateTemplateCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDescriptorUpdateTemplate *>( &descriptorUpdateTemplate ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkEvent*>( pEvent ) ) ); + return static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkEvent *>( pEvent ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Event>::type Device::createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Event>::type Device::createEvent( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Event event; - Result result = static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkEvent*>( &event ) ) ); - return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING"::Device::createEvent" ); + Result result = static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkEvent *>( &event ) ) ); + return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING "::Device::createEvent" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Event,Dispatch>>::type Device::createEventUnique( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef 
VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Event, Dispatch>>::type Device::createEventUnique( const EventCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Event event; - Result result = static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkEvent*>( &event ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Event,Dispatch>( result, event, VULKAN_HPP_NAMESPACE_STRING"::Device::createEventUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateEvent( m_device, reinterpret_cast<const VkEventCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkEvent *>( &event ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Event, Dispatch>( result, event, VULKAN_HPP_NAMESPACE_STRING "::Device::createEventUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) ); + return static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::createFence( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) ); - return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::createFence" ); + Result 
result = static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::createFence" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type Device::createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type Device::createFenceUnique( const FenceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Fence,Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::createFenceUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateFence( m_device, reinterpret_cast<const VkFenceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Fence, Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::createFenceUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFramebuffer*>( pFramebuffer ) ) ); + return static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkFramebuffer *>( pFramebuffer ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Framebuffer>::type 
Device::createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Framebuffer>::type Device::createFramebuffer( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Framebuffer framebuffer; - Result result = static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFramebuffer*>( &framebuffer ) ) ); - return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createFramebuffer" ); + Result result = static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFramebuffer *>( &framebuffer ) ) ); + return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebuffer" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Framebuffer,Dispatch>>::type Device::createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Framebuffer, Dispatch>>::type Device::createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Framebuffer framebuffer; - Result result = static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFramebuffer*>( &framebuffer ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Framebuffer,Dispatch>( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createFramebufferUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateFramebuffer( m_device, reinterpret_cast<const VkFramebufferCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFramebuffer *>( &framebuffer ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Framebuffer, Dispatch>( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebufferUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) ); + return static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename PipelineAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipelines", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
ResultValue<std::vector<Pipeline,Allocator>> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size(), vectorAllocator ); - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipelines", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipeline", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( 
&createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipeline", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename PipelineAllocator> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines; + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines; std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createGraphicsPipelinesUnique( 
VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename PipelineAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines( vectorAllocator ); + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines( pipelineAllocator ); std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline,Dispatch>> Device::createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline, Dispatch>> Device::createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, 
static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkGraphicsPipelineCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Pipeline,Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipelineUnique", { Result::eSuccess, Result::ePipelineCompileRequiredEXT }, deleter ); + Result result = static_cast<Result>( d.vkCreateGraphicsPipelines( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkGraphicsPipelineCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<Pipeline, Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelineUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkImage*>( pImage ) ) ); + return static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkImage *>( pImage ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Image>::type Device::createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Image>::type Device::createImage( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Image image; - Result result = static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImage*>( &image ) ) ); - return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING"::Device::createImage" ); + Result result = static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo *>( &createInfo ), 
reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImage *>( &image ) ) ); + return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING "::Device::createImage" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Image,Dispatch>>::type Device::createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Image, Dispatch>>::type Device::createImageUnique( const ImageCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Image image; - Result result = static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImage*>( &image ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Image,Dispatch>( result, image, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateImage( m_device, reinterpret_cast<const VkImageCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImage *>( &image ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Image, Dispatch>( result, image, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkImageView*>( pView ) ) ); + return static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkImageView *>( pView ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageView>::type Device::createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageView>::type Device::createImageView( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageView view; - Result result = static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImageView*>( &view ) ) ); - return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageView" ); + Result result = static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImageView *>( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageView" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<ImageView,Dispatch>>::type Device::createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ImageView, Dispatch>>::type Device::createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageView view; - Result result = static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImageView*>( &view ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<ImageView,Dispatch>( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageViewUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateImageView( m_device, reinterpret_cast<const VkImageViewCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkImageView *>( &view ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::ImageView, Dispatch>( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageViewUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNV*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkIndirectCommandsLayoutNV*>( pIndirectCommandsLayout ) ) ); + return static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNV *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkIndirectCommandsLayoutNV *>( pIndirectCommandsLayout ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>::type Device::createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>::type Device::createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout; - Result result = static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkIndirectCommandsLayoutNV*>( &indirectCommandsLayout ) ) ); - return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createIndirectCommandsLayoutNV" ); + Result result = static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkIndirectCommandsLayoutNV *>( &indirectCommandsLayout ) ) ); + return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNV" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<IndirectCommandsLayoutNV,Dispatch>>::type Device::createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV, Dispatch>>::type Device::createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout; - Result result = static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const 
VkIndirectCommandsLayoutCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkIndirectCommandsLayoutNV*>( &indirectCommandsLayout ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<IndirectCommandsLayoutNV,Dispatch>( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createIndirectCommandsLayoutNVUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast<const VkIndirectCommandsLayoutCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkIndirectCommandsLayoutNV *>( &indirectCommandsLayout ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV, Dispatch>( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNVUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipelineCache*>( pPipelineCache ) ) ); + return static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPipelineCache *>( pPipelineCache ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineCache>::type Device::createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineCache>::type Device::createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache; - Result result = static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineCache*>( &pipelineCache ) ) ); - return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineCache" 
); + Result result = static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineCache *>( &pipelineCache ) ) ); + return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCache" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<PipelineCache,Dispatch>>::type Device::createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PipelineCache, Dispatch>>::type Device::createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache; - Result result = static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineCache*>( &pipelineCache ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<PipelineCache,Dispatch>( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineCacheUnique", deleter ); + Result result = static_cast<Result>( d.vkCreatePipelineCache( m_device, reinterpret_cast<const VkPipelineCacheCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineCache *>( &pipelineCache ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::PipelineCache, Dispatch>( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCacheUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipelineLayout*>( pPipelineLayout ) ) ); + return static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks 
*>( pAllocator ), reinterpret_cast< VkPipelineLayout *>( pPipelineLayout ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineLayout>::type Device::createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PipelineLayout>::type Device::createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout; - Result result = static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineLayout*>( &pipelineLayout ) ) ); - return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineLayout" ); + Result result = static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineLayout *>( &pipelineLayout ) ) ); + return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayout" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<PipelineLayout,Dispatch>>::type Device::createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PipelineLayout, Dispatch>>::type Device::createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout; - Result result = static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineLayout*>( &pipelineLayout ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<PipelineLayout,Dispatch>( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineLayoutUnique", deleter ); + Result result = static_cast<Result>( d.vkCreatePipelineLayout( m_device, reinterpret_cast<const VkPipelineLayoutCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipelineLayout *>( &pipelineLayout ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::PipelineLayout, Dispatch>( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayoutUnique", deleter 
); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPrivateDataSlotEXT*>( pPrivateDataSlot ) ) ); + return static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPrivateDataSlotEXT *>( pPrivateDataSlot ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT>::type Device::createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT>::type Device::createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot; - Result result = static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPrivateDataSlotEXT*>( &privateDataSlot ) ) ); - return createResultValue( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING"::Device::createPrivateDataSlotEXT" ); + Result result = static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPrivateDataSlotEXT *>( &privateDataSlot ) ) ); + return createResultValue( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<PrivateDataSlotEXT,Dispatch>>::type Device::createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT, Dispatch>>::type Device::createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional<const 
AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot; - Result result = static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPrivateDataSlotEXT*>( &privateDataSlot ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<PrivateDataSlotEXT,Dispatch>( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING"::Device::createPrivateDataSlotEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast<const VkPrivateDataSlotCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPrivateDataSlotEXT *>( &privateDataSlot ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT, Dispatch>( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkQueryPool*>( pQueryPool ) ) ); + return static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkQueryPool *>( pQueryPool ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::QueryPool>::type Device::createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::QueryPool>::type Device::createQueryPool( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::QueryPool queryPool; - Result result = static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkQueryPool*>( &queryPool ) ) ); - return createResultValue( result, queryPool, 
VULKAN_HPP_NAMESPACE_STRING"::Device::createQueryPool" ); + Result result = static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkQueryPool *>( &queryPool ) ) ); + return createResultValue( result, queryPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPool" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<QueryPool,Dispatch>>::type Device::createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::QueryPool, Dispatch>>::type Device::createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::QueryPool queryPool; - Result result = static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkQueryPool*>( &queryPool ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<QueryPool,Dispatch>( result, queryPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createQueryPoolUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateQueryPool( m_device, reinterpret_cast<const VkQueryPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkQueryPool *>( &queryPool ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::QueryPool, Dispatch>( result, queryPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPoolUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( pCreateInfos ), 
reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) ); + return static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename PipelineAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelinesKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename 
PipelineAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size(), vectorAllocator ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelinesKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelineKHR", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR, Result::ePipelineCompileRequiredEXT } ); + Result result = static_cast<Result>( 
d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename PipelineAllocator> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines; + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines; std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) || ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, 
Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename PipelineAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines( vectorAllocator ); + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines( pipelineAllocator ); std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR ) || ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> 
deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline,Dispatch>> Device::createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline, Dispatch>> Device::createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Pipeline,Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelineKHRUnique", { Result::eSuccess, Result::eOperationDeferredKHR, Result::eOperationNotDeferredKHR, Result::ePipelineCompileRequiredEXT }, deleter ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast<VkDeferredOperationKHR>( deferredOperation ), static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkRayTracingPipelineCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<Pipeline, Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const &d) const 
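// [Editor's note — illustrative sketch, not part of the diff above.] The hunks above add a
// VULKAN_HPP_NAMESPACE::DeferredOperationKHR parameter in front of the pipeline cache for the
// vkCreateRayTracingPipelinesKHR wrappers. A call under the new signature might look like the
// fragment below; `device`, `createInfos` and the choice of a null deferred operation are
// assumptions made only for this example.
vk::DeferredOperationKHR deferredOp{};  // VK_NULL_HANDLE: build the pipelines immediately, no deferral
auto rtResult = device.createRayTracingPipelinesKHR( deferredOp, vk::PipelineCache(), createInfos );
// rtResult.result may be eSuccess, eOperationDeferredKHR, eOperationNotDeferredKHR or
// ePipelineCompileRequiredEXT — the success-code set passed to createResultValue above.
std::vector<vk::Pipeline> rtPipelines = rtResult.value;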
VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkPipeline*>( pPipelines ) ) ); + return static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfoCount, reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename PipelineAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelinesNV", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline,Allocator>> 
Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Pipeline>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<Pipeline, PipelineAllocator>> Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<Pipeline,Allocator> pipelines( createInfos.size(), vectorAllocator ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( pipelines.data() ) ) ); - return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelinesNV", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + std::vector<Pipeline, PipelineAllocator> pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<Pipeline> Device::createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelineNV", { Result::eSuccess, Result::ePipelineCompileRequiredEXT } ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const 
VkRayTracingPipelineCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename PipelineAllocator> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines; + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines; std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
ResultValue<std::vector<UniqueHandle<Pipeline,Dispatch>,Allocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename PipelineAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<Pipeline, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV> const & createInfos, Optional<const AllocationCallbacks> allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<Pipeline, Dispatch>, Allocator> uniquePipelines( vectorAllocator ); + std::vector<UniqueHandle<Pipeline, Dispatch>, PipelineAllocator> uniquePipelines( pipelineAllocator ); std::vector<Pipeline> pipelines( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size() , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>(pipelines.data()) ) ); - if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) || ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), createInfos.size(), reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) { uniquePipelines.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { uniquePipelines.push_back( UniqueHandle<Pipeline, Dispatch>( pipelines[i], deleter ) ); } } - return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline,Dispatch>> Device::createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<UniqueHandle<Pipeline, Dispatch>> Device::createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional<const AllocationCallbacks> 
allocator, Dispatch const & d ) const { Pipeline pipeline; - Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1 , reinterpret_cast<const VkRayTracingPipelineCreateInfoNV*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline*>( &pipeline ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Pipeline,Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRayTracingPipelineNVUnique", { Result::eSuccess, Result::ePipelineCompileRequiredEXT }, deleter ); + Result result = static_cast<Result>( d.vkCreateRayTracingPipelinesNV( m_device, static_cast<VkPipelineCache>( pipelineCache ), 1, reinterpret_cast<const VkRayTracingPipelineCreateInfoNV *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkPipeline *>( &pipeline ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<Pipeline, Dispatch>( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkRenderPass*>( pRenderPass ) ) ); + return static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const 
AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass" ); + Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type Device::createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type Device::createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<RenderPass,Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPassUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPassUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkRenderPass*>( pRenderPass ) ) ); + return static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 
*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2" ); + Result result = static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type Device::createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type Device::createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<RenderPass,Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2Unique", deleter ); + Result result = static_cast<Result>( d.vkCreateRenderPass2( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2Unique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif 
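// [Editor's note — illustrative sketch, not part of the diff.] With the enhanced-mode overloads
// above, a render pass can be created from a filled vk::RenderPassCreateInfo2 either as a plain
// handle or as a unique handle that is destroyed automatically; `device` and `renderPassInfo`
// are assumed to exist for this example.
vk::RenderPass       renderPass       = device.createRenderPass2( renderPassInfo );       // destroy manually via device.destroyRenderPass()
vk::UniqueRenderPass uniqueRenderPass = device.createRenderPass2Unique( renderPassInfo );  // released through ObjectDestroy when it goes out of scope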
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkRenderPass*>( pRenderPass ) ) ); + return static_cast<Result>( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RenderPass>::type Device::createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2KHR" ); + Result result = static_cast<Result>( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type Device::createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>>::type Device::createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RenderPass renderPass; - Result result = static_cast<Result>( d.vkCreateRenderPass2KHR( 
m_device, reinterpret_cast<const VkRenderPassCreateInfo2*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<RenderPass,Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2KHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast<const VkRenderPassCreateInfo2 *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass *>( &renderPass ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSampler*>( pSampler ) ) ); + return static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSampler *>( pSampler ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Sampler>::type Device::createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Sampler>::type Device::createSampler( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Sampler sampler; - Result result = static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSampler*>( &sampler ) ) ); - return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING"::Device::createSampler" ); + Result result = static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSampler *>( &sampler ) ) ); 
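// [Editor's note — illustrative sketch, not part of the diff.] Typical use of the sampler
// wrappers shown here; the field values are placeholder assumptions, not taken from the source.
vk::SamplerCreateInfo samplerInfo{};
samplerInfo.magFilter    = vk::Filter::eLinear;
samplerInfo.minFilter    = vk::Filter::eLinear;
samplerInfo.addressModeU = vk::SamplerAddressMode::eRepeat;
vk::UniqueSampler sampler = device.createSamplerUnique( samplerInfo );  // throws vk::SystemError on failure when exceptions are enabled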
+ return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING "::Device::createSampler" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Sampler,Dispatch>>::type Device::createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Sampler, Dispatch>>::type Device::createSamplerUnique( const SamplerCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Sampler sampler; - Result result = static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSampler*>( &sampler ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Sampler,Dispatch>( result, sampler, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSampler( m_device, reinterpret_cast<const VkSamplerCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSampler *>( &sampler ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Sampler, Dispatch>( result, sampler, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSamplerYcbcrConversion*>( pYcbcrConversion ) ) ); + return static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSamplerYcbcrConversion *>( pYcbcrConversion ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type Device::createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & 
createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type Device::createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; - Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion*>( &ycbcrConversion ) ) ); - return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversion" ); + Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion *>( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversion" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SamplerYcbcrConversion,Dispatch>>::type Device::createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>>::type Device::createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; - Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion*>( &ycbcrConversion ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SamplerYcbcrConversion,Dispatch>( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion *>( &ycbcrConversion ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSamplerYcbcrConversion*>( pYcbcrConversion ) ) ); + return static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSamplerYcbcrConversion *>( pYcbcrConversion ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type Device::createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion>::type Device::createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; - Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion*>( &ycbcrConversion ) ) ); - return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionKHR" ); + Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion *>( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SamplerYcbcrConversion,Dispatch>>::type Device::createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>>::type Device::createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; - Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion*>( &ycbcrConversion ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SamplerYcbcrConversion,Dispatch>( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast<const VkSamplerYcbcrConversionCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSamplerYcbcrConversion *>( &ycbcrConversion ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSemaphore*>( pSemaphore ) ) ); + return static_cast<Result>( d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSemaphore *>( pSemaphore ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Semaphore>::type Device::createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Semaphore>::type Device::createSemaphore( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Semaphore semaphore; - Result result = static_cast<Result>( 
d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSemaphore*>( &semaphore ) ) ); - return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING"::Device::createSemaphore" ); + Result result = static_cast<Result>( d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSemaphore *>( &semaphore ) ) ); + return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphore" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Semaphore,Dispatch>>::type Device::createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Semaphore, Dispatch>>::type Device::createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Semaphore semaphore; - Result result = static_cast<Result>( d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSemaphore*>( &semaphore ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Semaphore,Dispatch>( result, semaphore, VULKAN_HPP_NAMESPACE_STRING"::Device::createSemaphoreUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSemaphore( m_device, reinterpret_cast<const VkSemaphoreCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSemaphore *>( &semaphore ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Semaphore, Dispatch>( result, semaphore, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphoreUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), 
reinterpret_cast<VkShaderModule*>( pShaderModule ) ) ); + return static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkShaderModule *>( pShaderModule ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ShaderModule>::type Device::createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ShaderModule>::type Device::createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ShaderModule shaderModule; - Result result = static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkShaderModule*>( &shaderModule ) ) ); - return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING"::Device::createShaderModule" ); + Result result = static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkShaderModule *>( &shaderModule ) ) ); + return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModule" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<ShaderModule,Dispatch>>::type Device::createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ShaderModule, Dispatch>>::type Device::createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ShaderModule shaderModule; - Result result = static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkShaderModule*>( &shaderModule ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<ShaderModule,Dispatch>( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING"::Device::createShaderModuleUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateShaderModule( m_device, reinterpret_cast<const VkShaderModuleCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkShaderModule *>( &shaderModule ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return 
createResultValue<VULKAN_HPP_NAMESPACE::ShaderModule, Dispatch>( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModuleUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, swapchainCount, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSwapchainKHR*>( pSwapchains ) ) ); + return static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, swapchainCount, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( pCreateInfos ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSwapchainKHR *>( pSwapchains ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type Device::createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename SwapchainKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<SwapchainKHR, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<SwapchainKHR,Allocator> swapchains( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( swapchains.data() ) ) ); - return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainsKHR" ); + std::vector<SwapchainKHR, SwapchainKHRAllocator> swapchains( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast<const VkSwapchainCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( swapchains.data() ) ) ); + return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename 
std::enable_if<std::is_same<typename B::value_type, SwapchainKHR>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<SwapchainKHR,Allocator>>::type Device::createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SwapchainKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SwapchainKHR>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<SwapchainKHR, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d ) const { - std::vector<SwapchainKHR,Allocator> swapchains( createInfos.size(), vectorAllocator ); - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( swapchains.data() ) ) ); - return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainsKHR" ); + std::vector<SwapchainKHR, SwapchainKHRAllocator> swapchains( createInfos.size(), swapchainKHRAllocator ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast<const VkSwapchainCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( swapchains.data() ) ) ); + return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<SwapchainKHR>::type Device::createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<SwapchainKHR>::type Device::createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { SwapchainKHR swapchain; - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, 1 , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) ); - return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainKHR" ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, 1, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( &swapchain ) ) ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING 
"::Device::createSharedSwapchainKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch, typename Allocator > - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR,Dispatch>,Allocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch, typename SwapchainKHRAllocator> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { - std::vector<UniqueHandle<SwapchainKHR, Dispatch>, Allocator> uniqueSwapchainKHRs; - std::vector<SwapchainKHR> swapchainKHRs( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>(swapchainKHRs.data()) ) ); + std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator> uniqueSwapchains; + std::vector<SwapchainKHR> swapchains( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast<const VkSwapchainCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( swapchains.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { - uniqueSwapchainKHRs.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + uniqueSwapchains.reserve( createInfos.size() ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { - uniqueSwapchainKHRs.push_back( UniqueHandle<SwapchainKHR, Dispatch>( swapchainKHRs[i], deleter ) ); + uniqueSwapchains.push_back( UniqueHandle<SwapchainKHR, Dispatch>( swapchains[i], deleter ) ); } } - - return createResultValue( result, std::move( uniqueSwapchainKHRs ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); + return createResultValue( result, std::move( uniqueSwapchains ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); } - template<typename Dispatch, typename Allocator , typename B, typename std::enable_if<std::is_same<typename B::value_type, UniqueHandle<SwapchainKHR, Dispatch>>::value, int>::type> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR,Dispatch>,Allocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const &createInfos, Optional<const AllocationCallbacks> allocator, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Dispatch, typename SwapchainKHRAllocator, typename B, typename std::enable_if<std::is_same<typename B::value_type, 
UniqueHandle<SwapchainKHR, Dispatch>>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR> const & createInfos, Optional<const AllocationCallbacks> allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d ) const { - std::vector<UniqueHandle<SwapchainKHR, Dispatch>, Allocator> uniqueSwapchainKHRs( vectorAllocator ); - std::vector<SwapchainKHR> swapchainKHRs( createInfos.size() ); - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>(swapchainKHRs.data()) ) ); + std::vector<UniqueHandle<SwapchainKHR, Dispatch>, SwapchainKHRAllocator> uniqueSwapchains( swapchainKHRAllocator ); + std::vector<SwapchainKHR> swapchains( createInfos.size() ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast<const VkSwapchainCreateInfoKHR *>( createInfos.data() ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( swapchains.data() ) ) ); if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) { - uniqueSwapchainKHRs.reserve( createInfos.size() ); - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - for ( size_t i=0 ; i<createInfos.size() ; i++ ) + uniqueSwapchains.reserve( createInfos.size() ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) { - uniqueSwapchainKHRs.push_back( UniqueHandle<SwapchainKHR, Dispatch>( swapchainKHRs[i], deleter ) ); + uniqueSwapchains.push_back( UniqueHandle<SwapchainKHR, Dispatch>( swapchains[i], deleter ) ); } } - - return createResultValue( result, std::move( uniqueSwapchainKHRs ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); + return createResultValue( result, std::move( uniqueSwapchains ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); } - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SwapchainKHR,Dispatch>>::type Device::createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SwapchainKHR, Dispatch>>::type Device::createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { SwapchainKHR swapchain; - Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, 1 , reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SwapchainKHR,Dispatch>( result, swapchain, 
VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSharedSwapchainsKHR( m_device, 1, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( &swapchain ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<SwapchainKHR, Dispatch>( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSwapchainKHR*>( pSwapchain ) ) ); + return static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSwapchainKHR *>( pSwapchain ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SwapchainKHR>::type Device::createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SwapchainKHR>::type Device::createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain; - Result result = static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) ); - return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSwapchainKHR" ); + Result result = static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( &swapchain ) ) ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SwapchainKHR,Dispatch>>::type Device::createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SwapchainKHR, Dispatch>>::type Device::createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain; - Result result = static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR*>( &swapchain ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SwapchainKHR,Dispatch>( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSwapchainKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateSwapchainKHR( m_device, reinterpret_cast<const VkSwapchainCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSwapchainKHR *>( &swapchain ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SwapchainKHR, Dispatch>( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkValidationCacheEXT*>( pValidationCache ) ) ); + return static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkValidationCacheEXT *>( pValidationCache ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ValidationCacheEXT>::type Device::createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ValidationCacheEXT>::type Device::createValidationCacheEXT( 
const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache; - Result result = static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkValidationCacheEXT*>( &validationCache ) ) ); - return createResultValue( result, validationCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createValidationCacheEXT" ); + Result result = static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkValidationCacheEXT *>( &validationCache ) ) ); + return createResultValue( result, validationCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<ValidationCacheEXT,Dispatch>>::type Device::createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::ValidationCacheEXT, Dispatch>>::type Device::createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache; - Result result = static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkValidationCacheEXT*>( &validationCache ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<ValidationCacheEXT,Dispatch>( result, validationCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createValidationCacheEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkValidationCacheEXT *>( &validationCache ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::ValidationCacheEXT, Dispatch>( result, validationCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT*>( pNameInfo ) ) ); + return static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT *>( pNameInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT*>( &nameInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::debugMarkerSetObjectNameEXT" ); + Result result = static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT *>( &nameInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT*>( pTagInfo ) ) ); + return static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT *>( pTagInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT*>( &tagInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::debugMarkerSetObjectTagEXT" ); + Result result = static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT *>( &tagInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename 
Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkDeferredOperationJoinKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkDeferredOperationJoinKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::deferredOperationJoinKHR", { Result::eSuccess, Result::eThreadDoneKHR, Result::eThreadIdleKHR } ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::deferredOperationJoinKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR 
accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { -#ifdef VK_ENABLE_BETA_EXTENSIONS - d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); -#else - d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { -#ifdef VK_ENABLE_BETA_EXTENSIONS - d.vkDestroyAccelerationStructureKHR( 
m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); -#else - d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + 
template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyBuffer( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), 
reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyBufferView( m_device, static_cast<VkBufferView>( bufferView ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + 
#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( 
operation ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDeferredOperationKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), 
reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); 
+ d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorSetLayout( m_device, static_cast<VkDescriptorSetLayout>( descriptorSetLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( 
VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDevice( m_device, reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } 
+ #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyEvent( m_device, static_cast<VkEvent>( event ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - 
d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyFence( m_device, static_cast<VkFence>( fence ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyFramebuffer( m_device, 
static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyImage( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template 
<typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyImageView( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) 
const VULKAN_HPP_NOEXCEPT { - d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast<VkIndirectCommandsLayoutNV>( indirectCommandsLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, 
Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipeline( m_device, static_cast<VkPipeline>( pipeline ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - 
d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipelineCache( m_device, static_cast<VkPipelineCache>( pipelineCache ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), 
reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPipelineLayout( m_device, static_cast<VkPipelineLayout>( pipelineLayout ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const 
VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const 
VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE 
void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler 
sampler, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySampler( m_device, static_cast<VkSampler>( sampler ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversion( 
m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( 
VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySamplerYcbcrConversion( m_device, static_cast<VkSamplerYcbcrConversion>( ycbcrConversion ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( 
semaphore ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySemaphore( m_device, static_cast<VkSemaphore>( semaphore ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyShaderModule( m_device, static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyShaderModule( m_device, 
static_cast<VkShaderModule>( shaderModule ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySwapchainKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif 
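Editor's note, not part of the diff: the regenerated destroy/destroyXxx overloads above take Optional<const AllocationCallbacks> with a null default, so callers can simply omit the allocator. A minimal usage sketch under assumptions (hypothetical helper name and handles, `vk` as the default VULKAN_HPP_NAMESPACE, enhanced mode enabled, default dispatcher):

#include <vulkan/vulkan.hpp>

// Hypothetical helper; `device`, `sampler` and `semaphore` are assumed to be valid handles.
void destroySampleResources( vk::Device device, vk::Sampler sampler, vk::Semaphore semaphore )
{
  device.destroySampler( sampler );  // enhanced overload, default (null) allocation callbacks
  device.destroy( semaphore );       // handle-generic destroy() overload shown in the hunks above
}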
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyValidationCacheEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitIdle(Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitIdle( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkDeviceWaitIdle( m_device ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::waitIdle(Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::waitIdle( Dispatch const & d ) const { Result result = static_cast<Result>( d.vkDeviceWaitIdle( m_device ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitIdle" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT*>( pDisplayPowerInfo ) ) ); + return static_cast<Result>( d.vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT *>( pDisplayPowerInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT*>( &displayPowerInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::displayPowerControlEXT" ); + Result result = static_cast<Result>( d.vkDisplayPowerControlEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayPowerInfoEXT *>( &displayPowerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::flushMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::flushMappedMemoryRanges( uint32_t memoryRangeCount, const 
VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast<Result>( d.vkFlushMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange *>( pMemoryRanges ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::flushMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const & memoryRanges, Dispatch const & d ) const + { + Result result = static_cast<Result>( d.vkFlushMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast<const VkMappedMemoryRange *>( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkFlushMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange*>( pMemoryRanges ) ) ); + d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBufferCount, reinterpret_cast<const VkCommandBuffer *>( pCommandBuffers ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::flushMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const &memoryRanges, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - Result result = static_cast<Result>( d.vkFlushMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast<const VkMappedMemoryRange*>( memoryRanges.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::flushMappedMemoryRanges" ); + d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBuffers.size(), reinterpret_cast<const VkCommandBuffer *>( commandBuffers.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBufferCount, reinterpret_cast<const VkCommandBuffer*>( pCommandBuffers ) ); + d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBufferCount, reinterpret_cast<const VkCommandBuffer *>( pCommandBuffers ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::freeCommandBuffers( 
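Editor's note, not part of the diff: the ArrayProxy overload of flushMappedMemoryRanges above also accepts a single range by reference. A sketch under assumptions (hypothetical helper, valid mapped host-visible `memory`, exceptions enabled so the enhanced call returns void):

#include <vulkan/vulkan.hpp>

// Hypothetical helper; flushes the whole mapped allocation in one call.
void flushWholeAllocation( vk::Device device, vk::DeviceMemory memory )
{
  vk::MappedMemoryRange range( memory, 0, VK_WHOLE_SIZE );
  device.flushMappedMemoryRanges( range );  // ArrayProxy wraps the single element
}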
VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBuffers.size() , reinterpret_cast<const VkCommandBuffer*>( commandBuffers.data() ) ); + d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBuffers.size(), reinterpret_cast<const VkCommandBuffer *>( commandBuffers.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE Result Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBufferCount, reinterpret_cast<const VkCommandBuffer*>( pCommandBuffers ) ); + return static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSetCount, reinterpret_cast<const VkDescriptorSet *>( pDescriptorSets ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::CommandBuffer> const &commandBuffers, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, Dispatch const & d ) const { - d.vkFreeCommandBuffers( m_device, static_cast<VkCommandPool>( commandPool ), commandBuffers.size() , reinterpret_cast<const VkCommandBuffer*>( commandBuffers.data() ) ); + Result result = static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSets.size(), reinterpret_cast<const VkDescriptorSet *>( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::freeDescriptorSets" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE Result Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSetCount, reinterpret_cast<const VkDescriptorSet*>( pDescriptorSets ) ) ); + return static_cast<Result>( 
d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSetCount, reinterpret_cast<const VkDescriptorSet *>( pDescriptorSets ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const & descriptorSets, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSets.size() , reinterpret_cast<const VkDescriptorSet*>( descriptorSets.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::freeDescriptorSets" ); + Result result = static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSets.size(), reinterpret_cast<const VkDescriptorSet *>( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::free" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSetCount, reinterpret_cast<const VkDescriptorSet*>( pDescriptorSets ) ) ); + d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy<const VULKAN_HPP_NAMESPACE::DescriptorSet> const &descriptorSets, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - Result result = static_cast<Result>( d.vkFreeDescriptorSets( m_device, static_cast<VkDescriptorPool>( descriptorPool ), descriptorSets.size() , reinterpret_cast<const VkDescriptorSet*>( descriptorSets.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::free" ); + d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + 
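Editor's note, not part of the diff: the freeDescriptorSets / free pair above takes an ArrayProxy, which binds directly to a std::vector. A sketch under assumptions (hypothetical helper; the pool is assumed to have been created with the free-descriptor-set flag, as vkFreeDescriptorSets requires):

#include <vector>
#include <vulkan/vulkan.hpp>

// Hypothetical helper; returns the sets to the pool.
void releaseSets( vk::Device device, vk::DescriptorPool pool,
                  std::vector<vk::DescriptorSet> const & sets )
{
  device.freeDescriptorSets( pool, sets );  // same effect as device.free( pool, sets )
}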
VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR* pSizeInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkGetAccelerationStructureBuildSizesKHR( m_device, static_cast<VkAccelerationStructureBuildTypeKHR>( buildType ), reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( pBuildInfo ), pMaxPrimitiveCounts, reinterpret_cast< VkAccelerationStructureBuildSizesInfoKHR *>( pSizeInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR Device::getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const AccelerationStructureBuildGeometryInfoKHR & buildInfo, ArrayProxy<const uint32_t> const & maxPrimitiveCounts, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { - d.vkFreeMemory( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( maxPrimitiveCounts.size() == buildInfo.geometryCount ); +#else + if ( maxPrimitiveCounts.size() != buildInfo.geometryCount ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureBuildSizesKHR: 
maxPrimitiveCounts.size() != buildInfo.geometryCount" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR sizeInfo; + d.vkGetAccelerationStructureBuildSizesKHR( m_device, static_cast<VkAccelerationStructureBuildTypeKHR>( buildType ), reinterpret_cast<const VkAccelerationStructureBuildGeometryInfoKHR *>( &buildInfo ), maxPrimitiveCounts.data(), reinterpret_cast<VkAccelerationStructureBuildSizesInfoKHR *>( &sizeInfo ) ); + return sizeInfo; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<DeviceAddress>( d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast<const VkAccelerationStructureDeviceAddressInfoKHR*>( pInfo ) ) ); + return static_cast<DeviceAddress>( d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast<const VkAccelerationStructureDeviceAddressInfoKHR *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast<const VkAccelerationStructureDeviceAddressInfoKHR*>( &info ) ); + return d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast<const VkAccelerationStructureDeviceAddressInfoKHR *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), dataSize, pData ) ); + return static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), dataSize, pData ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template <typename T, typename Dispatch> VULKAN_HPP_DEPRECATED( "This function is deprecated. 
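Editor's note, not part of the diff: the enhanced getAccelerationStructureBuildSizesKHR overload above requires exactly one max-primitive count per geometry; on mismatch it asserts under VULKAN_HPP_NO_EXCEPTIONS and otherwise throws vk::LogicError, as the hunk shows. A sketch under assumptions (hypothetical helper, VK_KHR_acceleration_structure available, and a dispatcher with vkGetAccelerationStructureBuildSizesKHR loaded):

#include <vector>
#include <vulkan/vulkan.hpp>

// Hypothetical helper; buildInfo is assumed to already describe geometryCount geometries.
vk::AccelerationStructureBuildSizesInfoKHR queryBuildSizes(
    vk::Device device,
    vk::AccelerationStructureBuildGeometryInfoKHR const & buildInfo,
    std::vector<uint32_t> const & maxPrimitiveCounts )  // one count per geometry
{
  return device.getAccelerationStructureBuildSizesKHR(
      vk::AccelerationStructureBuildTypeKHR::eDevice, buildInfo, maxPrimitiveCounts );
}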
Use one of the other flavours of it.") - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, ArrayProxy<T> const &data, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, ArrayProxy<T> const &data, Dispatch const &d ) const { - Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); + Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getAccelerationStructureHandleNV" ); } template <typename T, typename Allocator, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, size_t dataSize, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, Dispatch const & d ) const { VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); std::vector<T,Allocator> data( dataSize / sizeof( T ) ); - Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); - + Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), data.size() * sizeof( T ), reinterpret_cast<void *>( data.data() ) ) ); return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); } template <typename T, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Dispatch const & d ) const { T data; - Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), 1 * sizeof( T ) , reinterpret_cast<void*>( &data ) ) ); - + Result result = static_cast<Result>( d.vkGetAccelerationStructureHandleNV( m_device, static_cast<VkAccelerationStructureNV>( accelerationStructure ), sizeof( T ), reinterpret_cast<void *>( &data ) ) ); return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename 
Dispatch> - VULKAN_HPP_INLINE void Device::getAccelerationStructureMemoryRequirementsKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoKHR* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkGetAccelerationStructureMemoryRequirementsKHR( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoKHR*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); - } -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getAccelerationStructureMemoryRequirementsKHR( const AccelerationStructureMemoryRequirementsInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT - { - VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetAccelerationStructureMemoryRequirementsKHR( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoKHR*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); - return memoryRequirements; - } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getAccelerationStructureMemoryRequirementsKHR( const AccelerationStructureMemoryRequirementsInfoKHR & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetAccelerationStructureMemoryRequirementsKHR( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoKHR*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); - return structureChain; + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV *>( pInfo ), reinterpret_cast< VkMemoryRequirements2KHR *>( pMemoryRequirements ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV*>( pInfo ), reinterpret_cast<VkMemoryRequirements2KHR*>( pMemoryRequirements ) ); - } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { 
VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR memoryRequirements; - d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV*>( &info ), reinterpret_cast<VkMemoryRequirements2KHR*>( &memoryRequirements ) ); + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV *>( &info ), reinterpret_cast<VkMemoryRequirements2KHR *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR>(); - d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV*>( &info ), reinterpret_cast<VkMemoryRequirements2KHR*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR>(); + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast<const VkAccelerationStructureMemoryRequirementsInfoNV *>( &info ), reinterpret_cast<VkMemoryRequirements2KHR *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast<VkAndroidHardwareBufferPropertiesANDROID*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast< VkAndroidHardwareBufferPropertiesANDROID *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d ) const 
{ VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID properties; - Result result = static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast<VkAndroidHardwareBufferPropertiesANDROID*>( &properties ) ) ); - return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getAndroidHardwareBufferPropertiesANDROID" ); + Result result = static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast<VkAndroidHardwareBufferPropertiesANDROID *>( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d ) const + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d ) const { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID& properties = structureChain.template get<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>(); - Result result = static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast<VkAndroidHardwareBufferPropertiesANDROID*>( &properties ) ) ); + VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID & properties = structureChain.template get<VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID>(); + Result result = static_cast<Result>( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast<VkAndroidHardwareBufferPropertiesANDROID *>( &properties ) ) ); return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::Device::getAndroidHardwareBufferPropertiesANDROID" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( pInfo ) ) ); + return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferDeviceAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( &info ) ); + return d.vkGetBufferDeviceAddress( m_device, reinterpret_cast<const 
VkBufferDeviceAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( pInfo ) ) ); + return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( &info ) ); + return d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( pInfo ) ) ); + return static_cast<DeviceAddress>( d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( pInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( &info ) ); + return d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<VkMemoryRequirements*>( pMemoryRequirements ) ); + d.vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast< 
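
A minimal usage sketch of the enhanced getBufferAddress overload above (not part of the diff), assuming a valid vk::Device named `device`, a vk::Buffer named `buffer` created with the shader-device-address usage flag, and the bufferDeviceAddress feature enabled:

    vk::BufferDeviceAddressInfo addressInfo( buffer );
    vk::DeviceAddress address = device.getBufferAddress( addressInfo );  // getBufferAddressKHR / getBufferAddressEXT follow the same shape
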
VkMemoryRequirements *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; - d.vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<VkMemoryRequirements*>( &memoryRequirements ) ); + d.vkGetBufferMemoryRequirements( m_device, static_cast<VkBuffer>( buffer ), reinterpret_cast<VkMemoryRequirements *>( &memoryRequirements ) ); return memoryRequirements; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast<const VkBufferMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( pInfo ) ); + return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( &info ) ); + return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( pInfo ) ); + return d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo*>( &info ) ); + return 
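
A minimal sketch of the StructureChain flavour of getBufferMemoryRequirements2 shown above (not part of the diff), assuming a valid vk::Device `device` and vk::Buffer `buffer`; the dedicated-allocation query is only one example of a chainable structure:

    vk::BufferMemoryRequirementsInfo2 info( buffer );
    auto chain = device.getBufferMemoryRequirements2<vk::MemoryRequirements2, vk::MemoryDedicatedRequirements>( info );
    vk::MemoryRequirements reqs = chain.get<vk::MemoryRequirements2>().memoryRequirements;
    vk::Bool32 prefersDedicated = chain.get<vk::MemoryDedicatedRequirements>().prefersDedicatedAllocation;
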
d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkBufferDeviceAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetCalibratedTimestampsEXT( m_device, timestampCount, reinterpret_cast<const VkCalibratedTimestampInfoEXT*>( pTimestampInfos ), pTimestamps, pMaxDeviation ) ); + return static_cast<Result>( d.vkGetCalibratedTimestampsEXT( m_device, timestampCount, reinterpret_cast<const VkCalibratedTimestampInfoEXT *>( pTimestampInfos ), pTimestamps, pMaxDeviation ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const &timestampInfos, ArrayProxy<uint64_t> const &timestamps, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_DEPRECATED( "This function is deprecated. Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const &timestampInfos, ArrayProxy<uint64_t> const &timestamps, Dispatch const &d ) const { -#ifdef VULKAN_HPP_NO_EXCEPTIONS + #ifdef VULKAN_HPP_NO_EXCEPTIONS VULKAN_HPP_ASSERT( timestampInfos.size() == timestamps.size() ); #else if ( timestampInfos.size() != timestamps.size() ) @@ -84827,676 +86643,770 @@ namespace VULKAN_HPP_NAMESPACE uint64_t maxDeviation; Result result = static_cast<Result>( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size() , reinterpret_cast<const VkCalibratedTimestampInfoEXT*>( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); return createResultValue( result, maxDeviation, VULKAN_HPP_NAMESPACE_STRING"::Device::getCalibratedTimestampsEXT" ); + } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint32_t Device::getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Uint64_tAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::pair<std::vector<uint64_t, Uint64_tAllocator>, uint64_t>>::type Device::getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const & timestampInfos, Dispatch const & d ) const { - return d.vkGetDeferredOperationMaxConcurrencyKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ); + std::pair<std::vector<uint64_t, Uint64_tAllocator>,uint64_t> data( std::piecewise_construct, std::forward_as_tuple( timestampInfos.size() ), std::forward_as_tuple( 0 ) ); + std::vector<uint64_t, 
Uint64_tAllocator> & timestamps = data.first; + uint64_t & maxDeviation = data.second; + Result result = static_cast<Result>( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast<const VkCalibratedTimestampInfoEXT *>( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE uint32_t Device::getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Uint64_tAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint64_t>::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::pair<std::vector<uint64_t, Uint64_tAllocator>, uint64_t>>::type Device::getCalibratedTimestampsEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT> const & timestampInfos, Uint64_tAllocator & uint64_tAllocator, Dispatch const & d ) const { - return d.vkGetDeferredOperationMaxConcurrencyKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ); + std::pair<std::vector<uint64_t, Uint64_tAllocator>,uint64_t> data( std::piecewise_construct, std::forward_as_tuple( timestampInfos.size(), uint64_tAllocator ), std::forward_as_tuple( 0 ) ); + std::vector<uint64_t, Uint64_tAllocator> & timestamps = data.first; + uint64_t & maxDeviation = data.second; + Result result = static_cast<Result>( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast<const VkCalibratedTimestampInfoEXT *>( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS + + template <typename Dispatch> + VULKAN_HPP_INLINE uint32_t Device::getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeferredOperationMaxConcurrencyKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ); + } + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetDeferredOperationResultKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkGetDeferredOperationResultKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) ); - return createResultValue( result, 
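
A minimal sketch of the new pair-returning getCalibratedTimestampsEXT overload introduced above (not part of the diff), assuming a valid vk::Device `device`, VK_EXT_calibrated_timestamps enabled, and exceptions enabled so the pair is returned directly; the two time domains are only an example:

    std::array<vk::CalibratedTimestampInfoEXT, 2> infos = {
        vk::CalibratedTimestampInfoEXT( vk::TimeDomainEXT::eDevice ),
        vk::CalibratedTimestampInfoEXT( vk::TimeDomainEXT::eClockMonotonic ) };
    auto [ timestamps, maxDeviation ] = device.getCalibratedTimestampsEXT( infos );
    // timestamps[0] holds the device timestamp, timestamps[1] the monotonic clock value
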
VULKAN_HPP_NAMESPACE_STRING"::Device::getDeferredOperationResultKHR", { Result::eSuccess, Result::eNotReady } ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getDeferredOperationResultKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( pCreateInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( pSupport ) ); + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( pCreateInfo ), reinterpret_cast< VkDescriptorSetLayoutSupport *>( pSupport ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport support; - d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( &support ) ); + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport *>( &support ) ); return support; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport& support = structureChain.template get<VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport>(); - d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( &support ) ); + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport & support = structureChain.template get<VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport>(); + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport *>( &support ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( pCreateInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( pSupport ) ); + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( pCreateInfo ), reinterpret_cast< VkDescriptorSetLayoutSupport *>( pSupport ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport support; - d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( &support ) ); + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport *>( &support ) ); return support; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport& support = structureChain.template get<VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport>(); - d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo*>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport*>( &support ) ); + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport & support = structureChain.template get<VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport>(); + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<VkDescriptorSetLayoutSupport *>( &support ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionKHR* version, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionInfoKHR* pVersionInfo, VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR* pCompatibility, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast<const VkAccelerationStructureVersionKHR*>( version ) ) ); + d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast<const VkAccelerationStructureVersionInfoKHR *>( pVersionInfo ), reinterpret_cast< VkAccelerationStructureCompatibilityKHR *>( pCompatibility ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionKHR & version, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR Device::getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - Result result = static_cast<Result>( d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast<const VkAccelerationStructureVersionKHR*>( &version ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getAccelerationStructureCompatibilityKHR" ); + VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR compatibility; + d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast<const VkAccelerationStructureVersionInfoKHR *>( &versionInfo ), reinterpret_cast<VkAccelerationStructureCompatibilityKHR *>( &compatibility ) ); + return compatibility; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void 
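
A minimal sketch of the reworked getAccelerationStructureCompatibilityKHR above, which now returns the compatibility value instead of a Result (not part of the diff); `device` is an assumed valid vk::Device and `versionData` an assumed pointer to 2 * VK_UUID_SIZE bytes of serialized acceleration-structure version data, with VK_KHR_acceleration_structure enabled:

    vk::AccelerationStructureVersionInfoKHR versionInfo( versionData );
    vk::AccelerationStructureCompatibilityKHR compat = device.getAccelerationStructureCompatibilityKHR( versionInfo );
    bool reusable = ( compat == vk::AccelerationStructureCompatibilityKHR::eCompatible );
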
Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags*>( pPeerMemoryFeatures ) ); + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast< VkPeerMemoryFeatureFlags *>( pPeerMemoryFeatures ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags peerMemoryFeatures; - d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags*>( &peerMemoryFeatures ) ); + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags *>( &peerMemoryFeatures ) ); return peerMemoryFeatures; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags*>( pPeerMemoryFeatures ) ); + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast< VkPeerMemoryFeatureFlags *>( pPeerMemoryFeatures ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags peerMemoryFeatures; - d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags*>( &peerMemoryFeatures ) ); + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast<VkPeerMemoryFeatureFlags *>( &peerMemoryFeatures ) ); return peerMemoryFeatures; } #endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHR*>( pDeviceGroupPresentCapabilities ) ) ); + return static_cast<Result>( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast< VkDeviceGroupPresentCapabilitiesKHR *>( pDeviceGroupPresentCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR>::type Device::getGroupPresentCapabilitiesKHR(Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR>::type Device::getGroupPresentCapabilitiesKHR( Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR deviceGroupPresentCapabilities; - Result result = static_cast<Result>( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHR*>( &deviceGroupPresentCapabilities ) ) ); - return createResultValue( result, deviceGroupPresentCapabilities, VULKAN_HPP_NAMESPACE_STRING"::Device::getGroupPresentCapabilitiesKHR" ); + Result result = static_cast<Result>( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHR *>( &deviceGroupPresentCapabilities ) ) ); + return createResultValue( result, deviceGroupPresentCapabilities, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR*>( pModes ) ) ); + return static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( pSurfaceInfo ), reinterpret_cast< VkDeviceGroupPresentModeFlagsKHR *>( pModes ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type 
Device::getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type Device::getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; - Result result = static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR*>( &modes ) ) ); - return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING"::Device::getGroupSurfacePresentModes2EXT" ); + Result result = static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR *>( &modes ) ) ); + return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR*>( pModes ) ) ); + return static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast< VkDeviceGroupPresentModeFlagsKHR *>( pModes ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR>::type Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; - Result result = static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR*>( &modes ) ) ); - return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING"::Device::getGroupSurfacePresentModesKHR" ); + Result result = static_cast<Result>( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkDeviceGroupPresentModeFlagsKHR *>( &modes ) ) ); + return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> 
- VULKAN_HPP_INLINE void Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<VkDeviceSize*>( pCommittedMemoryInBytes ) ); + d.vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast< VkDeviceSize *>( pCommittedMemoryInBytes ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceSize Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceSize Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::DeviceSize committedMemoryInBytes; - d.vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<VkDeviceSize*>( &committedMemoryInBytes ) ); + d.vkGetDeviceMemoryCommitment( m_device, static_cast<VkDeviceMemory>( memory ), reinterpret_cast<VkDeviceSize *>( &committedMemoryInBytes ) ); return committedMemoryInBytes; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo*>( pInfo ) ); + return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo*>( &info ) ); + return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return 
d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo*>( pInfo ) ); + return d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo*>( &info ) ); + return d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const char* pName, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const char* pName, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetDeviceProcAddr( m_device, pName ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const std::string & name, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const std::string & name, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetDeviceProcAddr( m_device, name.c_str() ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast<VkQueue*>( pQueue ) ); + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast< VkQueue *>( pQueue ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::Queue queue; - d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast<VkQueue*>( &queue ) ); + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast<VkQueue *>( &queue ) ); return queue; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getQueue2( const VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getQueue2( const 
VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetDeviceQueue2( m_device, reinterpret_cast<const VkDeviceQueueInfo2*>( pQueueInfo ), reinterpret_cast<VkQueue*>( pQueue ) ); + d.vkGetDeviceQueue2( m_device, reinterpret_cast<const VkDeviceQueueInfo2 *>( pQueueInfo ), reinterpret_cast< VkQueue *>( pQueue ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::Queue queue; - d.vkGetDeviceQueue2( m_device, reinterpret_cast<const VkDeviceQueueInfo2*>( &queueInfo ), reinterpret_cast<VkQueue*>( &queue ) ); + d.vkGetDeviceQueue2( m_device, reinterpret_cast<const VkDeviceQueueInfo2 *>( &queueInfo ), reinterpret_cast<VkQueue *>( &queue ) ); return queue; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetEventStatus( m_device, static_cast<VkEvent>( event ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkGetEventStatus( m_device, static_cast<VkEvent>( event ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getEventStatus", { Result::eEventSet, Result::eEventReset } ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEventStatus", { VULKAN_HPP_NAMESPACE::Result::eEventSet, VULKAN_HPP_NAMESPACE::Result::eEventReset } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetFenceFdKHR( m_device, reinterpret_cast<const VkFenceGetFdInfoKHR*>( pGetFdInfo ), pFd ) ); + return static_cast<Result>( d.vkGetFenceFdKHR( m_device, reinterpret_cast<const VkFenceGetFdInfoKHR *>( pGetFdInfo ), pFd ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<int>::type Device::getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE 
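
A minimal sketch of the enhanced getQueue2 overload above (not part of the diff), assuming a valid vk::Device `device`; the family and queue indices are placeholders:

    vk::DeviceQueueInfo2 queueInfo( vk::DeviceQueueCreateFlags(), /*queueFamilyIndex*/ 0, /*queueIndex*/ 0 );
    vk::Queue queue = device.getQueue2( queueInfo );
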
typename ResultValueType<int>::type Device::getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const & d ) const { int fd; - Result result = static_cast<Result>( d.vkGetFenceFdKHR( m_device, reinterpret_cast<const VkFenceGetFdInfoKHR*>( &getFdInfo ), &fd ) ); - return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceFdKHR" ); + Result result = static_cast<Result>( d.vkGetFenceFdKHR( m_device, reinterpret_cast<const VkFenceGetFdInfoKHR *>( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetFenceStatus( m_device, static_cast<VkFence>( fence ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkGetFenceStatus( m_device, static_cast<VkFence>( fence ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceStatus", { Result::eSuccess, Result::eNotReady } ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceStatus", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast<const VkFenceGetWin32HandleInfoKHR*>( pGetWin32HandleInfo ), pHandle ) ); + return static_cast<Result>( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast<const VkFenceGetWin32HandleInfoKHR *>( pGetWin32HandleInfo ), pHandle ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const { HANDLE handle; - Result result = static_cast<Result>( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast<const VkFenceGetWin32HandleInfoKHR*>( &getWin32HandleInfo ), &handle ) ); - return createResultValue( 
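
A minimal sketch of polling with the enhanced getFenceStatus above (not part of the diff), assuming a valid vk::Device `device` and vk::Fence `fence`; because eSuccess and eNotReady are both success codes, the call returns the vk::Result for the caller to inspect rather than throwing:

    while ( device.getFenceStatus( fence ) == vk::Result::eNotReady )
    {
      // fence not signaled yet; do other work or yield
    }
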
result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceWin32HandleKHR" ); + Result result = static_cast<Result>( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast<const VkFenceGetWin32HandleInfoKHR *>( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); + d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV *>( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); + d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast<const VkGeneratedCommandsMemoryRequirementsInfoNV *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkImageDrmFormatModifierPropertiesEXT*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast<VkImage>( image ), reinterpret_cast< VkImageDrmFormatModifierPropertiesEXT *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT>::type Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT>::type Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT properties; - Result result = static_cast<Result>( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkImageDrmFormatModifierPropertiesEXT*>( &properties ) ) ); - return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getImageDrmFormatModifierPropertiesEXT" ); + Result result = static_cast<Result>( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkImageDrmFormatModifierPropertiesEXT *>( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageDrmFormatModifierPropertiesEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template 
<typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkMemoryRequirements*>( pMemoryRequirements ) ); + d.vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast< VkMemoryRequirements *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; - d.vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkMemoryRequirements*>( &memoryRequirements ) ); + d.vkGetImageMemoryRequirements( m_device, static_cast<VkImage>( image ), reinterpret_cast<VkMemoryRequirements *>( &memoryRequirements ) ); return memoryRequirements; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, 
typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( pInfo ), reinterpret_cast<VkMemoryRequirements2*>( pMemoryRequirements ) ); + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; - d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return memoryRequirements; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::MemoryRequirements2& memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); - d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2*>( &info ), reinterpret_cast<VkMemoryRequirements2*>( &memoryRequirements ) ); + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get<VULKAN_HPP_NAMESPACE::MemoryRequirements2>(); + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageMemoryRequirementsInfo2 *>( &info ), reinterpret_cast<VkMemoryRequirements2 *>( &memoryRequirements ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), pSparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements*>( pSparseMemoryRequirements ) ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements *>( pSparseMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements,Allocator> Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const &d ) const + template <typename SparseImageMemoryRequirementsAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements,Allocator> sparseMemoryRequirements; + std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> sparseMemoryRequirements; uint32_t sparseMemoryRequirementCount; d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } - template<typename Allocator , typename Dispatch, 
typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements,Allocator> Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SparseImageMemoryRequirementsAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, SparseImageMemoryRequirementsAllocator & sparseImageMemoryRequirementsAllocator, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements,Allocator> sparseMemoryRequirements( vectorAllocator ); + std::vector<SparseImageMemoryRequirements, SparseImageMemoryRequirementsAllocator> sparseMemoryRequirements( sparseImageMemoryRequirementsAllocator ); uint32_t sparseMemoryRequirementCount; d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast<VkImage>( image ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( pSparseMemoryRequirements ) ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements2 *>( pSparseMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2,Allocator> Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d ) const + template <typename SparseImageMemoryRequirements2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> 
Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements2,Allocator> sparseMemoryRequirements; + std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> sparseMemoryRequirements; uint32_t sparseMemoryRequirementCount; - d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, nullptr ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2 *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2,Allocator> Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SparseImageMemoryRequirements2Allocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements2,Allocator> sparseMemoryRequirements( vectorAllocator ); + std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> sparseMemoryRequirements( sparseImageMemoryRequirements2Allocator ); uint32_t sparseMemoryRequirementCount; - d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, nullptr ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2 *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= 
sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( pSparseMemoryRequirements ) ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements2 *>( pSparseMemoryRequirements ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2,Allocator> Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d ) const + template <typename SparseImageMemoryRequirements2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements2,Allocator> sparseMemoryRequirements; + std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> sparseMemoryRequirements; uint32_t sparseMemoryRequirementCount; - d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, nullptr ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2 *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2,Allocator> Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Allocator const& vectorAllocator, Dispatch const 
&d ) const + + template <typename SparseImageMemoryRequirements2Allocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageMemoryRequirements2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d ) const { - std::vector<SparseImageMemoryRequirements2,Allocator> sparseMemoryRequirements( vectorAllocator ); + std::vector<SparseImageMemoryRequirements2, SparseImageMemoryRequirements2Allocator> sparseMemoryRequirements( sparseImageMemoryRequirements2Allocator ); uint32_t sparseMemoryRequirementCount; - d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, nullptr ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, nullptr ); sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); - d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2*>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2*>( sparseMemoryRequirements.data() ) ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast<const VkImageSparseMemoryRequirementsInfo2 *>( &info ), &sparseMemoryRequirementCount, reinterpret_cast<VkSparseImageMemoryRequirements2 *>( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); return sparseMemoryRequirements; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkImageSubresource*>( pSubresource ), reinterpret_cast<VkSubresourceLayout*>( pLayout ) ); + d.vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkImageSubresource *>( pSubresource ), reinterpret_cast< VkSubresourceLayout *>( pLayout ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::SubresourceLayout layout; - d.vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), 
reinterpret_cast<const VkImageSubresource*>( &subresource ), reinterpret_cast<VkSubresourceLayout*>( &layout ) ); + d.vkGetImageSubresourceLayout( m_device, static_cast<VkImage>( image ), reinterpret_cast<const VkImageSubresource *>( &subresource ), reinterpret_cast<VkSubresourceLayout *>( &layout ) ); return layout; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetImageViewAddressNVX( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<VkImageViewAddressPropertiesNVX*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetImageViewAddressNVX( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast< VkImageViewAddressPropertiesNVX *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX>::type Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX>::type Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX properties; - Result result = static_cast<Result>( d.vkGetImageViewAddressNVX( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<VkImageViewAddressPropertiesNVX*>( &properties ) ) ); - return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getImageViewAddressNVX" ); + Result result = static_cast<Result>( d.vkGetImageViewAddressNVX( m_device, static_cast<VkImageView>( imageView ), reinterpret_cast<VkImageViewAddressPropertiesNVX *>( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewAddressNVX" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast<const VkImageViewHandleInfoNVX*>( pInfo ) ); + return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast<const VkImageViewHandleInfoNVX *>( pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { - return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast<const VkImageViewHandleInfoNVX*>( &info ) ); + return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast<const VkImageViewHandleInfoNVX *>( &info ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast<const VkMemoryGetAndroidHardwareBufferInfoANDROID*>( pInfo ), pBuffer ) ); + return static_cast<Result>( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast<const VkMemoryGetAndroidHardwareBufferInfoANDROID *>( pInfo ), pBuffer ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<struct AHardwareBuffer*>::type Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<struct AHardwareBuffer*>::type Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const & d ) const { struct AHardwareBuffer* buffer; - Result result = static_cast<Result>( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast<const VkMemoryGetAndroidHardwareBufferInfoANDROID*>( &info ), &buffer ) ); - return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryAndroidHardwareBufferANDROID" ); + Result result = static_cast<Result>( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast<const VkMemoryGetAndroidHardwareBufferInfoANDROID *>( &info ), &buffer ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryFdKHR( m_device, reinterpret_cast<const VkMemoryGetFdInfoKHR*>( pGetFdInfo ), pFd ) ); + return static_cast<Result>( d.vkGetMemoryFdKHR( m_device, reinterpret_cast<const VkMemoryGetFdInfoKHR *>( pGetFdInfo ), pFd ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<int>::type Device::getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + template 
<typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<int>::type Device::getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const & d ) const { int fd; - Result result = static_cast<Result>( d.vkGetMemoryFdKHR( m_device, reinterpret_cast<const VkMemoryGetFdInfoKHR*>( &getFdInfo ), &fd ) ); - return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryFdKHR" ); + Result result = static_cast<Result>( d.vkGetMemoryFdKHR( m_device, reinterpret_cast<const VkMemoryGetFdInfoKHR *>( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), fd, reinterpret_cast<VkMemoryFdPropertiesKHR*>( pMemoryFdProperties ) ) ); + return static_cast<Result>( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), fd, reinterpret_cast< VkMemoryFdPropertiesKHR *>( pMemoryFdProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR>::type Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR>::type Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR memoryFdProperties; - Result result = static_cast<Result>( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), fd, reinterpret_cast<VkMemoryFdPropertiesKHR*>( &memoryFdProperties ) ) ); - return createResultValue( result, memoryFdProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryFdPropertiesKHR" ); + Result result = static_cast<Result>( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), fd, reinterpret_cast<VkMemoryFdPropertiesKHR *>( &memoryFdProperties ) ) ); + return createResultValue( result, memoryFdProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template 
<typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), pHostPointer, reinterpret_cast<VkMemoryHostPointerPropertiesEXT*>( pMemoryHostPointerProperties ) ) ); + return static_cast<Result>( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), pHostPointer, reinterpret_cast< VkMemoryHostPointerPropertiesEXT *>( pMemoryHostPointerProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT>::type Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT>::type Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT memoryHostPointerProperties; - Result result = static_cast<Result>( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), pHostPointer, reinterpret_cast<VkMemoryHostPointerPropertiesEXT*>( &memoryHostPointerProperties ) ) ); - return createResultValue( result, memoryHostPointerProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryHostPointerPropertiesEXT" ); + Result result = static_cast<Result>( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), pHostPointer, reinterpret_cast<VkMemoryHostPointerPropertiesEXT *>( &memoryHostPointerProperties ) ) ); + return createResultValue( result, memoryHostPointerProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast<const VkMemoryGetWin32HandleInfoKHR*>( pGetWin32HandleInfo ), pHandle ) ); + return static_cast<Result>( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast<const VkMemoryGetWin32HandleInfoKHR *>( pGetWin32HandleInfo ), pHandle ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getMemoryWin32HandleKHR( const 
MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const { HANDLE handle; - Result result = static_cast<Result>( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast<const VkMemoryGetWin32HandleInfoKHR*>( &getWin32HandleInfo ), &handle ) ); - return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandleKHR" ); + Result result = static_cast<Result>( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast<const VkMemoryGetWin32HandleInfoKHR *>( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetMemoryWin32HandleNV( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( handleType ), pHandle ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const & d ) const { HANDLE handle; Result result = static_cast<Result>( d.vkGetMemoryWin32HandleNV( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( handleType ), &handle ) ); - return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandleNV" ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* 
pMemoryWin32HandleProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), handle, reinterpret_cast<VkMemoryWin32HandlePropertiesKHR*>( pMemoryWin32HandleProperties ) ) ); + return static_cast<Result>( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), handle, reinterpret_cast< VkMemoryWin32HandlePropertiesKHR *>( pMemoryWin32HandleProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR>::type Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR>::type Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR memoryWin32HandleProperties; - Result result = static_cast<Result>( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), handle, reinterpret_cast<VkMemoryWin32HandlePropertiesKHR*>( &memoryWin32HandleProperties ) ) ); - return createResultValue( result, memoryWin32HandleProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandlePropertiesKHR" ); + Result result = static_cast<Result>( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast<VkExternalMemoryHandleTypeFlagBits>( handleType ), handle, reinterpret_cast<VkMemoryWin32HandlePropertiesKHR *>( &memoryWin32HandleProperties ) ) ); + return createResultValue( result, memoryWin32HandleProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), pPresentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE*>( pPresentationTimings ) ) ); + return static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), pPresentationTimingCount, reinterpret_cast< VkPastPresentationTimingGOOGLE *>( pPresentationTimings ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type 
Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template <typename PastPresentationTimingGOOGLEAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator>>::type Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { - std::vector<PastPresentationTimingGOOGLE,Allocator> presentationTimings; + std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator> presentationTimings; uint32_t presentationTimingCount; Result result; do @@ -85505,20 +87415,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && presentationTimingCount ) { presentationTimings.resize( presentationTimingCount ); - result = static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE*>( presentationTimings.data() ) ) ); + result = static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE *>( presentationTimings.data() ) ) ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentationTimingCount < presentationTimings.size() ) ) { - VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); presentationTimings.resize( presentationTimingCount ); } return createResultValue( result, presentationTimings, VULKAN_HPP_NAMESPACE_STRING"::Device::getPastPresentationTimingGOOGLE" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PastPresentationTimingGOOGLE>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PastPresentationTimingGOOGLE,Allocator>>::type Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PastPresentationTimingGOOGLEAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PastPresentationTimingGOOGLE>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator>>::type Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, PastPresentationTimingGOOGLEAllocator & pastPresentationTimingGOOGLEAllocator, Dispatch const & d ) const { - std::vector<PastPresentationTimingGOOGLE,Allocator> presentationTimings( vectorAllocator ); + std::vector<PastPresentationTimingGOOGLE, PastPresentationTimingGOOGLEAllocator> presentationTimings( pastPresentationTimingGOOGLEAllocator ); uint32_t presentationTimingCount; Result result; do @@ -85527,43 +87438,47 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && presentationTimingCount ) { presentationTimings.resize( presentationTimingCount ); - result = static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE*>( 
presentationTimings.data() ) ) ); + result = static_cast<Result>( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), &presentationTimingCount, reinterpret_cast<VkPastPresentationTimingGOOGLE *>( presentationTimings.data() ) ) ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentationTimingCount < presentationTimings.size() ) ) { - VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); presentationTimings.resize( presentationTimingCount ); } return createResultValue( result, presentationTimings, VULKAN_HPP_NAMESPACE_STRING"::Device::getPastPresentationTimingGOOGLE" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPerformanceParameterINTEL( m_device, static_cast<VkPerformanceParameterTypeINTEL>( parameter ), reinterpret_cast<VkPerformanceValueINTEL*>( pValue ) ) ); + return static_cast<Result>( d.vkGetPerformanceParameterINTEL( m_device, static_cast<VkPerformanceParameterTypeINTEL>( parameter ), reinterpret_cast< VkPerformanceValueINTEL *>( pValue ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceValueINTEL>::type Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::PerformanceValueINTEL>::type Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::PerformanceValueINTEL value; - Result result = static_cast<Result>( d.vkGetPerformanceParameterINTEL( m_device, static_cast<VkPerformanceParameterTypeINTEL>( parameter ), reinterpret_cast<VkPerformanceValueINTEL*>( &value ) ) ); - return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING"::Device::getPerformanceParameterINTEL" ); + Result result = static_cast<Result>( d.vkGetPerformanceParameterINTEL( m_device, static_cast<VkPerformanceParameterTypeINTEL>( parameter ), reinterpret_cast<VkPerformanceValueINTEL *>( &value ) ) ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* 
pDataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), pDataSize, pData ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const &d ) const + template <typename Uint8_tAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> data; + std::vector<uint8_t, Uint8_tAllocator> data; size_t dataSize; Result result; do @@ -85572,20 +87487,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && dataSize ) { data.resize( dataSize ); - result = static_cast<Result>( d.vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, reinterpret_cast<void*>( data.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, reinterpret_cast<void *>( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) { - VULKAN_HPP_ASSERT( dataSize <= data.size() ); data.resize( dataSize ); } return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineCacheData" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Uint8_tAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> data( vectorAllocator ); + std::vector<uint8_t, Uint8_tAllocator> data( uint8_tAllocator ); size_t dataSize; Result result; do @@ -85594,182 +87510,193 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && dataSize ) { data.resize( dataSize ); - result = static_cast<Result>( d.vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, reinterpret_cast<void*>( data.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineCacheData( m_device, static_cast<VkPipelineCache>( pipelineCache ), &dataSize, reinterpret_cast<void *>( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) { - VULKAN_HPP_ASSERT( dataSize <= data.size() ); data.resize( dataSize ); } return createResultValue( result, data, 
VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineCacheData" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( pExecutableInfo ), pInternalRepresentationCount, reinterpret_cast<VkPipelineExecutableInternalRepresentationKHR*>( pInternalRepresentations ) ) ); + return static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( pExecutableInfo ), pInternalRepresentationCount, reinterpret_cast< VkPipelineExecutableInternalRepresentationKHR *>( pInternalRepresentations ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR,Allocator>>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const &d ) const + template <typename PipelineExecutableInternalRepresentationKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator>>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d ) const { - std::vector<PipelineExecutableInternalRepresentationKHR,Allocator> internalRepresentations; + std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator> internalRepresentations; uint32_t internalRepresentationCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &internalRepresentationCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &internalRepresentationCount, nullptr ) ); if ( ( result == Result::eSuccess ) && internalRepresentationCount ) { internalRepresentations.resize( internalRepresentationCount ); - result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &internalRepresentationCount, reinterpret_cast<VkPipelineExecutableInternalRepresentationKHR*>( internalRepresentations.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR 
*>( &executableInfo ), &internalRepresentationCount, reinterpret_cast<VkPipelineExecutableInternalRepresentationKHR *>( internalRepresentations.data() ) ) ); + VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( internalRepresentationCount < internalRepresentations.size() ) ) { - VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); internalRepresentations.resize( internalRepresentationCount ); } return createResultValue( result, internalRepresentations, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableInternalRepresentationsKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableInternalRepresentationKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR,Allocator>>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineExecutableInternalRepresentationKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableInternalRepresentationKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator>>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableInternalRepresentationKHRAllocator & pipelineExecutableInternalRepresentationKHRAllocator, Dispatch const & d ) const { - std::vector<PipelineExecutableInternalRepresentationKHR,Allocator> internalRepresentations( vectorAllocator ); + std::vector<PipelineExecutableInternalRepresentationKHR, PipelineExecutableInternalRepresentationKHRAllocator> internalRepresentations( pipelineExecutableInternalRepresentationKHRAllocator ); uint32_t internalRepresentationCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &internalRepresentationCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &internalRepresentationCount, nullptr ) ); if ( ( result == Result::eSuccess ) && internalRepresentationCount ) { internalRepresentations.resize( internalRepresentationCount ); - result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &internalRepresentationCount, reinterpret_cast<VkPipelineExecutableInternalRepresentationKHR*>( internalRepresentations.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &internalRepresentationCount, reinterpret_cast<VkPipelineExecutableInternalRepresentationKHR *>( internalRepresentations.data() ) ) ); + VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); } } while ( result == Result::eIncomplete 
); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( internalRepresentationCount < internalRepresentations.size() ) ) { - VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); internalRepresentations.resize( internalRepresentationCount ); } return createResultValue( result, internalRepresentations, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableInternalRepresentationsKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR*>( pPipelineInfo ), pExecutableCount, reinterpret_cast<VkPipelineExecutablePropertiesKHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR *>( pPipelineInfo ), pExecutableCount, reinterpret_cast< VkPipelineExecutablePropertiesKHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR,Allocator>>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const &d ) const + template <typename PipelineExecutablePropertiesKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator>>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const & d ) const { - std::vector<PipelineExecutablePropertiesKHR,Allocator> properties; + std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator> properties; uint32_t executableCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR*>( &pipelineInfo ), &executableCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR *>( &pipelineInfo ), &executableCount, nullptr ) ); if ( ( result == Result::eSuccess ) && executableCount ) { properties.resize( executableCount ); - result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR*>( &pipelineInfo ), &executableCount, reinterpret_cast<VkPipelineExecutablePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR *>( &pipelineInfo ), &executableCount, reinterpret_cast<VkPipelineExecutablePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( executableCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if 
( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( executableCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( executableCount <= properties.size() ); properties.resize( executableCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutablePropertiesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutablePropertiesKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR,Allocator>>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineExecutablePropertiesKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutablePropertiesKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator>>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, PipelineExecutablePropertiesKHRAllocator & pipelineExecutablePropertiesKHRAllocator, Dispatch const & d ) const { - std::vector<PipelineExecutablePropertiesKHR,Allocator> properties( vectorAllocator ); + std::vector<PipelineExecutablePropertiesKHR, PipelineExecutablePropertiesKHRAllocator> properties( pipelineExecutablePropertiesKHRAllocator ); uint32_t executableCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR*>( &pipelineInfo ), &executableCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR *>( &pipelineInfo ), &executableCount, nullptr ) ); if ( ( result == Result::eSuccess ) && executableCount ) { properties.resize( executableCount ); - result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR*>( &pipelineInfo ), &executableCount, reinterpret_cast<VkPipelineExecutablePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast<const VkPipelineInfoKHR *>( &pipelineInfo ), &executableCount, reinterpret_cast<VkPipelineExecutablePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( executableCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( executableCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( executableCount <= properties.size() ); properties.resize( executableCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutablePropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutableStatisticsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutableStatisticsKHR( const 
VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( pExecutableInfo ), pStatisticCount, reinterpret_cast<VkPipelineExecutableStatisticKHR*>( pStatistics ) ) ); + return static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( pExecutableInfo ), pStatisticCount, reinterpret_cast< VkPipelineExecutableStatisticKHR *>( pStatistics ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableStatisticKHR,Allocator>>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const &d ) const + template <typename PipelineExecutableStatisticKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator>>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d ) const { - std::vector<PipelineExecutableStatisticKHR,Allocator> statistics; + std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator> statistics; uint32_t statisticCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &statisticCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &statisticCount, nullptr ) ); if ( ( result == Result::eSuccess ) && statisticCount ) { statistics.resize( statisticCount ); - result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &statisticCount, reinterpret_cast<VkPipelineExecutableStatisticKHR*>( statistics.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &statisticCount, reinterpret_cast<VkPipelineExecutableStatisticKHR *>( statistics.data() ) ) ); + VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( statisticCount < statistics.size() ) ) { - VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); statistics.resize( statisticCount ); } return createResultValue( result, statistics, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableStatisticsKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableStatisticKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableStatisticKHR,Allocator>>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PipelineExecutableStatisticKHRAllocator, 
typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PipelineExecutableStatisticKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator>>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableStatisticKHRAllocator & pipelineExecutableStatisticKHRAllocator, Dispatch const & d ) const { - std::vector<PipelineExecutableStatisticKHR,Allocator> statistics( vectorAllocator ); + std::vector<PipelineExecutableStatisticKHR, PipelineExecutableStatisticKHRAllocator> statistics( pipelineExecutableStatisticKHRAllocator ); uint32_t statisticCount; Result result; do { - result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &statisticCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &statisticCount, nullptr ) ); if ( ( result == Result::eSuccess ) && statisticCount ) { statistics.resize( statisticCount ); - result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR*>( &executableInfo ), &statisticCount, reinterpret_cast<VkPipelineExecutableStatisticKHR*>( statistics.data() ) ) ); + result = static_cast<Result>( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast<const VkPipelineExecutableInfoKHR *>( &executableInfo ), &statisticCount, reinterpret_cast<VkPipelineExecutableStatisticKHR *>( statistics.data() ) ) ); + VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( statisticCount < statistics.size() ) ) { - VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); statistics.resize( statisticCount ); } return createResultValue( result, statistics, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableStatisticsKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkGetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), pData ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint64_t Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint64_t Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { uint64_t data; 
d.vkGetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), &data ); @@ -85779,9 +87706,9 @@ namespace VULKAN_HPP_NAMESPACE template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, dataSize, pData, static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); + return static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, dataSize, pData, static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE @@ -85795,32 +87722,28 @@ namespace VULKAN_HPP_NAMESPACE } template <typename T, typename Allocator, typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<T,Allocator>> Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d ) const + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<std::vector<T,Allocator>> Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const { VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); std::vector<T,Allocator> data( dataSize / sizeof( T ) ); - Result result = static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ), static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); - - return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", { Result::eSuccess, Result::eNotReady } ); + Result result = static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ), reinterpret_cast<void *>( data.data() ), static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); } template <typename T, typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue<T> Device::getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d ) const + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
ResultValue<T> Device::getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const { T data; - Result result = static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, 1 * sizeof( T ) , reinterpret_cast<void*>( &data ), static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); - - return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", { Result::eSuccess, Result::eNotReady } ); + Result result = static_cast<Result>( d.vkGetQueryPoolResults( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount, sizeof( T ), reinterpret_cast<void *>( &data ), static_cast<VkDeviceSize>( stride ), static_cast<VkQueryResultFlags>( flags ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResult", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - -#ifdef VK_ENABLE_BETA_EXTENSIONS template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + return static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE @@ -85834,33 +87757,28 @@ namespace VULKAN_HPP_NAMESPACE } template <typename T, typename Allocator, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const { VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); std::vector<T,Allocator> data( dataSize / sizeof( T ) ); - Result result = static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); - + Result result = static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ), reinterpret_cast<void *>( data.data() ) ) ); return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING 
"::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); } template <typename T, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const { T data; - Result result = static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, 1 * sizeof( T ) , reinterpret_cast<void*>( &data ) ) ); - - return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); + Result result = static_cast<Result>( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast<void *>( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - -#ifdef VK_ENABLE_BETA_EXTENSIONS template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + return static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE @@ -85874,31 +87792,27 @@ namespace VULKAN_HPP_NAMESPACE } template <typename T, typename Allocator, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const { VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); std::vector<T,Allocator> data( dataSize / sizeof( T ) ); - Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); - + Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * 
sizeof( T ), reinterpret_cast<void *>( data.data() ) ) ); return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesKHR" ); } template <typename T, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const { T data; - Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, 1 * sizeof( T ) , reinterpret_cast<void*>( &data ) ) ); - - return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesKHR" ); + Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast<void *>( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + return static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, dataSize, pData ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE @@ -85912,128 +87826,145 @@ namespace VULKAN_HPP_NAMESPACE } template <typename T, typename Allocator, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const { VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); std::vector<T,Allocator> data( dataSize / sizeof( T ) ); - Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ) ) ); - + Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, data.size() * sizeof( T 
), reinterpret_cast<void *>( data.data() ) ) ); return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesNV" ); } template <typename T, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const &d ) const + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const { T data; - Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, 1 * sizeof( T ) , reinterpret_cast<void*>( &data ) ) ); - - return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesNV" ); + Result result = static_cast<Result>( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast<VkPipeline>( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast<void *>( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE DeviceSize Device::getRayTracingShaderGroupStackSizeKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<VkRefreshCycleDurationGOOGLE*>( pDisplayTimingProperties ) ) ); + return static_cast<DeviceSize>( d.vkGetRayTracingShaderGroupStackSizeKHR( m_device, static_cast<VkPipeline>( pipeline ), group, static_cast<VkShaderGroupShaderKHR>( groupShader ) ) ); } + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast<Result>( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast< VkRefreshCycleDurationGOOGLE *>( pDisplayTimingProperties ) ) ); + } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE>::type Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE>::type Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE displayTimingProperties; - Result result = 
static_cast<Result>( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<VkRefreshCycleDurationGOOGLE*>( &displayTimingProperties ) ) ); - return createResultValue( result, displayTimingProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getRefreshCycleDurationGOOGLE" ); + Result result = static_cast<Result>( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast<VkSwapchainKHR>( swapchain ), reinterpret_cast<VkRefreshCycleDurationGOOGLE *>( &displayTimingProperties ) ) ); + return createResultValue( result, displayTimingProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getRefreshCycleDurationGOOGLE" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D*>( pGranularity ) ); + d.vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast< VkExtent2D *>( pGranularity ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D Device::getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D Device::getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::Extent2D granularity; - d.vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D*>( &granularity ) ); + d.vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D *>( &granularity ) ); return granularity; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetSemaphoreCounterValue( m_device, static_cast<VkSemaphore>( semaphore ), pValue ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d ) const { uint64_t value; Result result = static_cast<Result>( d.vkGetSemaphoreCounterValue( m_device, static_cast<VkSemaphore>( semaphore ), &value ) ); - return createResultValue( result, 
value, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreCounterValue" ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValue" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetSemaphoreCounterValueKHR( m_device, static_cast<VkSemaphore>( semaphore ), pValue ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d ) const { uint64_t value; Result result = static_cast<Result>( d.vkGetSemaphoreCounterValueKHR( m_device, static_cast<VkSemaphore>( semaphore ), &value ) ); - return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreCounterValueKHR" ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValueKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast<const VkSemaphoreGetFdInfoKHR*>( pGetFdInfo ), pFd ) ); + return static_cast<Result>( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast<const VkSemaphoreGetFdInfoKHR *>( pGetFdInfo ), pFd ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<int>::type Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<int>::type Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const & d ) const { int fd; - Result result = static_cast<Result>( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast<const VkSemaphoreGetFdInfoKHR*>( &getFdInfo ), &fd ) ); - return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreFdKHR" ); + Result result = static_cast<Result>( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast<const VkSemaphoreGetFdInfoKHR *>( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - 
template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkSemaphoreGetWin32HandleInfoKHR*>( pGetWin32HandleInfo ), pHandle ) ); + return static_cast<Result>( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkSemaphoreGetWin32HandleInfoKHR *>( pGetWin32HandleInfo ), pHandle ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<HANDLE>::type Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const { HANDLE handle; - Result result = static_cast<Result>( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkSemaphoreGetWin32HandleInfoKHR*>( &getWin32HandleInfo ), &handle ) ); - return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreWin32HandleKHR" ); + Result result = static_cast<Result>( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkSemaphoreGetWin32HandleInfoKHR *>( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetShaderInfoAMD( m_device, static_cast<VkPipeline>( pipeline ), static_cast<VkShaderStageFlagBits>( shaderStage ), static_cast<VkShaderInfoTypeAMD>( infoType ), pInfoSize, pInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const &d ) const + template <typename Uint8_tAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getShaderInfoAMD( 
VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> info; + std::vector<uint8_t, Uint8_tAllocator> info; size_t infoSize; Result result; do @@ -86042,20 +87973,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && infoSize ) { info.resize( infoSize ); - result = static_cast<Result>( d.vkGetShaderInfoAMD( m_device, static_cast<VkPipeline>( pipeline ), static_cast<VkShaderStageFlagBits>( shaderStage ), static_cast<VkShaderInfoTypeAMD>( infoType ), &infoSize, reinterpret_cast<void*>( info.data() ) ) ); + result = static_cast<Result>( d.vkGetShaderInfoAMD( m_device, static_cast<VkPipeline>( pipeline ), static_cast<VkShaderStageFlagBits>( shaderStage ), static_cast<VkShaderInfoTypeAMD>( infoType ), &infoSize, reinterpret_cast<void *>( info.data() ) ) ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( infoSize < info.size() ) ) { - VULKAN_HPP_ASSERT( infoSize <= info.size() ); info.resize( infoSize ); } return createResultValue( result, info, VULKAN_HPP_NAMESPACE_STRING"::Device::getShaderInfoAMD" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Uint8_tAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> info( vectorAllocator ); + std::vector<uint8_t, Uint8_tAllocator> info( uint8_tAllocator ); size_t infoSize; Result result; do @@ -86064,43 +87996,47 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && infoSize ) { info.resize( infoSize ); - result = static_cast<Result>( d.vkGetShaderInfoAMD( m_device, static_cast<VkPipeline>( pipeline ), static_cast<VkShaderStageFlagBits>( shaderStage ), static_cast<VkShaderInfoTypeAMD>( infoType ), &infoSize, reinterpret_cast<void*>( info.data() ) ) ); + result = static_cast<Result>( d.vkGetShaderInfoAMD( m_device, static_cast<VkPipeline>( pipeline ), static_cast<VkShaderStageFlagBits>( shaderStage ), static_cast<VkShaderInfoTypeAMD>( infoType ), &infoSize, reinterpret_cast<void *>( info.data() ) ) ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( infoSize < info.size() ) ) { - VULKAN_HPP_ASSERT( infoSize <= info.size() ); info.resize( infoSize ); } return createResultValue( result, info, VULKAN_HPP_NAMESPACE_STRING"::Device::getShaderInfoAMD" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - 
template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetSwapchainCounterEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ), static_cast<VkSurfaceCounterFlagBitsEXT>( counter ), pCounterValue ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<uint64_t>::type Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const & d ) const { uint64_t counterValue; Result result = static_cast<Result>( d.vkGetSwapchainCounterEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ), static_cast<VkSurfaceCounterFlagBitsEXT>( counter ), &counterValue ) ); - return createResultValue( result, counterValue, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainCounterEXT" ); + return createResultValue( result, counterValue, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainCounterEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), pSwapchainImageCount, reinterpret_cast<VkImage*>( pSwapchainImages ) ) ); + return static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), pSwapchainImageCount, reinterpret_cast< VkImage *>( pSwapchainImages ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Image,Allocator>>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template <typename ImageAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Image, ImageAllocator>>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { - std::vector<Image,Allocator> swapchainImages; + std::vector<Image, ImageAllocator> swapchainImages; uint32_t swapchainImageCount; Result result; do @@ -86109,20 +88045,21 @@ 
namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && swapchainImageCount ) { swapchainImages.resize( swapchainImageCount ); - result = static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, reinterpret_cast<VkImage*>( swapchainImages.data() ) ) ); + result = static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, reinterpret_cast<VkImage *>( swapchainImages.data() ) ) ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( swapchainImageCount < swapchainImages.size() ) ) { - VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); swapchainImages.resize( swapchainImageCount ); } return createResultValue( result, swapchainImages, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainImagesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Image>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Image,Allocator>>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename ImageAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Image>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Image, ImageAllocator>>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, ImageAllocator & imageAllocator, Dispatch const & d ) const { - std::vector<Image,Allocator> swapchainImages( vectorAllocator ); + std::vector<Image, ImageAllocator> swapchainImages( imageAllocator ); uint32_t swapchainImageCount; Result result; do @@ -86131,43 +88068,47 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && swapchainImageCount ) { swapchainImages.resize( swapchainImageCount ); - result = static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, reinterpret_cast<VkImage*>( swapchainImages.data() ) ) ); + result = static_cast<Result>( d.vkGetSwapchainImagesKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ), &swapchainImageCount, reinterpret_cast<VkImage *>( swapchainImages.data() ) ) ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( swapchainImageCount < swapchainImages.size() ) ) { - VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); swapchainImages.resize( swapchainImageCount ); } return createResultValue( result, swapchainImages, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainImagesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( 
d.vkGetSwapchainStatusKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkGetSwapchainStatusKHR( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainStatusKHR", { Result::eSuccess, Result::eSuboptimalKHR } ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainStatusKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkGetValidationCacheDataEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), pDataSize, pData ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const &d ) const + template <typename Uint8_tAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> data; + std::vector<uint8_t, Uint8_tAllocator> data; size_t dataSize; Result result; do @@ -86176,20 +88117,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && dataSize ) { data.resize( dataSize ); - result = static_cast<Result>( d.vkGetValidationCacheDataEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), &dataSize, reinterpret_cast<void*>( data.data() ) ) ); + result = static_cast<Result>( d.vkGetValidationCacheDataEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), &dataSize, reinterpret_cast<void *>( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) { - VULKAN_HPP_ASSERT( dataSize <= data.size() ); data.resize( dataSize ); } return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getValidationCacheDataEXT" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t,Allocator>>::type Device::getValidationCacheDataEXT( 
VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Uint8_tAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, uint8_t>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<uint8_t, Uint8_tAllocator>>::type Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const { - std::vector<uint8_t,Allocator> data( vectorAllocator ); + std::vector<uint8_t, Uint8_tAllocator> data( uint8_tAllocator ); size_t dataSize; Result result; do @@ -86198,1172 +88140,1232 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && dataSize ) { data.resize( dataSize ); - result = static_cast<Result>( d.vkGetValidationCacheDataEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), &dataSize, reinterpret_cast<void*>( data.data() ) ) ); + result = static_cast<Result>( d.vkGetValidationCacheDataEXT( m_device, static_cast<VkValidationCacheEXT>( validationCache ), &dataSize, reinterpret_cast<void *>( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) { - VULKAN_HPP_ASSERT( dataSize <= data.size() ); data.resize( dataSize ); } return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getValidationCacheDataEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkImportFenceFdKHR( m_device, reinterpret_cast<const VkImportFenceFdInfoKHR*>( pImportFenceFdInfo ) ) ); + return static_cast<Result>( d.vkImportFenceFdKHR( m_device, reinterpret_cast<const VkImportFenceFdInfoKHR *>( pImportFenceFdInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkImportFenceFdKHR( m_device, reinterpret_cast<const VkImportFenceFdInfoKHR*>( &importFenceFdInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importFenceFdKHR" ); + Result result = static_cast<Result>( d.vkImportFenceFdKHR( m_device, reinterpret_cast<const VkImportFenceFdInfoKHR *>( &importFenceFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importFenceWin32HandleKHR( const 
VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast<const VkImportFenceWin32HandleInfoKHR*>( pImportFenceWin32HandleInfo ) ) ); + return static_cast<Result>( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast<const VkImportFenceWin32HandleInfoKHR *>( pImportFenceWin32HandleInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast<const VkImportFenceWin32HandleInfoKHR*>( &importFenceWin32HandleInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importFenceWin32HandleKHR" ); + Result result = static_cast<Result>( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast<const VkImportFenceWin32HandleInfoKHR *>( &importFenceWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast<const VkImportSemaphoreFdInfoKHR*>( pImportSemaphoreFdInfo ) ) ); + return static_cast<Result>( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast<const VkImportSemaphoreFdInfoKHR *>( pImportSemaphoreFdInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast<const VkImportSemaphoreFdInfoKHR*>( &importSemaphoreFdInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importSemaphoreFdKHR" ); + Result result = static_cast<Result>( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast<const 
VkImportSemaphoreFdInfoKHR *>( &importSemaphoreFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHR*>( pImportSemaphoreWin32HandleInfo ) ) ); + return static_cast<Result>( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHR *>( pImportSemaphoreWin32HandleInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHR*>( &importSemaphoreWin32HandleInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importSemaphoreWin32HandleKHR" ); + Result result = static_cast<Result>( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast<const VkImportSemaphoreWin32HandleInfoKHR *>( &importSemaphoreWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast<const VkInitializePerformanceApiInfoINTEL*>( pInitializeInfo ) ) ); + return static_cast<Result>( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast<const VkInitializePerformanceApiInfoINTEL *>( pInitializeInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast<const VkInitializePerformanceApiInfoINTEL*>( &initializeInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::initializePerformanceApiINTEL" ); + Result result = static_cast<Result>( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast<const VkInitializePerformanceApiInfoINTEL *>( &initializeInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkInvalidateMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange*>( pMemoryRanges ) ) ); + return static_cast<Result>( d.vkInvalidateMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast<const VkMappedMemoryRange *>( pMemoryRanges ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::invalidateMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const &memoryRanges, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::invalidateMappedMemoryRanges( ArrayProxy<const VULKAN_HPP_NAMESPACE::MappedMemoryRange> const & memoryRanges, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast<const VkMappedMemoryRange*>( memoryRanges.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::invalidateMappedMemoryRanges" ); + Result result = static_cast<Result>( d.vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast<const VkMappedMemoryRange *>( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkMapMemory( m_device, static_cast<VkDeviceMemory>( 
memory ), static_cast<VkDeviceSize>( offset ), static_cast<VkDeviceSize>( size ), static_cast<VkMemoryMapFlags>( flags ), ppData ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void*>::type Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void*>::type Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, Dispatch const & d ) const { void* pData; Result result = static_cast<Result>( d.vkMapMemory( m_device, static_cast<VkDeviceMemory>( memory ), static_cast<VkDeviceSize>( offset ), static_cast<VkDeviceSize>( size ), static_cast<VkMemoryMapFlags>( flags ), &pData ) ); - return createResultValue( result, pData, VULKAN_HPP_NAMESPACE_STRING"::Device::mapMemory" ); + return createResultValue( result, pData, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCacheCount, reinterpret_cast<const VkPipelineCache*>( pSrcCaches ) ) ); + return static_cast<Result>( d.vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCacheCount, reinterpret_cast<const VkPipelineCache *>( pSrcCaches ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::PipelineCache> const &srcCaches, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::PipelineCache> const & srcCaches, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCaches.size() , reinterpret_cast<const VkPipelineCache*>( srcCaches.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::mergePipelineCaches" ); + Result result = static_cast<Result>( d.vkMergePipelineCaches( m_device, static_cast<VkPipelineCache>( dstCache ), srcCaches.size(), reinterpret_cast<const VkPipelineCache *>( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergePipelineCaches" ); } #endif 
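/*
   Illustrative usage of the enhanced-mode wrappers above (a sketch only; `device`,
   `memory`, `dstCache` and `srcCache` are assumed, pre-created vk:: handles, and
   `src`/`size` an assumed host-side buffer and its byte count):

     void * data = device.mapMemory( memory, 0, VK_WHOLE_SIZE );   // throws vk::SystemError on failure
     std::memcpy( data, src, size );                               // <cstring>
     device.unmapMemory( memory );

     device.mergePipelineCaches( dstCache, { srcCache } );         // ArrayProxy built from an initializer list
*/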
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkMergeValidationCachesEXT( m_device, static_cast<VkValidationCacheEXT>( dstCache ), srcCacheCount, reinterpret_cast<const VkValidationCacheEXT*>( pSrcCaches ) ) ); + return static_cast<Result>( d.vkMergeValidationCachesEXT( m_device, static_cast<VkValidationCacheEXT>( dstCache ), srcCacheCount, reinterpret_cast<const VkValidationCacheEXT *>( pSrcCaches ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ValidationCacheEXT> const &srcCaches, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy<const VULKAN_HPP_NAMESPACE::ValidationCacheEXT> const & srcCaches, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkMergeValidationCachesEXT( m_device, static_cast<VkValidationCacheEXT>( dstCache ), srcCaches.size() , reinterpret_cast<const VkValidationCacheEXT*>( srcCaches.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::mergeValidationCachesEXT" ); + Result result = static_cast<Result>( d.vkMergeValidationCachesEXT( m_device, static_cast<VkValidationCacheEXT>( dstCache ), srcCaches.size(), reinterpret_cast<const VkValidationCacheEXT *>( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergeValidationCachesEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT*>( pDeviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) ); + return static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT *>( pDeviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename 
Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT*>( &deviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) ); - return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerEventEXT" ); + Result result = static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT *>( &deviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type Device::registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type Device::registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT*>( &deviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Fence,Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerEventEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast<const VkDeviceEventInfoEXT *>( &deviceEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Fence, Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE Result Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT*>( pDisplayEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkFence*>( pFence ) ) ); + return static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT *>( pDisplayEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Fence>::type Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT*>( &displayEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( &fence ) ) ); - return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerDisplayEventEXT" ); + Result result = static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT *>( &displayEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Fence,Dispatch>>::type Device::registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Fence, Dispatch>>::type Device::registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Fence fence; - Result result = static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT*>( &displayEventInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence*>( 
&fence ) ) ); - - ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d ); - return createResultValue<Fence,Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerDisplayEventEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkRegisterDisplayEventEXT( m_device, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayEventInfoEXT *>( &displayEventInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkFence *>( &fence ) ) ); + ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Fence, Dispatch>( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkReleaseFullScreenExclusiveModeEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkReleaseFullScreenExclusiveModeEXT( m_device, static_cast<VkSwapchainKHR>( swapchain ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::releaseFullScreenExclusiveModeEXT" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseFullScreenExclusiveModeEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType<void>::type Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::releasePerformanceConfigurationINTEL" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releasePerformanceConfigurationINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR(Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkReleaseProfilingLockKHR( m_device ); + return static_cast<Result>( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const { - d.vkReleaseProfilingLockKHR( m_device ); + Result result = static_cast<Result>( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::release" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkReleaseProfilingLockKHR( m_device ); + } + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkResetCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolResetFlags>( flags ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkResetCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolResetFlags>( flags ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetCommandPool" ); + return createResultValue( result, 
VULKAN_HPP_NAMESPACE_STRING "::Device::resetCommandPool" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Result Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkResetDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), static_cast<VkDescriptorPoolResetFlags>( flags ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkResetDescriptorPool( m_device, static_cast<VkDescriptorPool>( descriptorPool ), static_cast<VkDescriptorPoolResetFlags>( flags ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetDescriptorPool" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetDescriptorPool" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkResetEvent( m_device, static_cast<VkEvent>( event ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkResetEvent( m_device, static_cast<VkEvent>( event ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetEvent" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetEvent" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkResetFences( m_device, fenceCount, reinterpret_cast<const VkFence*>( pFences ) ) ); + return static_cast<Result>( d.vkResetFences( m_device, fenceCount, reinterpret_cast<const VkFence *>( pFences ) ) ); } + #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const &fences, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const & fences, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkResetFences( m_device, fences.size() , reinterpret_cast<const VkFence*>( fences.data() ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetFences" ); + Result result = static_cast<Result>( d.vkResetFences( m_device, fences.size(), reinterpret_cast<const VkFence *>( fences.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkResetQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkResetQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkResetQueryPoolEXT( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkResetQueryPoolEXT( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT*>( pNameInfo ) ) ); + return static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>( pNameInfo ) ) ); 
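/*
   Example (sketch): naming a buffer through the enhanced-mode overload declared below;
   `device` and `buffer` are assumed, pre-created vk:: handles, and the
   VK_EXT_debug_utils extension is assumed to be enabled on the instance/device.

     vk::DebugUtilsObjectNameInfoEXT nameInfo(
       vk::ObjectType::eBuffer,
       uint64_t( static_cast<VkBuffer>( buffer ) ),   // raw 64-bit handle value
       "vertex buffer" );
     device.setDebugUtilsObjectNameEXT( nameInfo );   // throws vk::SystemError on failure
*/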
} + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT*>( &nameInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setDebugUtilsObjectNameEXT" ); + Result result = static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>( &nameInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT*>( pTagInfo ) ) ); + return static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>( pTagInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT*>( &tagInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setDebugUtilsObjectTagEXT" ); + Result result = static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>( &tagInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkSetEvent( m_device, static_cast<VkEvent>( event ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setEvent( 
VULKAN_HPP_NAMESPACE::Event event, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkSetEvent( m_device, static_cast<VkEvent>( event ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setEvent" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setEvent" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkSetHdrMetadataEXT( m_device, swapchainCount, reinterpret_cast<const VkSwapchainKHR*>( pSwapchains ), reinterpret_cast<const VkHdrMetadataEXT*>( pMetadata ) ); + d.vkSetHdrMetadataEXT( m_device, swapchainCount, reinterpret_cast<const VkSwapchainKHR *>( pSwapchains ), reinterpret_cast<const VkHdrMetadataEXT *>( pMetadata ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainKHR> const &swapchains, ArrayProxy<const VULKAN_HPP_NAMESPACE::HdrMetadataEXT> const &metadata, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainKHR> const & swapchains, ArrayProxy<const VULKAN_HPP_NAMESPACE::HdrMetadataEXT> const & metadata, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS { #ifdef VULKAN_HPP_NO_EXCEPTIONS VULKAN_HPP_ASSERT( swapchains.size() == metadata.size() ); #else if ( swapchains.size() != metadata.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkDevice::setHdrMetadataEXT: swapchains.size() != metadata.size()" ); - } + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::setHdrMetadataEXT: swapchains.size() != metadata.size()" ); + } #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - d.vkSetHdrMetadataEXT( m_device, swapchains.size() , reinterpret_cast<const VkSwapchainKHR*>( swapchains.data() ), reinterpret_cast<const VkHdrMetadataEXT*>( metadata.data() ) ); + + d.vkSetHdrMetadataEXT( m_device, swapchains.size(), reinterpret_cast<const VkSwapchainKHR *>( swapchains.data() ), reinterpret_cast<const VkHdrMetadataEXT *>( metadata.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkSetLocalDimmingAMD( m_device, static_cast<VkSwapchainKHR>( swapChain ), static_cast<VkBool32>( localDimmingEnable ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + 
VULKAN_HPP_INLINE void Device::setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkSetLocalDimmingAMD( m_device, static_cast<VkSwapchainKHR>( swapChain ), static_cast<VkBool32>( localDimmingEnable ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkSetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), data ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkSetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), data ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setPrivateDataEXT" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo*>( pSignalInfo ) ) ); + return static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( pSignalInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo*>( &signalInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::signalSemaphore" 
); + Result result = static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( &signalInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast<const VkSemaphoreSignalInfo*>( pSignalInfo ) ) ); + return static_cast<Result>( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( pSignalInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast<const VkSemaphoreSignalInfo*>( &signalInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::signalSemaphoreKHR" ); + Result result = static_cast<Result>( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( &signalInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkTrimCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkTrimCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkTrimCommandPoolKHR( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, 
VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkTrimCommandPoolKHR( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkUninitializePerformanceApiINTEL( m_device ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkUninitializePerformanceApiINTEL( m_device ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkUnmapMemory( m_device, static_cast<VkDeviceMemory>( memory ) ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkUnmapMemory( m_device, static_cast<VkDeviceMemory>( memory ) ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkUpdateDescriptorSetWithTemplate( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkUpdateDescriptorSetWithTemplate( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData ); + d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast<VkDescriptorSet>( descriptorSet ), 
static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData ); + d.vkUpdateDescriptorSets( m_device, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet *>( pDescriptorWrites ), descriptorCopyCount, reinterpret_cast<const VkCopyDescriptorSet *>( pDescriptorCopies ) ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void Device::updateDescriptorSets( ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const & descriptorWrites, ArrayProxy<const VULKAN_HPP_NAMESPACE::CopyDescriptorSet> const & descriptorCopies, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData ); + d.vkUpdateDescriptorSets( m_device, descriptorWrites.size(), reinterpret_cast<const VkWriteDescriptorSet *>( descriptorWrites.data() ), descriptorCopies.size(), reinterpret_cast<const VkCopyDescriptorSet *>( descriptorCopies.data() ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkUpdateDescriptorSets( m_device, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet*>( pDescriptorWrites ), descriptorCopyCount, reinterpret_cast<const VkCopyDescriptorSet*>( pDescriptorCopies ) ); + return static_cast<Result>( d.vkWaitForFences( m_device, fenceCount, reinterpret_cast<const VkFence *>( pFences ), static_cast<VkBool32>( waitAll ), timeout ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Device::updateDescriptorSets( ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const &descriptorWrites, ArrayProxy<const VULKAN_HPP_NAMESPACE::CopyDescriptorSet> const 
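The updateDescriptorSets hunk above keeps both flavours: the raw pointer/count overload and the ArrayProxy overload that forwards .size()/.data(). A short sketch of the ArrayProxy form, assuming an existing descriptor set and buffer (write_ubo_binding is a hypothetical helper, not part of this patch):

#include <vulkan/vulkan.hpp>

// Illustrative only: bind a single uniform buffer to binding 0 of `set`.
void write_ubo_binding( vk::Device device, vk::DescriptorSet set, vk::Buffer buffer, vk::DeviceSize size )
{
  vk::DescriptorBufferInfo bufferInfo( buffer, 0, size );

  vk::WriteDescriptorSet write;
  write.dstSet          = set;
  write.dstBinding      = 0;
  write.descriptorCount = 1;
  write.descriptorType  = vk::DescriptorType::eUniformBuffer;
  write.pBufferInfo     = &bufferInfo;

  // ArrayProxy overload: a single element and `nullptr` are both accepted.
  device.updateDescriptorSets( write, nullptr );
}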
&descriptorCopies, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const & fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d ) const { - d.vkUpdateDescriptorSets( m_device, descriptorWrites.size() , reinterpret_cast<const VkWriteDescriptorSet*>( descriptorWrites.data() ), descriptorCopies.size() , reinterpret_cast<const VkCopyDescriptorSet*>( descriptorCopies.data() ) ); + Result result = static_cast<Result>( d.vkWaitForFences( m_device, fences.size(), reinterpret_cast<const VkFence *>( fences.data() ), static_cast<VkBool32>( waitAll ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitForFences", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkWaitForFences( m_device, fenceCount, reinterpret_cast<const VkFence*>( pFences ), static_cast<VkBool32>( waitAll ), timeout ) ); + return static_cast<Result>( d.vkWaitSemaphores( m_device, reinterpret_cast<const VkSemaphoreWaitInfo *>( pWaitInfo ), timeout ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const &fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkWaitForFences( m_device, fences.size() , reinterpret_cast<const VkFence*>( fences.data() ), static_cast<VkBool32>( waitAll ), timeout ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitForFences", { Result::eSuccess, Result::eTimeout } ); + Result result = static_cast<Result>( d.vkWaitSemaphores( m_device, reinterpret_cast<const VkSemaphoreWaitInfo *>( &waitInfo ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphores", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkWaitSemaphores( m_device, reinterpret_cast<const VkSemaphoreWaitInfo*>( pWaitInfo ), timeout ) ); + return static_cast<Result>( d.vkWaitSemaphoresKHR( m_device, 
reinterpret_cast<const VkSemaphoreWaitInfo *>( pWaitInfo ), timeout ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkWaitSemaphores( m_device, reinterpret_cast<const VkSemaphoreWaitInfo*>( &waitInfo ), timeout ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitSemaphores", { Result::eSuccess, Result::eTimeout } ); + Result result = static_cast<Result>( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast<const VkSemaphoreWaitInfo *>( &waitInfo ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphoresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast<const VkSemaphoreWaitInfo*>( pWaitInfo ), timeout ) ); + return static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureKHR *>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), dataSize, pData, stride ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const &d ) const + template <typename T, typename Dispatch> + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
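The waitForFences and waitSemaphores(KHR) hunks above keep eSuccess and eTimeout as non-throwing return codes, now spelled with the full VULKAN_HPP_NAMESPACE::Result qualification; any other result still throws in enhanced mode, so callers only need to distinguish success from timeout. A small sketch of polling a fence, assuming the fence was submitted elsewhere (fence_signaled_within_1ms is a hypothetical helper, not part of this patch):

#include <vulkan/vulkan.hpp>

// Sketch: poll a fence with a 1 ms timeout. The ArrayProxy overload accepts a
// single fence; eTimeout is a legal return value, anything else throws.
bool fence_signaled_within_1ms( vk::Device device, vk::Fence fence )
{
  vk::Result r = device.waitForFences( fence, VK_TRUE, 1000000 /* timeout in ns */ );
  return r == vk::Result::eSuccess;  // vk::Result::eTimeout means "not signaled yet"
}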
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy<T> const &data, size_t stride, Dispatch const &d ) const { - Result result = static_cast<Result>( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast<const VkSemaphoreWaitInfo*>( &waitInfo ), timeout ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitSemaphoresKHR", { Result::eSuccess, Result::eTimeout } ); + Result result = static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size() , reinterpret_cast<const VkAccelerationStructureKHR*>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ), stride ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::writeAccelerationStructuresPropertiesKHR" ); + } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename T, typename Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<std::vector<T,Allocator>>::type Device::writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, size_t stride, Dispatch const & d ) const { - return static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructureCount, reinterpret_cast<const VkAccelerationStructureKHR*>( pAccelerationStructures ), static_cast<VkQueryType>( queryType ), dataSize, pData, stride ) ); + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector<T,Allocator> data( dataSize / sizeof( T ) ); + Result result = static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size(), reinterpret_cast<const VkAccelerationStructureKHR *>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), data.size() * sizeof( T ), reinterpret_cast<void *>( data.data() ), stride ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" ); } -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename T, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy<T> const &data, size_t stride, Dispatch const &d ) const + + template <typename T, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<T>::type Device::writeAccelerationStructuresPropertyKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & 
accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t stride, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size() , reinterpret_cast<const VkAccelerationStructureKHR*>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), data.size() * sizeof( T ) , reinterpret_cast<void*>( data.data() ), stride ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::writeAccelerationStructuresPropertiesKHR" ); + T data; + Result result = static_cast<Result>( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size(), reinterpret_cast<const VkAccelerationStructureKHR *>( accelerationStructures.data() ), static_cast<VkQueryType>( queryType ), sizeof( T ), reinterpret_cast<void *>( &data ), stride ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + #ifdef VK_USE_PLATFORM_ANDROID_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createAndroidSurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const 
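The writeAccelerationStructuresPropertiesKHR hunk above deprecates the ArrayProxy<T> data flavour and adds a std::vector-returning flavour plus a single-value writeAccelerationStructuresPropertyKHR. A sketch of the single-value form, assuming the provisional ray-tracing headers (VK_ENABLE_BETA_EXTENSIONS) are available, the structures were built with compaction allowed, and a dispatcher that has loaded the extension entry points; query_compacted_size is a hypothetical helper, not part of this patch:

#include <vulkan/vulkan.hpp>

// Illustrative only: host-side read of the compacted size of one acceleration structure.
vk::DeviceSize query_compacted_size( vk::Device device,
                                     vk::AccelerationStructureKHR accel,
                                     const vk::DispatchLoaderDynamic & dld )
{
  return device.writeAccelerationStructuresPropertyKHR<vk::DeviceSize>(
      accel,                                                  // ArrayProxy accepts a single handle
      vk::QueryType::eAccelerationStructureCompactedSizeKHR,
      sizeof( vk::DeviceSize ),                               // stride
      dld );
}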
VkAndroidSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createAndroidSurfaceKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast<const VkAndroidSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Instance::createDebugReportCallbackEXT( const VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDebugReportCallbackEXT( const VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDebugReportCallbackEXT*>( pCallback ) ) ); + return static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( 
pAllocator ), reinterpret_cast< VkDebugReportCallbackEXT *>( pCallback ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT>::type Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT>::type Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback; - Result result = static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugReportCallbackEXT*>( &callback ) ) ); - return createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugReportCallbackEXT" ); + Result result = static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugReportCallbackEXT *>( &callback ) ) ); + return createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DebugReportCallbackEXT,Dispatch>>::type Instance::createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT, Dispatch>>::type Instance::createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback; - Result result = static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugReportCallbackEXT*>( &callback ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DebugReportCallbackEXT,Dispatch>( result, callback, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugReportCallbackEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugReportCallbackEXT *>( &callback ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT, Dispatch>( result, callback, 
VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE Result Instance::createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDebugUtilsMessengerEXT*>( pMessenger ) ) ); + return static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDebugUtilsMessengerEXT *>( pMessenger ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT>::type Instance::createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT>::type Instance::createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger; - Result result = static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugUtilsMessengerEXT*>( &messenger ) ) ); - return createResultValue( result, messenger, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugUtilsMessengerEXT" ); + Result result = static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugUtilsMessengerEXT *>( &messenger ) ) ); + return createResultValue( result, messenger, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DebugUtilsMessengerEXT,Dispatch>>::type Instance::createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename 
ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT, Dispatch>>::type Instance::createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger; - Result result = static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugUtilsMessengerEXT*>( &messenger ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<DebugUtilsMessengerEXT,Dispatch>( result, messenger, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugUtilsMessengerEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDebugUtilsMessengerEXT *>( &messenger ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT, Dispatch>( result, messenger, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_DIRECTFB_EXT - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { 
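The createDebugReportCallbackEXT / createDebugUtilsMessengerEXT hunks above add VULKAN_HPP_NODISCARD to the pointer overloads and fully qualify the UniqueHandle return types; call sites are unchanged. A sketch of the RAII flavour, assuming VK_EXT_debug_utils is enabled and a vk::DispatchLoaderDynamic that outlives the handle (make_messenger and `callback` are placeholders, not part of this patch):

#include <vulkan/vulkan.hpp>

// Sketch: install a debug messenger with the Unique (RAII) flavour.
// `dld` must have loaded the VK_EXT_debug_utils entry points and must outlive the
// returned handle, since the ObjectDestroy deleter keeps a pointer to the dispatcher.
vk::UniqueHandle<vk::DebugUtilsMessengerEXT, vk::DispatchLoaderDynamic>
make_messenger( vk::Instance instance,
                PFN_vkDebugUtilsMessengerCallbackEXT callback,
                const vk::DispatchLoaderDynamic & dld )
{
  vk::DebugUtilsMessengerCreateInfoEXT createInfo;
  createInfo.messageSeverity = vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
                               vk::DebugUtilsMessageSeverityFlagBitsEXT::eError;
  createInfo.messageType     = vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral |
                               vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation;
  createInfo.pfnUserCallback = callback;
  return instance.createDebugUtilsMessengerEXTUnique( createInfo, nullptr, dld );
}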
VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDirectFBSurfaceEXT" ); + Result result = static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDirectFBSurfaceEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast<const VkDirectFBSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, 
VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDisplayPlaneSurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDisplayPlaneSurfaceKHRUnique", deleter ); + Result 
result = static_cast<Result>( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast<const VkDisplaySurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createHeadlessSurfaceEXT" ); + Result result = static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createHeadlessSurfaceEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast<const VkHeadlessSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_IOS_MVK - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
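The headless-surface hunk above, like the other platform surface creators in this section, only reflows the nested smart-handle guard to the indented `# ifndef VULKAN_HPP_NO_SMART_HANDLE` form and qualifies the UniqueHandle return type. A sketch of the Unique flavour, useful for windowless or CI rendering, assuming the instance enabled VK_EXT_headless_surface and the default dispatcher can resolve the extension (for example the dynamic dispatcher selected via VULKAN_HPP_DISPATCH_LOADER_DYNAMIC); make_headless_surface is a hypothetical helper, not part of this patch:

#include <vulkan/vulkan.hpp>

// Sketch: a surface with no window system attached, handy for offscreen swapchain tests.
vk::UniqueSurfaceKHR make_headless_surface( vk::Instance instance )
{
  vk::HeadlessSurfaceCreateInfoEXT createInfo;  // only a reserved `flags` member
  return instance.createHeadlessSurfaceEXTUnique( createInfo );
}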
VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createIOSSurfaceMVK" ); + Result result = static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVK" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createIOSSurfaceMVKUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast<const VkIOSSurfaceCreateInfoMVK *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVKUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_IOS_MVK*/ + #ifdef VK_USE_PLATFORM_FUCHSIA - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE Result Instance::createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createImagePipeSurfaceFUCHSIA" ); + Result result = static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIA" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), 
reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createImagePipeSurfaceFUCHSIAUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast<const VkImagePipeSurfaceCreateInfoFUCHSIA *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIAUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_FUCHSIA*/ + #ifdef VK_USE_PLATFORM_MACOS_MVK - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMacOSSurfaceMVK" ); + Result result = static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVK" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMacOSSurfaceMVKUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast<const VkMacOSSurfaceCreateInfoMVK *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVKUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_MACOS_MVK*/ + #ifdef VK_USE_PLATFORM_METAL_EXT - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMetalSurfaceEXT" ); + Result result = static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXT" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createMetalSurfaceEXTUnique( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createMetalSurfaceEXTUnique( const MetalSurfaceCreateInfoEXT & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMetalSurfaceEXTUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast<const VkMetalSurfaceCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXTUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_METAL_EXT*/ + #ifdef VK_USE_PLATFORM_GGP - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
Result Instance::createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createStreamDescriptorSurfaceGGP" ); + Result result = static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGP" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, 
Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createStreamDescriptorSurfaceGGPUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast<const VkStreamDescriptorSurfaceCreateInfoGGP *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGPUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_GGP*/ + #ifdef VK_USE_PLATFORM_VI_NN - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( 
&surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createViSurfaceNN" ); + Result result = static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNN" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createViSurfaceNNUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast<const VkViSurfaceCreateInfoNN *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNNUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_VI_NN*/ + #ifdef VK_USE_PLATFORM_WAYLAND_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, 
reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWaylandSurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWaylandSurfaceKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast<const VkWaylandSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, 
VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWin32SurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, 
Dispatch>>::type Instance::createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWin32SurfaceKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast<const VkWin32SurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_XCB_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXcbSurfaceKHR( const VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXcbSurfaceKHR( const VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface ) ) ); + return static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const 
AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXcbSurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXcbSurfaceKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast<const VkXcbSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XCB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXlibSurfaceKHR( const VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXlibSurfaceKHR( const VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkSurfaceKHR*>( pSurface 
) ) ); + return static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceKHR>::type Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXlibSurfaceKHR" ); + Result result = static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHR" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<SurfaceKHR,Dispatch>>::type Instance::createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>>::type Instance::createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceKHR surface; - Result result = static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR*>( &surface ) ) ); - - ObjectDestroy<Instance,Dispatch> deleter( *this, allocator, d ); - return createResultValue<SurfaceKHR,Dispatch>( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXlibSurfaceKHRUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast<const VkXlibSurfaceCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkSurfaceKHR *>( &surface ) ) ); + ObjectDestroy<Instance, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( result, 
surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHRUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_KHR*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkDebugReportMessageEXT( m_instance, static_cast<VkDebugReportFlagsEXT>( flags ), static_cast<VkDebugReportObjectTypeEXT>( objectType ), object, location, messageCode, pLayerPrefix, pMessage ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - VULKAN_HPP_ASSERT( layerPrefix.size() == message.size() ); -#else - if ( layerPrefix.size() != message.size() ) - { - throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkInstance::debugReportMessageEXT: layerPrefix.size() != message.size()" ); - } -#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ d.vkDebugReportMessageEXT( m_instance, static_cast<VkDebugReportFlagsEXT>( flags ), static_cast<VkDebugReportObjectTypeEXT>( objectType ), object, location, messageCode, layerPrefix.c_str(), message.c_str() ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> 
allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast<VkDebugReportCallbackEXT>( callback ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void 
Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast<VkDebugUtilsMessengerEXT>( messenger ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void 
Instance::destroy( Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroyInstance( m_instance, reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) ); + d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkDestroySurfaceKHR( m_instance, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) ); + d.vkDestroySurfaceKHR( m_instance, 
static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( pPhysicalDeviceGroupProperties ) ) ); + return static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast< VkPhysicalDeviceGroupProperties *>( pPhysicalDeviceGroupProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type Instance::enumeratePhysicalDeviceGroups(Dispatch const &d ) const + template <typename PhysicalDeviceGroupPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type Instance::enumeratePhysicalDeviceGroups( Dispatch const & d ) const { - std::vector<PhysicalDeviceGroupProperties,Allocator> physicalDeviceGroupProperties; + std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator> physicalDeviceGroupProperties; uint32_t physicalDeviceGroupCount; Result result; do @@ -87372,20 +89374,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) { physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( physicalDeviceGroupProperties.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties *>( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); } return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroups" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type 
Instance::enumeratePhysicalDeviceGroups(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PhysicalDeviceGroupPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type Instance::enumeratePhysicalDeviceGroups( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d ) const { - std::vector<PhysicalDeviceGroupProperties,Allocator> physicalDeviceGroupProperties( vectorAllocator ); + std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator> physicalDeviceGroupProperties( physicalDeviceGroupPropertiesAllocator ); uint32_t physicalDeviceGroupCount; Result result; do @@ -87394,28 +89397,29 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) { physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( physicalDeviceGroupProperties.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties *>( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); } return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroups" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( pPhysicalDeviceGroupProperties ) ) ); + return static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast< VkPhysicalDeviceGroupProperties *>( pPhysicalDeviceGroupProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type Instance::enumeratePhysicalDeviceGroupsKHR(Dispatch const &d ) const + template <typename PhysicalDeviceGroupPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename 
ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type Instance::enumeratePhysicalDeviceGroupsKHR( Dispatch const & d ) const { - std::vector<PhysicalDeviceGroupProperties,Allocator> physicalDeviceGroupProperties; + std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator> physicalDeviceGroupProperties; uint32_t physicalDeviceGroupCount; Result result; do @@ -87424,20 +89428,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) { physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( physicalDeviceGroupProperties.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties *>( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); } return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroupsKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties,Allocator>>::type Instance::enumeratePhysicalDeviceGroupsKHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PhysicalDeviceGroupPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceGroupProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator>>::type Instance::enumeratePhysicalDeviceGroupsKHR( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d ) const { - std::vector<PhysicalDeviceGroupProperties,Allocator> physicalDeviceGroupProperties( vectorAllocator ); + std::vector<PhysicalDeviceGroupProperties, PhysicalDeviceGroupPropertiesAllocator> physicalDeviceGroupProperties( physicalDeviceGroupPropertiesAllocator ); uint32_t physicalDeviceGroupCount; Result result; do @@ -87446,28 +89451,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) { physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties*>( physicalDeviceGroupProperties.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast<VkPhysicalDeviceGroupProperties *>( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); } } 
while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); } return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroupsKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, pPhysicalDeviceCount, reinterpret_cast<VkPhysicalDevice*>( pPhysicalDevices ) ) ); + return static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, pPhysicalDeviceCount, reinterpret_cast< VkPhysicalDevice *>( pPhysicalDevices ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type Instance::enumeratePhysicalDevices(Dispatch const &d ) const + template <typename PhysicalDeviceAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDevice, PhysicalDeviceAllocator>>::type Instance::enumeratePhysicalDevices( Dispatch const & d ) const { - std::vector<PhysicalDevice,Allocator> physicalDevices; + std::vector<PhysicalDevice, PhysicalDeviceAllocator> physicalDevices; uint32_t physicalDeviceCount; Result result; do @@ -87476,20 +89483,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceCount ) { physicalDevices.resize( physicalDeviceCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast<VkPhysicalDevice*>( physicalDevices.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast<VkPhysicalDevice *>( physicalDevices.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceCount < physicalDevices.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); physicalDevices.resize( physicalDeviceCount ); } return createResultValue( result, physicalDevices, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDevices" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDevice>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDevice,Allocator>>::type Instance::enumeratePhysicalDevices(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PhysicalDeviceAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, 
PhysicalDevice>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDevice, PhysicalDeviceAllocator>>::type Instance::enumeratePhysicalDevices( PhysicalDeviceAllocator & physicalDeviceAllocator, Dispatch const & d ) const { - std::vector<PhysicalDevice,Allocator> physicalDevices( vectorAllocator ); + std::vector<PhysicalDevice, PhysicalDeviceAllocator> physicalDevices( physicalDeviceAllocator ); uint32_t physicalDeviceCount; Result result; do @@ -87498,111 +89506,134 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && physicalDeviceCount ) { physicalDevices.resize( physicalDeviceCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast<VkPhysicalDevice*>( physicalDevices.data() ) ) ); + result = static_cast<Result>( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast<VkPhysicalDevice *>( physicalDevices.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( physicalDeviceCount < physicalDevices.size() ) ) { - VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); physicalDevices.resize( physicalDeviceCount ); } return createResultValue( result, physicalDevices, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDevices" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const char* pName, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const char* pName, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetInstanceProcAddr( m_instance, pName ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const std::string & name, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const std::string & name, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetInstanceProcAddr( m_instance, name.c_str() ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast<VkDebugUtilsMessageSeverityFlagBitsEXT>( messageSeverity ), static_cast<VkDebugUtilsMessageTypeFlagsEXT>( messageTypes ), reinterpret_cast<const VkDebugUtilsMessengerCallbackDataEXT*>( pCallbackData ) ); + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast<VkDebugUtilsMessageSeverityFlagBitsEXT>( messageSeverity ), static_cast<VkDebugUtilsMessageTypeFlagsEXT>( messageTypes ), 
reinterpret_cast<const VkDebugUtilsMessengerCallbackDataEXT *>( pCallbackData ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast<VkDebugUtilsMessageSeverityFlagBitsEXT>( messageSeverity ), static_cast<VkDebugUtilsMessageTypeFlagsEXT>( messageTypes ), reinterpret_cast<const VkDebugUtilsMessengerCallbackDataEXT*>( &callbackData ) ); + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast<VkDebugUtilsMessageSeverityFlagBitsEXT>( messageSeverity ), static_cast<VkDebugUtilsMessageTypeFlagsEXT>( messageTypes ), reinterpret_cast<const VkDebugUtilsMessengerCallbackDataEXT *>( &callbackData ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkAcquireXlibDisplayEXT( m_physicalDevice, dpy, static_cast<VkDisplayKHR>( display ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type PhysicalDevice::acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type PhysicalDevice::acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkAcquireXlibDisplayEXT( m_physicalDevice, &dpy, static_cast<VkDisplayKHR>( display ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::acquireXlibDisplayEXT" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch 
const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDevice*>( pDevice ) ) ); + return static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDevice *>( pDevice ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Device>::type PhysicalDevice::createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Device>::type PhysicalDevice::createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Device device; - Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice*>( &device ) ) ); - return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDevice" ); + Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice *>( &device ) ) ); + return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDevice" ); } -#ifndef VULKAN_HPP_NO_SMART_HANDLE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Device,Dispatch>>::type PhysicalDevice::createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Device, Dispatch>>::type PhysicalDevice::createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Device device; - Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice*>( &device ) ) ); - - ObjectDestroy<NoParent,Dispatch> deleter( allocator, d ); - return createResultValue<Device,Dispatch>( result, device, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDeviceUnique", deleter ); + Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice *>( &device ) ) ); + ObjectDestroy<NoParent, Dispatch> 
deleter( allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::Device, Dispatch>( result, device, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDeviceUnique", deleter ); } -#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDisplayModeKHR*>( pMode ) ) ); + return static_cast<Result>( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDisplayModeKHR *>( pMode ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayModeKHR>::type PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayModeKHR>::type PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DisplayModeKHR mode; - Result result = static_cast<Result>( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDisplayModeKHR*>( &mode ) ) ); - return createResultValue( result, mode, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDisplayModeKHR" ); + Result result = static_cast<Result>( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDisplayModeKHR *>( &mode ) ) ); + return createResultValue( result, mode, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHR" ); } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DisplayModeKHR, Dispatch>>::type PhysicalDevice::createDisplayModeKHRUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayModeKHR mode; + Result result = static_cast<Result>( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), reinterpret_cast<const VkDisplayModeCreateInfoKHR *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDisplayModeKHR *>( &mode ) ) ); + ObjectDestroy<PhysicalDevice, Dispatch> deleter( *this, allocator, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DisplayModeKHR, Dispatch>( result, mode, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, pLayerName, pPropertyCount, reinterpret_cast<VkExtensionProperties*>( pProperties ) ) ); + return static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, pLayerName, pPropertyCount, reinterpret_cast< VkExtensionProperties *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName, Dispatch const &d ) const + template <typename ExtensionPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName, Dispatch const & d ) const { - std::vector<ExtensionProperties,Allocator> properties; + std::vector<ExtensionProperties, ExtensionPropertiesAllocator> properties; uint32_t propertyCount; Result result; do @@ -87611,20 +89642,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceExtensionProperties" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename ExtensionPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, ExtensionProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d ) const { - std::vector<ExtensionProperties,Allocator> properties( vectorAllocator ); + std::vector<ExtensionProperties, ExtensionPropertiesAllocator> properties( extensionPropertiesAllocator ); uint32_t propertyCount; Result result; do @@ -87633,28 +89665,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceExtensionProperties" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) ); + return static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, pPropertyCount, reinterpret_cast< VkLayerProperties *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type PhysicalDevice::enumerateDeviceLayerProperties(Dispatch const &d ) const + template <typename LayerPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type PhysicalDevice::enumerateDeviceLayerProperties( Dispatch const & d ) const { - std::vector<LayerProperties,Allocator> properties; + std::vector<LayerProperties, LayerPropertiesAllocator> properties; uint32_t propertyCount; Result result; do @@ -87663,20 +89697,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceLayerProperties" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type PhysicalDevice::enumerateDeviceLayerProperties(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename LayerPropertiesAllocator, typename Dispatch, typename B, typename 
std::enable_if<std::is_same<typename B::value_type, LayerProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties, LayerPropertiesAllocator>>::type PhysicalDevice::enumerateDeviceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d ) const { - std::vector<LayerProperties,Allocator> properties( vectorAllocator ); + std::vector<LayerProperties, LayerPropertiesAllocator> properties( layerPropertiesAllocator ); uint32_t propertyCount; Result result; do @@ -87685,12 +89720,12 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceLayerProperties" ); @@ -87698,10 +89733,10 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template <typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, pCounterCount, reinterpret_cast<VkPerformanceCounterKHR*>( pCounters ), reinterpret_cast<VkPerformanceCounterDescriptionKHR*>( pCounterDescriptions ) ) ); + return static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, pCounterCount, reinterpret_cast< VkPerformanceCounterKHR *>( pCounters ), reinterpret_cast< VkPerformanceCounterDescriptionKHR *>( pCounterDescriptions ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE @@ -87755,10 +89790,12 @@ namespace VULKAN_HPP_NAMESPACE } - template <typename PerformanceCounterKHRAllocator, typename PerformanceCounterDescriptionKHRAllocator, typename Dispatch> + template <typename PerformanceCounterKHRAllocator, typename PerformanceCounterDescriptionKHRAllocator, typename Dispatch> VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type 
PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, Dispatch const & d ) const { - std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>> enumeratedData; + std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>> data; + std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator> & counters = data.first; + std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator> & counterDescriptions = data.second; uint32_t counterCount; Result result; do @@ -87766,24 +89803,26 @@ namespace VULKAN_HPP_NAMESPACE result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, nullptr, nullptr ) ); if ( ( result == Result::eSuccess ) && counterCount ) { - enumeratedData.first.resize( counterCount ); - enumeratedData.second.resize( counterCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast<VkPerformanceCounterKHR *>( enumeratedData.first.data() ), reinterpret_cast<VkPerformanceCounterDescriptionKHR *>( enumeratedData.second.data() ) ) ); - VULKAN_HPP_ASSERT( counterCount <= enumeratedData.first.size() ); + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast<VkPerformanceCounterKHR *>( counters.data() ), reinterpret_cast<VkPerformanceCounterDescriptionKHR *>( counterDescriptions.data() ) ) ); + VULKAN_HPP_ASSERT( counterCount <= counters.size() ); } } while ( result == Result::eIncomplete ); - if ( ( result == Result::eSuccess ) && ( counterCount < enumeratedData.first.size() ) ) + if ( ( result == Result::eSuccess ) && ( counterCount < counters.size() ) ) { - enumeratedData.first.resize( counterCount ); - enumeratedData.second.resize( counterCount ); + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); } - return createResultValue( result, enumeratedData, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); } - template <typename PerformanceCounterKHRAllocator, typename PerformanceCounterDescriptionKHRAllocator, typename Dispatch, typename B1, typename B2, typename std::enable_if < std::is_same<typename B1::value_type, PerformanceCounterKHR>::value && std::is_same<typename B2::value_type, PerformanceCounterDescriptionKHR>::value, int>::type > - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & countersAllocator, PerformanceCounterDescriptionKHRAllocator & counterDescriptionsAllocator, Dispatch const & d ) const + template <typename PerformanceCounterKHRAllocator, typename 
PerformanceCounterDescriptionKHRAllocator, typename Dispatch, typename B1, typename B2, typename std::enable_if<std::is_same<typename B1::value_type, PerformanceCounterKHR>::value && std::is_same<typename B2::value_type, PerformanceCounterDescriptionKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>>>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & performanceCounterKHRAllocator, PerformanceCounterDescriptionKHRAllocator & performanceCounterDescriptionKHRAllocator, Dispatch const & d ) const { - std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>> enumeratedData( std::piecewise_construct, std::forward_as_tuple( countersAllocator ), std::forward_as_tuple( counterDescriptionsAllocator ) ); + std::pair<std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator>, std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator>> data( std::piecewise_construct, std::forward_as_tuple( performanceCounterKHRAllocator ), std::forward_as_tuple( performanceCounterDescriptionKHRAllocator ) ); + std::vector<PerformanceCounterKHR, PerformanceCounterKHRAllocator> & counters = data.first; + std::vector<PerformanceCounterDescriptionKHR, PerformanceCounterDescriptionKHRAllocator> & counterDescriptions = data.second; uint32_t counterCount; Result result; do @@ -87791,31 +89830,33 @@ namespace VULKAN_HPP_NAMESPACE result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, nullptr, nullptr ) ); if ( ( result == Result::eSuccess ) && counterCount ) { - enumeratedData.first.resize( counterCount ); - enumeratedData.second.resize( counterCount ); - result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast<VkPerformanceCounterKHR *>( enumeratedData.first.data() ), reinterpret_cast<VkPerformanceCounterDescriptionKHR *>( enumeratedData.second.data() ) ) ); - VULKAN_HPP_ASSERT( counterCount <= enumeratedData.first.size() ); + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + result = static_cast<Result>( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast<VkPerformanceCounterKHR *>( counters.data() ), reinterpret_cast<VkPerformanceCounterDescriptionKHR *>( counterDescriptions.data() ) ) ); + VULKAN_HPP_ASSERT( counterCount <= counters.size() ); } } while ( result == Result::eIncomplete ); - if ( ( result == Result::eSuccess ) && ( counterCount < enumeratedData.first.size() ) ) + if ( ( result == Result::eSuccess ) && ( counterCount < counters.size() ) ) { - enumeratedData.first.resize( counterCount ); - enumeratedData.second.resize( counterCount ); + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); } - return createResultValue( result, enumeratedData, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + return createResultValue( result, data, 
VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), pPropertyCount, reinterpret_cast<VkDisplayModeProperties2KHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), pPropertyCount, reinterpret_cast< VkDisplayModeProperties2KHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModeProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d ) const + template <typename DisplayModeProperties2KHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator>>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const { - std::vector<DisplayModeProperties2KHR,Allocator> properties; + std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -87824,20 +89865,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModeProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModeProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModeProperties2KHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayModeProperties2KHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModeProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayModeProperties2KHRAllocator, typename 
Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayModeProperties2KHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator>>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModeProperties2KHRAllocator & displayModeProperties2KHRAllocator, Dispatch const & d ) const { - std::vector<DisplayModeProperties2KHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayModeProperties2KHR, DisplayModeProperties2KHRAllocator> properties( displayModeProperties2KHRAllocator ); uint32_t propertyCount; Result result; do @@ -87846,28 +89888,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModeProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModeProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModeProperties2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), pPropertyCount, reinterpret_cast<VkDisplayModePropertiesKHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), pPropertyCount, reinterpret_cast< VkDisplayModePropertiesKHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d ) const + template <typename DisplayModePropertiesKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator>>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const { - std::vector<DisplayModePropertiesKHR,Allocator> properties; + 
std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -87876,20 +89920,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModePropertiesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayModePropertiesKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayModePropertiesKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayModePropertiesKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator>>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModePropertiesKHRAllocator & displayModePropertiesKHRAllocator, Dispatch const & d ) const { - std::vector<DisplayModePropertiesKHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayModePropertiesKHR, DisplayModePropertiesKHRAllocator> properties( displayModePropertiesKHRAllocator ); uint32_t propertyCount; Result result; do @@ -87898,58 +89943,64 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast<VkDisplayKHR>( display ), &propertyCount, reinterpret_cast<VkDisplayModePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModePropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilities2KHR( const 
VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilities2KHR( const VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkDisplayPlaneInfo2KHR*>( pDisplayPlaneInfo ), reinterpret_cast<VkDisplayPlaneCapabilities2KHR*>( pCapabilities ) ) ); + return static_cast<Result>( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkDisplayPlaneInfo2KHR *>( pDisplayPlaneInfo ), reinterpret_cast< VkDisplayPlaneCapabilities2KHR *>( pCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR>::type PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR>::type PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR capabilities; - Result result = static_cast<Result>( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkDisplayPlaneInfo2KHR*>( &displayPlaneInfo ), reinterpret_cast<VkDisplayPlaneCapabilities2KHR*>( &capabilities ) ) ); - return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); + Result result = static_cast<Result>( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkDisplayPlaneInfo2KHR *>( &displayPlaneInfo ), reinterpret_cast<VkDisplayPlaneCapabilities2KHR *>( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast<VkDisplayPlaneCapabilitiesKHR*>( pCapabilities ) ) ); + return static_cast<Result>( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast< VkDisplayPlaneCapabilitiesKHR *>( pCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR>::type PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR>::type PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities; - Result result = static_cast<Result>( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast<VkDisplayPlaneCapabilitiesKHR*>( &capabilities ) ) ); - return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" ); + Result result = static_cast<Result>( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast<VkDisplayModeKHR>( mode ), planeIndex, reinterpret_cast<VkDisplayPlaneCapabilitiesKHR *>( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, pDisplayCount, reinterpret_cast<VkDisplayKHR*>( pDisplays ) ) ); + return static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, pDisplayCount, reinterpret_cast< VkDisplayKHR *>( pDisplays ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const &d ) const + template <typename DisplayKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayKHR, DisplayKHRAllocator>>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const & d ) const { - std::vector<DisplayKHR,Allocator> displays; + std::vector<DisplayKHR, DisplayKHRAllocator> displays; uint32_t displayCount; Result result; do @@ -87958,20 +90009,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && displayCount ) { displays.resize( displayCount ); - result = static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast<VkDisplayKHR*>( displays.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast<VkDisplayKHR *>( displays.data() ) ) ); + VULKAN_HPP_ASSERT( displayCount <= displays.size() ); } } 
while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( displayCount < displays.size() ) ) { - VULKAN_HPP_ASSERT( displayCount <= displays.size() ); displays.resize( displayCount ); } return createResultValue( result, displays, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayKHR,Allocator>>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayKHR, DisplayKHRAllocator>>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, DisplayKHRAllocator & displayKHRAllocator, Dispatch const & d ) const { - std::vector<DisplayKHR,Allocator> displays( vectorAllocator ); + std::vector<DisplayKHR, DisplayKHRAllocator> displays( displayKHRAllocator ); uint32_t displayCount; Result result; do @@ -87980,28 +90032,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && displayCount ) { displays.resize( displayCount ); - result = static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast<VkDisplayKHR*>( displays.data() ) ) ); + result = static_cast<Result>( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast<VkDisplayKHR *>( displays.data() ) ) ); + VULKAN_HPP_ASSERT( displayCount <= displays.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( displayCount < displays.size() ) ) { - VULKAN_HPP_ASSERT( displayCount <= displays.size() ); displays.resize( displayCount ); } return createResultValue( result, displays, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, pTimeDomainCount, reinterpret_cast<VkTimeDomainEXT*>( pTimeDomains ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, pTimeDomainCount, reinterpret_cast< VkTimeDomainEXT *>( pTimeDomains ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<TimeDomainEXT,Allocator>>::type PhysicalDevice::getCalibrateableTimeDomainsEXT(Dispatch const &d ) const + template <typename TimeDomainEXTAllocator, 
typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<TimeDomainEXT, TimeDomainEXTAllocator>>::type PhysicalDevice::getCalibrateableTimeDomainsEXT( Dispatch const & d ) const { - std::vector<TimeDomainEXT,Allocator> timeDomains; + std::vector<TimeDomainEXT, TimeDomainEXTAllocator> timeDomains; uint32_t timeDomainCount; Result result; do @@ -88010,20 +90064,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && timeDomainCount ) { timeDomains.resize( timeDomainCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast<VkTimeDomainEXT*>( timeDomains.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast<VkTimeDomainEXT *>( timeDomains.data() ) ) ); + VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( timeDomainCount < timeDomains.size() ) ) { - VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); timeDomains.resize( timeDomainCount ); } return createResultValue( result, timeDomains, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, TimeDomainEXT>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<TimeDomainEXT,Allocator>>::type PhysicalDevice::getCalibrateableTimeDomainsEXT(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename TimeDomainEXTAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, TimeDomainEXT>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<TimeDomainEXT, TimeDomainEXTAllocator>>::type PhysicalDevice::getCalibrateableTimeDomainsEXT( TimeDomainEXTAllocator & timeDomainEXTAllocator, Dispatch const & d ) const { - std::vector<TimeDomainEXT,Allocator> timeDomains( vectorAllocator ); + std::vector<TimeDomainEXT, TimeDomainEXTAllocator> timeDomains( timeDomainEXTAllocator ); uint32_t timeDomainCount; Result result; do @@ -88032,28 +90087,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && timeDomainCount ) { timeDomains.resize( timeDomainCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast<VkTimeDomainEXT*>( timeDomains.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast<VkTimeDomainEXT *>( timeDomains.data() ) ) ); + VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( timeDomainCount < timeDomains.size() ) ) { - VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); timeDomains.resize( timeDomainCount ); } return createResultValue( result, timeDomains, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, 
VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, pPropertyCount, reinterpret_cast<VkCooperativeMatrixPropertiesNV*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, pPropertyCount, reinterpret_cast< VkCooperativeMatrixPropertiesNV *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV,Allocator>>::type PhysicalDevice::getCooperativeMatrixPropertiesNV(Dispatch const &d ) const + template <typename CooperativeMatrixPropertiesNVAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator>>::type PhysicalDevice::getCooperativeMatrixPropertiesNV( Dispatch const & d ) const { - std::vector<CooperativeMatrixPropertiesNV,Allocator> properties; + std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator> properties; uint32_t propertyCount; Result result; do @@ -88062,20 +90119,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast<VkCooperativeMatrixPropertiesNV*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast<VkCooperativeMatrixPropertiesNV *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCooperativeMatrixPropertiesNV" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CooperativeMatrixPropertiesNV>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV,Allocator>>::type PhysicalDevice::getCooperativeMatrixPropertiesNV(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename CooperativeMatrixPropertiesNVAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CooperativeMatrixPropertiesNV>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator>>::type PhysicalDevice::getCooperativeMatrixPropertiesNV( CooperativeMatrixPropertiesNVAllocator & cooperativeMatrixPropertiesNVAllocator, Dispatch const & d ) const { - 
std::vector<CooperativeMatrixPropertiesNV,Allocator> properties( vectorAllocator ); + std::vector<CooperativeMatrixPropertiesNV, CooperativeMatrixPropertiesNVAllocator> properties( cooperativeMatrixPropertiesNVAllocator ); uint32_t propertyCount; Result result; do @@ -88084,43 +90142,47 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast<VkCooperativeMatrixPropertiesNV*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast<VkCooperativeMatrixPropertiesNV *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCooperativeMatrixPropertiesNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_DIRECTFB_EXT - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Bool32>( d.vkGetPhysicalDeviceDirectFBPresentationSupportEXT( m_physicalDevice, queueFamilyIndex, dfb ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetPhysicalDeviceDirectFBPresentationSupportEXT( m_physicalDevice, queueFamilyIndex, &dfb ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayPlaneProperties2KHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPlaneProperties2KHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename 
ResultValueType<std::vector<DisplayPlaneProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayPlaneProperties2KHR(Dispatch const &d ) const + template <typename DisplayPlaneProperties2KHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator>>::type PhysicalDevice::getDisplayPlaneProperties2KHR( Dispatch const & d ) const { - std::vector<DisplayPlaneProperties2KHR,Allocator> properties; + std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -88129,20 +90191,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlaneProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlaneProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneProperties2KHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlaneProperties2KHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlaneProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayPlaneProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayPlaneProperties2KHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlaneProperties2KHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator>>::type PhysicalDevice::getDisplayPlaneProperties2KHR( DisplayPlaneProperties2KHRAllocator & displayPlaneProperties2KHRAllocator, Dispatch const & d ) const { - std::vector<DisplayPlaneProperties2KHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayPlaneProperties2KHR, DisplayPlaneProperties2KHRAllocator> properties( displayPlaneProperties2KHRAllocator ); uint32_t propertyCount; Result result; do @@ -88151,28 +90214,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlaneProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlaneProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( 
propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneProperties2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPlanePropertiesKHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPlanePropertiesKHR(Dispatch const &d ) const + template <typename DisplayPlanePropertiesKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator>>::type PhysicalDevice::getDisplayPlanePropertiesKHR( Dispatch const & d ) const { - std::vector<DisplayPlanePropertiesKHR,Allocator> properties; + std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -88181,20 +90246,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlanePropertiesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlanePropertiesKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPlanePropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPlanePropertiesKHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayPlanePropertiesKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPlanePropertiesKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename 
ResultValueType<std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator>>::type PhysicalDevice::getDisplayPlanePropertiesKHR( DisplayPlanePropertiesKHRAllocator & displayPlanePropertiesKHRAllocator, Dispatch const & d ) const { - std::vector<DisplayPlanePropertiesKHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayPlanePropertiesKHR, DisplayPlanePropertiesKHRAllocator> properties( displayPlanePropertiesKHRAllocator ); uint32_t propertyCount; Result result; do @@ -88203,28 +90269,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPlanePropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlanePropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayProperties2KHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayProperties2KHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayProperties2KHR(Dispatch const &d ) const + template <typename DisplayProperties2KHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator>>::type PhysicalDevice::getDisplayProperties2KHR( Dispatch const & d ) const { - std::vector<DisplayProperties2KHR,Allocator> properties; + std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -88233,20 +90301,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, 
reinterpret_cast<VkDisplayProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayProperties2KHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayProperties2KHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayProperties2KHR,Allocator>>::type PhysicalDevice::getDisplayProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayProperties2KHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayProperties2KHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator>>::type PhysicalDevice::getDisplayProperties2KHR( DisplayProperties2KHRAllocator & displayProperties2KHRAllocator, Dispatch const & d ) const { - std::vector<DisplayProperties2KHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayProperties2KHR, DisplayProperties2KHRAllocator> properties( displayProperties2KHRAllocator ); uint32_t propertyCount; Result result; do @@ -88255,28 +90324,30 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayProperties2KHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayProperties2KHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayProperties2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast<VkDisplayPropertiesKHR*>( pProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPropertiesKHR *>( pProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - 
VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPropertiesKHR(Dispatch const &d ) const + template <typename DisplayPropertiesKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator>>::type PhysicalDevice::getDisplayPropertiesKHR( Dispatch const & d ) const { - std::vector<DisplayPropertiesKHR,Allocator> properties; + std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator> properties; uint32_t propertyCount; Result result; do @@ -88285,20 +90356,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPropertiesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPropertiesKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPropertiesKHR,Allocator>>::type PhysicalDevice::getDisplayPropertiesKHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename DisplayPropertiesKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, DisplayPropertiesKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator>>::type PhysicalDevice::getDisplayPropertiesKHR( DisplayPropertiesKHRAllocator & displayPropertiesKHRAllocator, Dispatch const & d ) const { - std::vector<DisplayPropertiesKHR,Allocator> properties( vectorAllocator ); + std::vector<DisplayPropertiesKHR, DisplayPropertiesKHRAllocator> properties( displayPropertiesKHRAllocator ); uint32_t propertyCount; Result result; do @@ -88307,392 +90379,490 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && propertyCount ) { properties.resize( propertyCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPropertiesKHR*>( properties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast<VkDisplayPropertiesKHR *>( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) { - VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); properties.resize( propertyCount ); } return createResultValue( result, properties, 
VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPropertiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo*>( pExternalBufferInfo ), reinterpret_cast<VkExternalBufferProperties*>( pExternalBufferProperties ) ); + d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo *>( pExternalBufferInfo ), reinterpret_cast< VkExternalBufferProperties *>( pExternalBufferProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalBufferProperties externalBufferProperties; - d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo*>( &externalBufferInfo ), reinterpret_cast<VkExternalBufferProperties*>( &externalBufferProperties ) ); + d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo *>( &externalBufferInfo ), reinterpret_cast<VkExternalBufferProperties *>( &externalBufferProperties ) ); return externalBufferProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo*>( pExternalBufferInfo ), reinterpret_cast<VkExternalBufferProperties*>( pExternalBufferProperties ) ); + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo *>( pExternalBufferInfo ), reinterpret_cast< VkExternalBufferProperties *>( pExternalBufferProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE 
VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalBufferProperties externalBufferProperties; - d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo*>( &externalBufferInfo ), reinterpret_cast<VkExternalBufferProperties*>( &externalBufferProperties ) ); + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo *>( &externalBufferInfo ), reinterpret_cast<VkExternalBufferProperties *>( &externalBufferProperties ) ); return externalBufferProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo*>( pExternalFenceInfo ), reinterpret_cast<VkExternalFenceProperties*>( pExternalFenceProperties ) ); + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo *>( pExternalFenceInfo ), reinterpret_cast< VkExternalFenceProperties *>( pExternalFenceProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalFenceProperties externalFenceProperties; - d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo*>( &externalFenceInfo ), reinterpret_cast<VkExternalFenceProperties*>( &externalFenceProperties ) ); + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo *>( &externalFenceInfo ), reinterpret_cast<VkExternalFenceProperties *>( &externalFenceProperties ) ); return externalFenceProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* 
pExternalFenceProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo*>( pExternalFenceInfo ), reinterpret_cast<VkExternalFenceProperties*>( pExternalFenceProperties ) ); + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo *>( pExternalFenceInfo ), reinterpret_cast< VkExternalFenceProperties *>( pExternalFenceProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalFenceProperties externalFenceProperties; - d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo*>( &externalFenceInfo ), reinterpret_cast<VkExternalFenceProperties*>( &externalFenceProperties ) ); + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo *>( &externalFenceInfo ), reinterpret_cast<VkExternalFenceProperties *>( &externalFenceProperties ) ); return externalFenceProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast<VkExternalImageFormatPropertiesNV*>( pExternalImageFormatProperties ) ) ); + 
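The getExternalBufferProperties, getExternalFenceProperties(KHR) and getExternalSemaphoreProperties(KHR) wrappers in this stretch are plain capability queries: the enhanced overloads return the properties struct by value and are now tagged VULKAN_HPP_NODISCARD so a dropped result is flagged. A small usage sketch, assuming a dispatcher with these entry points loaded; the variable names are illustrative:

    // Ask whether a storage buffer could be exported as an opaque POSIX fd.
    vk::PhysicalDeviceExternalBufferInfo externalBufferInfo;
    externalBufferInfo.usage      = vk::BufferUsageFlagBits::eStorageBuffer;
    externalBufferInfo.handleType = vk::ExternalMemoryHandleTypeFlagBits::eOpaqueFd;

    vk::ExternalBufferProperties externalBufferProperties =
        physicalDevice.getExternalBufferProperties( externalBufferInfo );

    bool exportable = static_cast<bool>(
        externalBufferProperties.externalMemoryProperties.externalMemoryFeatures &
        vk::ExternalMemoryFeatureFlagBits::eExportable );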
return static_cast<Result>( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast< VkExternalImageFormatPropertiesNV *>( pExternalImageFormatProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV>::type PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV>::type PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV externalImageFormatProperties; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast<VkExternalImageFormatPropertiesNV*>( &externalImageFormatProperties ) ) ); - return createResultValue( result, externalImageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getExternalImageFormatPropertiesNV" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), static_cast<VkExternalMemoryHandleTypeFlagsNV>( externalHandleType ), reinterpret_cast<VkExternalImageFormatPropertiesNV *>( &externalImageFormatProperties ) ) ); + return createResultValue( result, externalImageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo*>( pExternalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties*>( pExternalSemaphoreProperties ) ); + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo *>( pExternalSemaphoreInfo ), reinterpret_cast< VkExternalSemaphoreProperties *>( pExternalSemaphoreProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties externalSemaphoreProperties; - d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo*>( &externalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties*>( &externalSemaphoreProperties ) ); + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo *>( &externalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties *>( &externalSemaphoreProperties ) ); return externalSemaphoreProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo*>( pExternalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties*>( pExternalSemaphoreProperties ) ); + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo *>( pExternalSemaphoreInfo ), reinterpret_cast< VkExternalSemaphoreProperties *>( pExternalSemaphoreProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties 
externalSemaphoreProperties; - d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo*>( &externalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties*>( &externalSemaphoreProperties ) ); + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalSemaphoreInfo *>( &externalSemaphoreInfo ), reinterpret_cast<VkExternalSemaphoreProperties *>( &externalSemaphoreProperties ) ); return externalSemaphoreProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( pFeatures ) ); + d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures *>( pFeatures ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures PhysicalDevice::getFeatures(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures PhysicalDevice::getFeatures( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features; - d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( &features ) ); + d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures *>( &features ) ); return features; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( pFeatures ) ); + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures2 *>( pFeatures ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 features; - d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( &features ) ); + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2 *>( &features ) ); return features; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFeatures2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFeatures2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2& features = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2>(); - d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( &features ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 & features = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2>(); + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2 *>( &features ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( pFeatures ) ); + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures2 *>( pFeatures ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 features; - d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( &features ) ); + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2 *>( &features ) ); return features; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFeatures2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFeatures2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2& features = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2>(); - d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2*>( &features ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 & features = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2>(); + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures2 *>( &features ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( pFormatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast< VkFormatProperties *>( pFormatProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::FormatProperties formatProperties; - d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( &formatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties *>( &formatProperties ) ); return formatProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( pFormatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast< VkFormatProperties2 *>( pFormatProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + 
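The getFeatures2 and getFormatProperties2 families keep their StructureChain overloads and gain VULKAN_HPP_NODISCARD on the value-returning forms. A short sketch of how these queries are typically used; it assumes a Vulkan 1.2 capable device and the default vk namespace alias, and is not specific to this patch:

    // Pull the core feature struct and the Vulkan 1.2 feature struct in one query.
    auto chain = physicalDevice.getFeatures2<vk::PhysicalDeviceFeatures2,
                                             vk::PhysicalDeviceVulkan12Features>();
    const auto & features12 = chain.get<vk::PhysicalDeviceVulkan12Features>();
    bool hasTimelineSemaphores = ( features12.timelineSemaphore == VK_TRUE );

    // Plain format query: is D32_SFLOAT usable as an optimally tiled depth attachment?
    vk::FormatProperties formatProperties = physicalDevice.getFormatProperties( vk::Format::eD32Sfloat );
    bool depthOk = static_cast<bool>( formatProperties.optimalTilingFeatures &
                                      vk::FormatFeatureFlagBits::eDepthStencilAttachment );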
template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::FormatProperties2 formatProperties; - d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( &formatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2 *>( &formatProperties ) ); return formatProperties; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::FormatProperties2& formatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::FormatProperties2>(); - d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( &formatProperties ) ); + VULKAN_HPP_NAMESPACE::FormatProperties2 & formatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::FormatProperties2>(); + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2 *>( &formatProperties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( pFormatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast< VkFormatProperties2 *>( pFormatProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::FormatProperties2 formatProperties; - d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( &formatProperties ) ); + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2 *>( &formatProperties ) ); return formatProperties; } - template<typename X, 
typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::FormatProperties2& formatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::FormatProperties2>(); - d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2*>( &formatProperties ) ); + VULKAN_HPP_NAMESPACE::FormatProperties2 & formatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::FormatProperties2>(); + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties2 *>( &formatProperties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* pImageFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getFragmentShadingRatesKHR( uint32_t* pFragmentShadingRateCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast<Result>( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, pFragmentShadingRateCount, reinterpret_cast< VkPhysicalDeviceFragmentShadingRateKHR *>( pFragmentShadingRates ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename PhysicalDeviceFragmentShadingRateKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator>>::type PhysicalDevice::getFragmentShadingRatesKHR( Dispatch const & d ) const + { + std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator> fragmentShadingRates; + uint32_t fragmentShadingRateCount; + Result result; + do + { + result = static_cast<Result>( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && fragmentShadingRateCount ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + result = static_cast<Result>( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateKHR *>( fragmentShadingRates.data() ) ) ); + VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( fragmentShadingRateCount < fragmentShadingRates.size() ) ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + } + return createResultValue( result, fragmentShadingRates, 
VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getFragmentShadingRatesKHR" ); + } + + template <typename PhysicalDeviceFragmentShadingRateKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceFragmentShadingRateKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator>>::type PhysicalDevice::getFragmentShadingRatesKHR( PhysicalDeviceFragmentShadingRateKHRAllocator & physicalDeviceFragmentShadingRateKHRAllocator, Dispatch const & d ) const + { + std::vector<PhysicalDeviceFragmentShadingRateKHR, PhysicalDeviceFragmentShadingRateKHRAllocator> fragmentShadingRates( physicalDeviceFragmentShadingRateKHRAllocator ); + uint32_t fragmentShadingRateCount; + Result result; + do + { + result = static_cast<Result>( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && fragmentShadingRateCount ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + result = static_cast<Result>( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateKHR *>( fragmentShadingRates.data() ) ) ); + VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( fragmentShadingRateCount < fragmentShadingRates.size() ) ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + } + return createResultValue( result, fragmentShadingRates, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getFragmentShadingRatesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( pImageFormatProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast< VkImageFormatProperties *>( pImageFormatProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties>::type PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, Dispatch const &d ) const + template <typename Dispatch> + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties>::type PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( &imageFormatProperties ) ) ); - return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties *>( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( pImageFormatInfo ), reinterpret_cast<VkImageFormatProperties2*>( pImageFormatProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( pImageFormatInfo ), reinterpret_cast< VkImageFormatProperties2 *>( pImageFormatProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageFormatProperties2 imageFormatProperties; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( &imageFormatInfo ), 
reinterpret_cast<VkImageFormatProperties2*>( &imageFormatProperties ) ) ); - return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2 *>( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" ); } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::ImageFormatProperties2& imageFormatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>(); - Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2*>( &imageFormatProperties ) ) ); + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 & imageFormatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>(); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2 *>( &imageFormatProperties ) ) ); return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( pImageFormatInfo ), reinterpret_cast<VkImageFormatProperties2*>( pImageFormatProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( pImageFormatInfo ), reinterpret_cast< VkImageFormatProperties2 *>( pImageFormatProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::ImageFormatProperties2 imageFormatProperties; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2*>( &imageFormatProperties ) ) ); - return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2KHR" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2 *>( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" ); } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::ImageFormatProperties2& imageFormatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>(); - Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2*>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2*>( &imageFormatProperties ) ) ); + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 & imageFormatProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::ImageFormatProperties2>(); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceImageFormatInfo2 *>( &imageFormatInfo ), reinterpret_cast<VkImageFormatProperties2 *>( &imageFormatProperties ) ) ); return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, 
reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( pMemoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties *>( pMemoryProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties PhysicalDevice::getMemoryProperties(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties PhysicalDevice::getMemoryProperties( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties; - d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( &memoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties *>( &memoryProperties ) ); return memoryProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( pMemoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties2 *>( pMemoryProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 memoryProperties; - d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2 *>( &memoryProperties ) ); return memoryProperties; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getMemoryProperties2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getMemoryProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2& memoryProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2>(); - d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 & memoryProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2>(); + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2 *>( &memoryProperties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( pMemoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties2 *>( pMemoryProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 memoryProperties; - d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) ); + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2 *>( &memoryProperties ) ); return memoryProperties; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getMemoryProperties2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getMemoryProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2& memoryProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2>(); - d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 & memoryProperties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2>(); + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2 *>( &memoryProperties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast<VkSampleCountFlagBits>( samples ), reinterpret_cast<VkMultisamplePropertiesEXT*>( pMultisampleProperties ) ); + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast<VkSampleCountFlagBits>( samples ), reinterpret_cast< VkMultisamplePropertiesEXT *>( pMultisampleProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT multisampleProperties; - d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast<VkSampleCountFlagBits>( samples ), reinterpret_cast<VkMultisamplePropertiesEXT*>( &multisampleProperties ) ); + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast<VkSampleCountFlagBits>( samples ), reinterpret_cast<VkMultisamplePropertiesEXT *>( &multisampleProperties ) ); return multisampleProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( 
surface ), pRectCount, reinterpret_cast<VkRect2D*>( pRects ) ) ); + return static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pRectCount, reinterpret_cast< VkRect2D *>( pRects ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Rect2D,Allocator>>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename Rect2DAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Rect2D, Rect2DAllocator>>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { - std::vector<Rect2D,Allocator> rects; + std::vector<Rect2D, Rect2DAllocator> rects; uint32_t rectCount; Result result; do @@ -88701,20 +90871,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && rectCount ) { rects.resize( rectCount ); - result = static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, reinterpret_cast<VkRect2D*>( rects.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, reinterpret_cast<VkRect2D *>( rects.data() ) ) ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( rectCount < rects.size() ) ) { - VULKAN_HPP_ASSERT( rectCount <= rects.size() ); rects.resize( rectCount ); } return createResultValue( result, rects, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getPresentRectanglesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Rect2D>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Rect2D,Allocator>>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename Rect2DAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, Rect2D>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<Rect2D, Rect2DAllocator>>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Rect2DAllocator & rect2DAllocator, Dispatch const & d ) const { - std::vector<Rect2D,Allocator> rects( vectorAllocator ); + std::vector<Rect2D, Rect2DAllocator> rects( rect2DAllocator ); uint32_t rectCount; Result result; do @@ -88723,348 +90894,395 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && rectCount ) { rects.resize( rectCount ); - result = static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, reinterpret_cast<VkRect2D*>( rects.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &rectCount, reinterpret_cast<VkRect2D *>( rects.data() ) ) ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) 
&& ( rectCount < rects.size() ) ) { - VULKAN_HPP_ASSERT( rectCount <= rects.size() ); rects.resize( rectCount ); } return createResultValue( result, rects, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getPresentRectanglesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( pProperties ) ); + d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties PhysicalDevice::getProperties(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties PhysicalDevice::getProperties( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties; - d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( &properties ) ); + d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties *>( &properties ) ); return properties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( pProperties ) ); + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties2 *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties; - d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( &properties ) ); + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2 *>( &properties ) ); return properties; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getProperties2(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2& properties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2>(); - d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( &properties ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 & properties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2>(); + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2 *>( &properties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( pProperties ) ); + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties2 *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties; - d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( &properties ) ); + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2 *>( &properties ) ); return properties; } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getProperties2KHR(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename X, typename Y, typename... 
Z, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2& properties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2>(); - d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2*>( &properties ) ); + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 & properties = structureChain.template get<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2>(); + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties2 *>( &properties ) ); return structureChain; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>( pPerformanceQueryCreateInfo ), pNumPasses ); + d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR *>( pPerformanceQueryCreateInfo ), pNumPasses ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE uint32_t PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint32_t PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { uint32_t numPasses; - d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>( &performanceQueryCreateInfo ), &numPasses ); + d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR *>( &performanceQueryCreateInfo ), &numPasses ); return numPasses; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( pQueueFamilyProperties ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties *>( 
pQueueFamilyProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties,Allocator> PhysicalDevice::getQueueFamilyProperties(Dispatch const &d ) const + template <typename QueueFamilyPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties, QueueFamilyPropertiesAllocator> PhysicalDevice::getQueueFamilyProperties( Dispatch const & d ) const { - std::vector<QueueFamilyProperties,Allocator> queueFamilyProperties; + std::vector<QueueFamilyProperties, QueueFamilyPropertiesAllocator> queueFamilyProperties; uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties>::value, int>::type> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties,Allocator> PhysicalDevice::getQueueFamilyProperties(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename QueueFamilyPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties, QueueFamilyPropertiesAllocator> PhysicalDevice::getQueueFamilyProperties( QueueFamilyPropertiesAllocator & queueFamilyPropertiesAllocator, Dispatch const & d ) const { - std::vector<QueueFamilyProperties,Allocator> queueFamilyProperties( vectorAllocator ); + std::vector<QueueFamilyProperties, QueueFamilyPropertiesAllocator> queueFamilyProperties( queueFamilyPropertiesAllocator ); uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceQueueFamilyProperties2( 
m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( pQueueFamilyProperties ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties2 *>( pQueueFamilyProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2,Allocator> PhysicalDevice::getQueueFamilyProperties2(Dispatch const &d ) const + template <typename QueueFamilyProperties2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> PhysicalDevice::getQueueFamilyProperties2( Dispatch const & d ) const { - std::vector<QueueFamilyProperties2,Allocator> queueFamilyProperties; + std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> queueFamilyProperties; uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2,Allocator> PhysicalDevice::getQueueFamilyProperties2(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename QueueFamilyProperties2Allocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> PhysicalDevice::getQueueFamilyProperties2( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d ) const { - std::vector<QueueFamilyProperties2,Allocator> queueFamilyProperties( vectorAllocator ); + std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> queueFamilyProperties( queueFamilyProperties2Allocator ); uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } - template<typename StructureChain, typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<StructureChain,Allocator> PhysicalDevice::getQueueFamilyProperties2(Dispatch const &d ) const + + template <typename StructureChain, typename StructureChainAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE std::vector<StructureChain, StructureChainAllocator> PhysicalDevice::getQueueFamilyProperties2( Dispatch const & d ) const { - std::vector<StructureChain,Allocator> queueFamilyProperties; uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); - queueFamilyProperties.resize( queueFamilyPropertyCount ); - std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> localVector( queueFamilyPropertyCount ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + std::vector<StructureChain, StructureChainAllocator> returnVector( queueFamilyPropertyCount ); + std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - localVector[i].pNext = queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; + queueFamilyProperties[i].pNext = + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; } - d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( localVector.data() ) ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = localVector[i]; + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = queueFamilyProperties[i]; } - return queueFamilyProperties; + return returnVector; } - template<typename StructureChain, typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type> - VULKAN_HPP_INLINE std::vector<StructureChain,Allocator> PhysicalDevice::getQueueFamilyProperties2(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename StructureChain, typename StructureChainAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<StructureChain, StructureChainAllocator> PhysicalDevice::getQueueFamilyProperties2( StructureChainAllocator & structureChainAllocator, Dispatch const & d ) const { - std::vector<StructureChain,Allocator> queueFamilyProperties( vectorAllocator ); uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); - queueFamilyProperties.resize( queueFamilyPropertyCount ); - std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> localVector( queueFamilyPropertyCount ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + std::vector<StructureChain, StructureChainAllocator> returnVector( queueFamilyPropertyCount, structureChainAllocator ); + std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - localVector[i].pNext = queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; + queueFamilyProperties[i].pNext = + 
returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; } - d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( localVector.data() ) ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = localVector[i]; + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = queueFamilyProperties[i]; } - return queueFamilyProperties; + return returnVector; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( pQueueFamilyProperties ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties2 *>( pQueueFamilyProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2,Allocator> PhysicalDevice::getQueueFamilyProperties2KHR(Dispatch const &d ) const + template <typename QueueFamilyProperties2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> PhysicalDevice::getQueueFamilyProperties2KHR( Dispatch const & d ) const { - std::vector<QueueFamilyProperties2,Allocator> queueFamilyProperties; + std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> queueFamilyProperties; uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2,Allocator> PhysicalDevice::getQueueFamilyProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename QueueFamilyProperties2Allocator, typename Dispatch, typename B, typename 
std::enable_if<std::is_same<typename B::value_type, QueueFamilyProperties2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> PhysicalDevice::getQueueFamilyProperties2KHR( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d ) const { - std::vector<QueueFamilyProperties2,Allocator> queueFamilyProperties( vectorAllocator ); + std::vector<QueueFamilyProperties2, QueueFamilyProperties2Allocator> queueFamilyProperties( queueFamilyProperties2Allocator ); uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); queueFamilyProperties.resize( queueFamilyPropertyCount ); - d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( queueFamilyProperties.data() ) ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); return queueFamilyProperties; } - template<typename StructureChain, typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<StructureChain,Allocator> PhysicalDevice::getQueueFamilyProperties2KHR(Dispatch const &d ) const + + template <typename StructureChain, typename StructureChainAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<StructureChain, StructureChainAllocator> PhysicalDevice::getQueueFamilyProperties2KHR( Dispatch const & d ) const { - std::vector<StructureChain,Allocator> queueFamilyProperties; uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); - queueFamilyProperties.resize( queueFamilyPropertyCount ); - std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> localVector( queueFamilyPropertyCount ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + std::vector<StructureChain, StructureChainAllocator> returnVector( queueFamilyPropertyCount ); + std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - localVector[i].pNext = queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; + queueFamilyProperties[i].pNext = + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; } - d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( localVector.data() ) ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = localVector[i]; + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = queueFamilyProperties[i]; } - return queueFamilyProperties; + return returnVector; } - template<typename StructureChain, typename Allocator , typename Dispatch, typename B, typename 
std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type> - VULKAN_HPP_INLINE std::vector<StructureChain,Allocator> PhysicalDevice::getQueueFamilyProperties2KHR(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename StructureChain, typename StructureChainAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, StructureChain>::value, int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<StructureChain, StructureChainAllocator> PhysicalDevice::getQueueFamilyProperties2KHR( StructureChainAllocator & structureChainAllocator, Dispatch const & d ) const { - std::vector<StructureChain,Allocator> queueFamilyProperties( vectorAllocator ); uint32_t queueFamilyPropertyCount; d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); - queueFamilyProperties.resize( queueFamilyPropertyCount ); - std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> localVector( queueFamilyPropertyCount ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + std::vector<StructureChain, StructureChainAllocator> returnVector( queueFamilyPropertyCount, structureChainAllocator ); + std::vector<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2> queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - localVector[i].pNext = queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; + queueFamilyProperties[i].pNext = + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>().pNext; } - d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2*>( localVector.data() ) ); - for ( uint32_t i = 0; i < queueFamilyPropertyCount ; i++ ) + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties2 *>( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) { - queueFamilyProperties[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = localVector[i]; + returnVector[i].template get<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2>() = queueFamilyProperties[i]; } - return queueFamilyProperties; + return returnVector; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type 
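For orientation: the hunks above rework the vector-returning getQueueFamilyProperties2KHR wrappers so that the element allocator is a named template parameter, the results are marked nodiscard, an assertion checks the reported count against the vector size, and the StructureChain variants relink each element's pNext through a temporary QueueFamilyProperties2 vector before the second query. A minimal caller-side sketch of the unchanged usage, assuming a valid vk::PhysicalDevice on Vulkan 1.1 or later; the core getQueueFamilyProperties2 wrapper is generated the same way as the KHR alias in this diff, and the helper function name below is illustrative, not part of the patch:

#include <vulkan/vulkan.hpp>

void inspectQueueFamilies( vk::PhysicalDevice physicalDevice )
{
  // Enhanced-mode query: one vk::QueueFamilyProperties2 per queue family,
  // allocator and dispatcher template parameters left at their defaults.
  std::vector<vk::QueueFamilyProperties2> families = physicalDevice.getQueueFamilyProperties2();

  for ( auto const & family : families )
  {
    vk::QueueFamilyProperties const & props = family.queueFamilyProperties;
    if ( props.queueFlags & vk::QueueFlagBits::eGraphics )
    {
      // A StructureChain element type could be used instead; as the diff shows,
      // the wrapper then copies the chained pNext pointers through a temporary vector.
      break;
    }
  }
}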
), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( pProperties ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties,Allocator> PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const &d ) const + template <typename SparseImageFormatPropertiesAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties,Allocator> properties; + std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> properties; uint32_t propertyCount; d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties,Allocator> PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SparseImageFormatPropertiesAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties>::value, 
int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, SparseImageFormatPropertiesAllocator & sparseImageFormatPropertiesAllocator, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties,Allocator> properties( vectorAllocator ); + std::vector<SparseImageFormatProperties, SparseImageFormatPropertiesAllocator> properties( sparseImageFormatPropertiesAllocator ); uint32_t propertyCount; d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( pFormatInfo ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( pProperties ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( pFormatInfo ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties2 *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d ) const + template <typename SparseImageFormatProperties2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> 
PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties2,Allocator> properties; + std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> properties; uint32_t propertyCount; - d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2 *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SparseImageFormatProperties2Allocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties2,Allocator> properties( vectorAllocator ); + std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> properties( sparseImageFormatProperties2Allocator ); uint32_t propertyCount; - d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2 *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void 
PhysicalDevice::getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( pFormatInfo ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( pProperties ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( pFormatInfo ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties2 *>( pProperties ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d ) const + template <typename SparseImageFormatProperties2Allocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties2,Allocator> properties; + std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> properties; uint32_t propertyCount; - d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2 *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type> - VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SparseImageFormatProperties2Allocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SparseImageFormatProperties2>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d ) const { - std::vector<SparseImageFormatProperties2,Allocator> properties( vectorAllocator ); + std::vector<SparseImageFormatProperties2, SparseImageFormatProperties2Allocator> properties( sparseImageFormatProperties2Allocator ); uint32_t propertyCount; - d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, nullptr ); properties.resize( propertyCount ); - d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2 *>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2 *>( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); return properties; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, pCombinationCount, reinterpret_cast<VkFramebufferMixedSamplesCombinationNV*>( pCombinations ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, pCombinationCount, reinterpret_cast< VkFramebufferMixedSamplesCombinationNV *>( pCombinations ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV,Allocator>>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV(Dispatch const &d ) const + template <typename FramebufferMixedSamplesCombinationNVAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator>>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( Dispatch const & d ) const { - std::vector<FramebufferMixedSamplesCombinationNV,Allocator> combinations; + std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator> combinations; uint32_t combinationCount; Result result; do @@ -89073,20 +91291,21 @@ namespace VULKAN_HPP_NAMESPACE if ( 
( result == Result::eSuccess ) && combinationCount ) { combinations.resize( combinationCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast<VkFramebufferMixedSamplesCombinationNV*>( combinations.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast<VkFramebufferMixedSamplesCombinationNV *>( combinations.data() ) ) ); + VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( combinationCount < combinations.size() ) ) { - VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); combinations.resize( combinationCount ); } return createResultValue( result, combinations, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, FramebufferMixedSamplesCombinationNV>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV,Allocator>>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename FramebufferMixedSamplesCombinationNVAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, FramebufferMixedSamplesCombinationNV>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator>>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( FramebufferMixedSamplesCombinationNVAllocator & framebufferMixedSamplesCombinationNVAllocator, Dispatch const & d ) const { - std::vector<FramebufferMixedSamplesCombinationNV,Allocator> combinations( vectorAllocator ); + std::vector<FramebufferMixedSamplesCombinationNV, FramebufferMixedSamplesCombinationNVAllocator> combinations( framebufferMixedSamplesCombinationNVAllocator ); uint32_t combinationCount; Result result; do @@ -89095,133 +91314,145 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && combinationCount ) { combinations.resize( combinationCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast<VkFramebufferMixedSamplesCombinationNV*>( combinations.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast<VkFramebufferMixedSamplesCombinationNV *>( combinations.data() ) ) ); + VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( combinationCount < combinations.size() ) ) { - VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); combinations.resize( combinationCount ); } return createResultValue( result, combinations, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - 
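The getSupportedFramebufferMixedSamplesCombinationsNV wrappers above keep the usual two-call enumeration loop (query count, resize, query data, retry on vk::Result::eIncomplete) and now only shrink the vector when the final count is strictly smaller than its size. A usage sketch under the assumption that the VK_NV_coverage_reduction_mode device extension is available and its entry point is loaded into the dispatcher; the helper name is illustrative:

#include <vulkan/vulkan.hpp>

void listMixedSamplesCombinations( vk::PhysicalDevice physicalDevice )
{
  // The count/resize/eIncomplete retry loop from the diff runs inside this wrapper;
  // with exceptions enabled it throws vk::SystemError on failure.
  std::vector<vk::FramebufferMixedSamplesCombinationNV> combinations =
    physicalDevice.getSupportedFramebufferMixedSamplesCombinationsNV();

  for ( auto const & combination : combinations )
  {
    // Each entry pairs a coverage reduction mode with the supported sample counts.
    (void)combination.coverageReductionMode;
    (void)combination.rasterizationSamples;
  }
}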
VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilities2EXT*>( pSurfaceCapabilities ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast< VkSurfaceCapabilities2EXT *>( pSurfaceCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT>::type PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT>::type PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT surfaceCapabilities; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilities2EXT*>( &surfaceCapabilities ) ) ); - return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2EXT" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilities2EXT *>( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR*>( pSurfaceCapabilities ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( pSurfaceInfo ), reinterpret_cast< VkSurfaceCapabilities2KHR *>( pSurfaceCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> 
- VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR surfaceCapabilities; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR*>( &surfaceCapabilities ) ) ); - return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2KHR" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR *>( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" ); } - template<typename X, typename Y, typename ...Z, typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + + template <typename X, typename Y, typename... Z, typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<StructureChain<X, Y, Z...>>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const { StructureChain<X, Y, Z...> structureChain; - VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR& surfaceCapabilities = structureChain.template get<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>(); - Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR*>( &surfaceCapabilities ) ) ); + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR & surfaceCapabilities = structureChain.template get<VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR>(); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), reinterpret_cast<VkSurfaceCapabilities2KHR *>( &surfaceCapabilities ) ) ); return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, 
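The getSurfaceCapabilities2KHR wrappers above are largely reformatted and keep their calling convention: a plain overload returning the base structure and a chained overload returning a StructureChain filled through the pNext links. A sketch of both, assuming the VK_KHR_get_surface_capabilities2 and VK_KHR_surface_protected_capabilities instance extensions and a valid vk::SurfaceKHR; the chained structure and helper name are just illustrative choices:

#include <vulkan/vulkan.hpp>

void querySurfaceCapabilities( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR surface )
{
  vk::PhysicalDeviceSurfaceInfo2KHR surfaceInfo( surface );

  // Plain overload: returns just the base structure.
  vk::SurfaceCapabilities2KHR capabilities = physicalDevice.getSurfaceCapabilities2KHR( surfaceInfo );

  // Chained overload: the wrapper fills every structure reachable through pNext.
  auto chain = physicalDevice.getSurfaceCapabilities2KHR<vk::SurfaceCapabilities2KHR,
                                                         vk::SurfaceProtectedCapabilitiesKHR>( surfaceInfo );
  vk::Bool32 supportsProtected = chain.get<vk::SurfaceProtectedCapabilitiesKHR>().supportsProtected;

  (void)capabilities;
  (void)supportsProtected;
}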
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilitiesKHR*>( pSurfaceCapabilities ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast< VkSurfaceCapabilitiesKHR *>( pSurfaceCapabilities ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR>::type PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR>::type PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilitiesKHR*>( &surfaceCapabilities ) ) ); - return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilitiesKHR" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkSurfaceCapabilitiesKHR *>( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), pSurfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR*>( pSurfaceFormats ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( pSurfaceInfo ), pSurfaceFormatCount, reinterpret_cast< VkSurfaceFormat2KHR *>( pSurfaceFormats ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + template <typename SurfaceFormat2KHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator>>::type 
PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const { - std::vector<SurfaceFormat2KHR,Allocator> surfaceFormats; + std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator> surfaceFormats; uint32_t surfaceFormatCount; Result result; do { - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); if ( ( result == Result::eSuccess ) && surfaceFormatCount ) { surfaceFormats.resize( surfaceFormatCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR*>( surfaceFormats.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR *>( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) { - VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); surfaceFormats.resize( surfaceFormatCount ); } return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormats2KHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormat2KHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormat2KHR,Allocator>>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SurfaceFormat2KHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormat2KHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator>>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, SurfaceFormat2KHRAllocator & surfaceFormat2KHRAllocator, Dispatch const & d ) const { - std::vector<SurfaceFormat2KHR,Allocator> surfaceFormats( vectorAllocator ); + std::vector<SurfaceFormat2KHR, SurfaceFormat2KHRAllocator> surfaceFormats( surfaceFormat2KHRAllocator ); uint32_t surfaceFormatCount; Result result; do { - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); if ( ( result == Result::eSuccess ) && surfaceFormatCount ) { surfaceFormats.resize( surfaceFormatCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, 
reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR*>( surfaceFormats.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormat2KHR *>( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) { - VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); surfaceFormats.resize( surfaceFormatCount ); } return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormats2KHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* pSurfaceFormats, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* pSurfaceFormats, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pSurfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR*>( pSurfaceFormats ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pSurfaceFormatCount, reinterpret_cast< VkSurfaceFormatKHR *>( pSurfaceFormats ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename SurfaceFormatKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator>>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { - std::vector<SurfaceFormatKHR,Allocator> surfaceFormats; + std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator> surfaceFormats; uint32_t surfaceFormatCount; Result result; do @@ -89230,20 +91461,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && surfaceFormatCount ) { surfaceFormats.resize( surfaceFormatCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR*>( surfaceFormats.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR *>( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( 
surfaceFormatCount < surfaceFormats.size() ) ) { - VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); surfaceFormats.resize( surfaceFormatCount ); } return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormatsKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormatKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormatKHR,Allocator>>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename SurfaceFormatKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, SurfaceFormatKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator>>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, SurfaceFormatKHRAllocator & surfaceFormatKHRAllocator, Dispatch const & d ) const { - std::vector<SurfaceFormatKHR,Allocator> surfaceFormats( vectorAllocator ); + std::vector<SurfaceFormatKHR, SurfaceFormatKHRAllocator> surfaceFormats( surfaceFormatKHRAllocator ); uint32_t surfaceFormatCount; Result result; do @@ -89252,65 +91484,68 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && surfaceFormatCount ) { surfaceFormats.resize( surfaceFormatCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR*>( surfaceFormats.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &surfaceFormatCount, reinterpret_cast<VkSurfaceFormatKHR *>( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) { - VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); surfaceFormats.resize( surfaceFormatCount ); } return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormatsKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( pSurfaceInfo ), pPresentModeCount, reinterpret_cast<VkPresentModeKHR*>( pPresentModes ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, 
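getSurfaceFormats2KHR and getSurfaceFormatsKHR above follow the same pattern: the assertion moves inside the retry loop and the final resize only happens when the driver reported fewer formats than were allocated. Typical caller code is unaffected; a common selection sketch, assuming a valid vk::SurfaceKHR created through VK_KHR_surface (the format preferences and helper name are illustrative):

#include <vulkan/vulkan.hpp>

vk::SurfaceFormatKHR pickSurfaceFormat( vk::PhysicalDevice physicalDevice, vk::SurfaceKHR surface )
{
  // The eIncomplete retry loop shown in the diff is hidden inside the wrapper;
  // with exceptions enabled, failures surface as vk::SystemError.
  std::vector<vk::SurfaceFormatKHR> formats = physicalDevice.getSurfaceFormatsKHR( surface );

  for ( auto const & format : formats )
  {
    if ( ( format.format == vk::Format::eB8G8R8A8Srgb ) &&
         ( format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear ) )
    {
      return format;
    }
  }
  // The specification guarantees at least one supported format pair.
  return formats.front();
}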
reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( pSurfaceInfo ), pPresentModeCount, reinterpret_cast< VkPresentModeKHR *>( pPresentModes ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + template <typename PresentModeKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const { - std::vector<PresentModeKHR,Allocator> presentModes; + std::vector<PresentModeKHR, PresentModeKHRAllocator> presentModes; uint32_t presentModeCount; Result result; do { - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &presentModeCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &presentModeCount, nullptr ) ); if ( ( result == Result::eSuccess ) && presentModeCount ) { presentModes.resize( presentModeCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &presentModeCount, reinterpret_cast<VkPresentModeKHR*>( presentModes.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &presentModeCount, reinterpret_cast<VkPresentModeKHR *>( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) { - VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); presentModes.resize( presentModeCount ); } return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModes2EXT" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PresentModeKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d ) const { - std::vector<PresentModeKHR,Allocator> presentModes( vectorAllocator ); + std::vector<PresentModeKHR, PresentModeKHRAllocator> presentModes( presentModeKHRAllocator ); uint32_t presentModeCount; Result 
result; do { - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &presentModeCount, nullptr ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &presentModeCount, nullptr ) ); if ( ( result == Result::eSuccess ) && presentModeCount ) { presentModes.resize( presentModeCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR*>( &surfaceInfo ), &presentModeCount, reinterpret_cast<VkPresentModeKHR*>( presentModes.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSurfaceInfo2KHR *>( &surfaceInfo ), &presentModeCount, reinterpret_cast<VkPresentModeKHR *>( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) { - VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); presentModes.resize( presentModeCount ); } return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModes2EXT" ); @@ -89318,16 +91553,18 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pPresentModeCount, reinterpret_cast<VkPresentModeKHR*>( pPresentModes ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), pPresentModeCount, reinterpret_cast< VkPresentModeKHR *>( pPresentModes ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename PresentModeKHRAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { - std::vector<PresentModeKHR,Allocator> presentModes; + std::vector<PresentModeKHR, PresentModeKHRAllocator> presentModes; uint32_t presentModeCount; Result result; do @@ -89336,20 +91573,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && presentModeCount ) { 
presentModes.resize( presentModeCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, reinterpret_cast<VkPresentModeKHR*>( presentModes.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, reinterpret_cast<VkPresentModeKHR *>( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) { - VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); presentModes.resize( presentModeCount ); } return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModesKHR" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR,Allocator>>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PresentModeKHRAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PresentModeKHR>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PresentModeKHR, PresentModeKHRAllocator>>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d ) const { - std::vector<PresentModeKHR,Allocator> presentModes( vectorAllocator ); + std::vector<PresentModeKHR, PresentModeKHRAllocator> presentModes( presentModeKHRAllocator ); uint32_t presentModeCount; Result result; do @@ -89358,43 +91596,47 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && presentModeCount ) { presentModes.resize( presentModeCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, reinterpret_cast<VkPresentModeKHR*>( presentModes.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast<VkSurfaceKHR>( surface ), &presentModeCount, reinterpret_cast<VkPresentModeKHR *>( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) { - VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); presentModes.resize( presentModeCount ); } return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModesKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceSupportKHR( uint32_t 
queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkBool32*>( pSupported ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast< VkBool32 *>( pSupported ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Bool32>::type PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Bool32>::type PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::Bool32 supported; - Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkBool32*>( &supported ) ) ); - return createResultValue( result, supported, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceSupportKHR" ); + Result result = static_cast<Result>( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast<VkSurfaceKHR>( surface ), reinterpret_cast<VkBool32 *>( &supported ) ) ); + return createResultValue( result, supported, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, pToolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT*>( pToolProperties ) ) ); + return static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, pToolCount, reinterpret_cast< VkPhysicalDeviceToolPropertiesEXT *>( pToolProperties ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT,Allocator>>::type PhysicalDevice::getToolPropertiesEXT(Dispatch const &d ) const + template <typename PhysicalDeviceToolPropertiesEXTAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type PhysicalDevice::getToolPropertiesEXT( Dispatch const & d ) const { - std::vector<PhysicalDeviceToolPropertiesEXT,Allocator> toolProperties; + std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator> toolProperties; uint32_t toolCount; Result 
result; do @@ -89403,20 +91645,21 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && toolCount ) { toolProperties.resize( toolCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT*>( toolProperties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT *>( toolProperties.data() ) ) ); + VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( toolCount < toolProperties.size() ) ) { - VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() ); toolProperties.resize( toolCount ); } return createResultValue( result, toolProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getToolPropertiesEXT" ); } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceToolPropertiesEXT>::value, int>::type> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT,Allocator>>::type PhysicalDevice::getToolPropertiesEXT(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename PhysicalDeviceToolPropertiesEXTAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceToolPropertiesEXT>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type PhysicalDevice::getToolPropertiesEXT( PhysicalDeviceToolPropertiesEXTAllocator & physicalDeviceToolPropertiesEXTAllocator, Dispatch const & d ) const { - std::vector<PhysicalDeviceToolPropertiesEXT,Allocator> toolProperties( vectorAllocator ); + std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator> toolProperties( physicalDeviceToolPropertiesEXTAllocator ); uint32_t toolCount; Result result; do @@ -89425,250 +91668,276 @@ namespace VULKAN_HPP_NAMESPACE if ( ( result == Result::eSuccess ) && toolCount ) { toolProperties.resize( toolCount ); - result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT*>( toolProperties.data() ) ) ); + result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT *>( toolProperties.data() ) ) ); + VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() ); } } while ( result == Result::eIncomplete ); - if ( result == Result::eSuccess ) + if ( ( result == Result::eSuccess ) && ( toolCount < toolProperties.size() ) ) { - VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() ); toolProperties.resize( toolCount ); } return createResultValue( result, toolProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getToolPropertiesEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_WAYLAND_KHR - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t 
queueFamilyIndex, struct wl_display* display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Bool32>( d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, display ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &display ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + #ifdef VK_USE_PLATFORM_WIN32_KHR -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Bool32>( d.vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex ) ); } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT - { - return d.vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex ); - } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_WIN32_KHR*/ + #ifdef VK_USE_PLATFORM_XCB_KHR - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Bool32>( d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection, visual_id ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection, visual_id ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XCB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_KHR - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, 
VisualID visualID, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Bool32>( d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, dpy, visualID ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &dpy, visualID ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VK_USE_PLATFORM_XLIB_KHR*/ + #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT - template<typename Dispatch> - VULKAN_HPP_INLINE Result PhysicalDevice::getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, dpy, rrOutput, reinterpret_cast<VkDisplayKHR*>( pDisplay ) ) ); + return static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, dpy, rrOutput, reinterpret_cast< VkDisplayKHR *>( pDisplay ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayKHR>::type PhysicalDevice::getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayKHR>::type PhysicalDevice::getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const & d ) const { VULKAN_HPP_NAMESPACE::DisplayKHR display; - Result result = static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR*>( &display ) ) ); - return createResultValue( result, display, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getRandROutputDisplayEXT" ); + Result result = static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR *>( &display ) ) ); + return createResultValue( result, display, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXT" ); } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::DisplayKHR, Dispatch>>::type PhysicalDevice::getRandROutputDisplayEXTUnique( Display & dpy, RROutput rrOutput, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayKHR display; + Result result = static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR *>( &display ) ) ); + ObjectRelease<PhysicalDevice, Dispatch> deleter( *this, d ); + return createResultValue<VULKAN_HPP_NAMESPACE::DisplayKHR, Dispatch>( result, display, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ 
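A minimal usage sketch for the getRandROutputDisplayEXTUnique overload introduced in this hunk, assuming VK_USE_PLATFORM_XLIB_XRANDR_EXT is defined, exceptions and smart handles are enabled, the VK_EXT_acquire_xlib_display extension is available on the instance, and that physicalDevice, an X11 Display *x11Display and an RROutput output already exist (those three names are illustrative, not taken from the diff):

  // Look up the vk::DisplayKHR behind an RandR output; the Unique variant attaches the
  // ObjectRelease<PhysicalDevice> deleter shown above, so the display is released again
  // automatically when the handle goes out of scope.
  auto display = physicalDevice.getRandROutputDisplayEXTUnique( *x11Display, output );
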
#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE Result PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE Result PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_INLINE typename ResultValueType<void>::type PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_INLINE typename ResultValueType<void>::type PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::releaseDisplayEXT" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::releaseDisplayEXT" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkGetQueueCheckpointDataNV( m_queue, pCheckpointDataCount, reinterpret_cast<VkCheckpointDataNV*>( pCheckpointData ) ); + d.vkGetQueueCheckpointDataNV( m_queue, pCheckpointDataCount, reinterpret_cast< VkCheckpointDataNV *>( pCheckpointData ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Allocator , typename Dispatch> - VULKAN_HPP_INLINE std::vector<CheckpointDataNV,Allocator> Queue::getCheckpointDataNV(Dispatch const &d ) const + template <typename CheckpointDataNVAllocator, typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<CheckpointDataNV, CheckpointDataNVAllocator> Queue::getCheckpointDataNV( Dispatch const & d ) const { - std::vector<CheckpointDataNV,Allocator> checkpointData; + std::vector<CheckpointDataNV, CheckpointDataNVAllocator> checkpointData; uint32_t checkpointDataCount; d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, nullptr ); checkpointData.resize( checkpointDataCount ); - d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV*>( checkpointData.data() ) ); + d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV *>( checkpointData.data() ) ); + VULKAN_HPP_ASSERT( checkpointDataCount <= checkpointData.size() ); return checkpointData; } - template<typename Allocator , typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CheckpointDataNV>::value, int>::type> - VULKAN_HPP_INLINE std::vector<CheckpointDataNV,Allocator> Queue::getCheckpointDataNV(Allocator const& vectorAllocator, Dispatch const &d ) const + + template <typename CheckpointDataNVAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, 
CheckpointDataNV>::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<CheckpointDataNV, CheckpointDataNVAllocator> Queue::getCheckpointDataNV( CheckpointDataNVAllocator & checkpointDataNVAllocator, Dispatch const & d ) const { - std::vector<CheckpointDataNV,Allocator> checkpointData( vectorAllocator ); + std::vector<CheckpointDataNV, CheckpointDataNVAllocator> checkpointData( checkpointDataNVAllocator ); uint32_t checkpointDataCount; d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, nullptr ); checkpointData.resize( checkpointDataCount ); - d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV*>( checkpointData.data() ) ); + d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV *>( checkpointData.data() ) ); + VULKAN_HPP_ASSERT( checkpointDataCount <= checkpointData.size() ); return checkpointData; } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT*>( pLabelInfo ) ); + d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT*>( &labelInfo ) ); + d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfoCount, reinterpret_cast<const VkBindSparseInfo*>( pBindInfo ), static_cast<VkFence>( fence ) ) ); + return static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfoCount, reinterpret_cast<const VkBindSparseInfo *>( pBindInfo ), static_cast<VkFence>( fence ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::bindSparse( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindSparseInfo> const &bindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type 
Queue::bindSparse( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindSparseInfo> const & bindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfo.size() , reinterpret_cast<const VkBindSparseInfo*>( bindInfo.data() ), static_cast<VkFence>( fence ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::bindSparse" ); + Result result = static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfo.size(), reinterpret_cast<const VkBindSparseInfo *>( bindInfo.data() ), static_cast<VkFence>( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT(Dispatch const &d) const VULKAN_HPP_NOEXCEPT - { - d.vkQueueEndDebugUtilsLabelEXT( m_queue ); - } -#else - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT(Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { d.vkQueueEndDebugUtilsLabelEXT( m_queue ); } -#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT*>( pLabelInfo ) ); + d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT*>( &labelInfo ) ); + d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR*>( pPresentInfo ) ) ); + return static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR *>( pPresentInfo ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR & presentInfo, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Queue::presentKHR( const PresentInfoKHR & presentInfo, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR*>( &presentInfo ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::presentKHR", { Result::eSuccess, Result::eSuboptimalKHR } ); + Result result = static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR *>( &presentInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::presentKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkQueueSetPerformanceConfigurationINTEL( m_queue, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const { Result result = static_cast<Result>( d.vkQueueSetPerformanceConfigurationINTEL( m_queue, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::setPerformanceConfigurationINTEL" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d) const VULKAN_HPP_NOEXCEPT + + + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - return static_cast<Result>( d.vkQueueSubmit( m_queue, submitCount, reinterpret_cast<const VkSubmitInfo*>( pSubmits ), static_cast<VkFence>( fence ) ) ); + return static_cast<Result>( d.vkQueueSubmit( m_queue, submitCount, reinterpret_cast<const VkSubmitInfo *>( pSubmits ), static_cast<VkFence>( fence ) ) ); } + #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::submit( ArrayProxy<const VULKAN_HPP_NAMESPACE::SubmitInfo> const &submits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType<void>::type Queue::submit( ArrayProxy<const VULKAN_HPP_NAMESPACE::SubmitInfo> const & submits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const { - Result result = static_cast<Result>( d.vkQueueSubmit( m_queue, submits.size() , reinterpret_cast<const VkSubmitInfo*>( submits.data() ), static_cast<VkFence>( fence ) ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::submit" ); + Result result = static_cast<Result>( d.vkQueueSubmit( m_queue, submits.size(), reinterpret_cast<const VkSubmitInfo *>( submits.data() ), static_cast<VkFence>( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE - template<typename Dispatch> - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::waitIdle(Dispatch const &d) const VULKAN_HPP_NOEXCEPT + template <typename Dispatch> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::waitIdle( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { return static_cast<Result>( d.vkQueueWaitIdle( m_queue ) ); } #else - template<typename Dispatch> - VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::waitIdle(Dispatch const &d ) const + template <typename Dispatch> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::waitIdle( Dispatch const & d ) const { Result result = static_cast<Result>( d.vkQueueWaitIdle( m_queue ) ); - return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::waitIdle" ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + #ifdef VK_USE_PLATFORM_ANDROID_KHR template <> struct StructExtends<AndroidHardwareBufferFormatPropertiesANDROID, AndroidHardwareBufferPropertiesANDROID>{ enum { value = true }; }; #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ @@ -89685,6 +91954,8 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<BufferOpaqueCaptureAddressCreateInfo, BufferCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<CommandBufferInheritanceConditionalRenderingInfoEXT, CommandBufferInheritanceInfo>{ enum { value = true }; }; template <> struct StructExtends<CommandBufferInheritanceRenderPassTransformInfoQCOM, CommandBufferInheritanceInfo>{ enum { value = true }; }; + template <> struct StructExtends<CopyCommandTransformInfoQCOM, BufferImageCopy2KHR>{ enum { value = true }; }; + template <> struct StructExtends<CopyCommandTransformInfoQCOM, ImageBlit2KHR>{ enum { value = true }; }; #ifdef VK_USE_PLATFORM_WIN32_KHR template <> struct StructExtends<D3D12FenceSubmitInfoKHR, SubmitInfo>{ enum { value = true }; }; #endif /*VK_USE_PLATFORM_WIN32_KHR*/ @@ -89693,17 +91964,11 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<DedicatedAllocationBufferCreateInfoNV, BufferCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DedicatedAllocationImageCreateInfoNV, ImageCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DedicatedAllocationMemoryAllocateInfoNV, MemoryAllocateInfo>{ enum { value = true }; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS - template <> struct StructExtends<DeferredOperationInfoKHR, RayTracingPipelineCreateInfoKHR>{ enum { value = true }; }; - template <> struct StructExtends<DeferredOperationInfoKHR, AccelerationStructureBuildGeometryInfoKHR>{ enum { value = true }; }; - template <> struct 
StructExtends<DeferredOperationInfoKHR, CopyAccelerationStructureInfoKHR>{ enum { value = true }; }; - template <> struct StructExtends<DeferredOperationInfoKHR, CopyMemoryToAccelerationStructureInfoKHR>{ enum { value = true }; }; - template <> struct StructExtends<DeferredOperationInfoKHR, CopyAccelerationStructureToMemoryInfoKHR>{ enum { value = true }; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <> struct StructExtends<DescriptorPoolInlineUniformBlockCreateInfoEXT, DescriptorPoolCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DescriptorSetLayoutBindingFlagsCreateInfo, DescriptorSetLayoutCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DescriptorSetVariableDescriptorCountAllocateInfo, DescriptorSetAllocateInfo>{ enum { value = true }; }; template <> struct StructExtends<DescriptorSetVariableDescriptorCountLayoutSupport, DescriptorSetLayoutSupport>{ enum { value = true }; }; + template <> struct StructExtends<DeviceDeviceMemoryReportCreateInfoEXT, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DeviceDiagnosticsConfigCreateInfoNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<DeviceGroupBindSparseInfo, BindSparseInfo>{ enum { value = true }; }; template <> struct StructExtends<DeviceGroupCommandBufferBeginInfo, CommandBufferBeginInfo>{ enum { value = true }; }; @@ -89743,6 +92008,7 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<ExternalMemoryImageCreateInfo, ImageCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<ExternalMemoryImageCreateInfoNV, ImageCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<FilterCubicImageViewImageFormatPropertiesEXT, ImageFormatProperties2>{ enum { value = true }; }; + template <> struct StructExtends<FragmentShadingRateAttachmentInfoKHR, SubpassDescription2>{ enum { value = true }; }; template <> struct StructExtends<FramebufferAttachmentsCreateInfo, FramebufferCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<GraphicsPipelineShaderGroupsCreateInfoNV, GraphicsPipelineCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<ImageDrmFormatModifierExplicitCreateInfoEXT, ImageCreateInfo>{ enum { value = true }; }; @@ -89781,6 +92047,9 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PhysicalDevice8BitStorageFeatures, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceASTCDecodeFeaturesEXT, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceASTCDecodeFeaturesEXT, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceAccelerationStructureFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceAccelerationStructureFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceAccelerationStructurePropertiesKHR, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceBlendOperationAdvancedFeaturesEXT, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceBlendOperationAdvancedFeaturesEXT, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceBlendOperationAdvancedPropertiesEXT, PhysicalDeviceProperties2>{ enum { value = true }; }; @@ -89816,6 +92085,8 @@ namespace VULKAN_HPP_NAMESPACE 
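The StructExtends registrations above are what let these structs participate in a vk::StructureChain. A sketch of chaining the newly registered PhysicalDeviceAccelerationStructureFeaturesKHR into device creation, assuming queueInfo is a prepared vk::DeviceQueueCreateInfo and that VK_KHR_acceleration_structure and its dependencies are enabled elsewhere (both assumptions, not shown in the diff):

  vk::StructureChain<vk::DeviceCreateInfo,
                     vk::PhysicalDeviceFeatures2,
                     vk::PhysicalDeviceAccelerationStructureFeaturesKHR> chain;
  // StructureChain wires up the pNext pointers; the StructExtends<..., DeviceCreateInfo> and
  // StructExtends<..., PhysicalDeviceFeatures2> specializations are what make this compile.
  chain.get<vk::DeviceCreateInfo>()
       .setQueueCreateInfoCount( 1 )
       .setPQueueCreateInfos( &queueInfo );
  chain.get<vk::PhysicalDeviceAccelerationStructureFeaturesKHR>()
       .setAccelerationStructure( VK_TRUE );
  vk::Device device = physicalDevice.createDevice( chain.get<vk::DeviceCreateInfo>() );
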
template <> struct StructExtends<PhysicalDeviceDeviceGeneratedCommandsFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceDeviceGeneratedCommandsFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceDeviceGeneratedCommandsPropertiesNV, PhysicalDeviceProperties2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceDeviceMemoryReportFeaturesEXT, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceDeviceMemoryReportFeaturesEXT, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceDiagnosticsConfigFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceDiagnosticsConfigFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceDiscardRectanglePropertiesEXT, PhysicalDeviceProperties2>{ enum { value = true }; }; @@ -89838,6 +92109,12 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PhysicalDeviceFragmentShaderBarycentricFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceFragmentShaderInterlockFeaturesEXT, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceFragmentShaderInterlockFeaturesEXT, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRateEnumsFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRateEnumsFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRateEnumsPropertiesNV, PhysicalDeviceProperties2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRateFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRateFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceFragmentShadingRatePropertiesKHR, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceHostQueryResetFeatures, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceHostQueryResetFeatures, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceIDProperties, PhysicalDeviceProperties2>{ enum { value = true }; }; @@ -89888,13 +92165,11 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PhysicalDeviceProtectedMemoryFeatures, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceProtectedMemoryProperties, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDevicePushDescriptorPropertiesKHR, PhysicalDeviceProperties2>{ enum { value = true }; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS - template <> struct StructExtends<PhysicalDeviceRayTracingFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; - template <> struct StructExtends<PhysicalDeviceRayTracingFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - template <> struct StructExtends<PhysicalDeviceRayTracingPropertiesKHR, PhysicalDeviceProperties2>{ enum { value = true }; }; 
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + template <> struct StructExtends<PhysicalDeviceRayQueryFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceRayQueryFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceRayTracingPipelineFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceRayTracingPipelineFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceRayTracingPipelinePropertiesKHR, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceRayTracingPropertiesNV, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceRepresentativeFragmentTestFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceRepresentativeFragmentTestFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; @@ -89923,6 +92198,8 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PhysicalDeviceShaderDrawParametersFeatures, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderFloat16Int8Features, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderFloat16Int8Features, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceShaderImageAtomicInt64FeaturesEXT, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceShaderImageAtomicInt64FeaturesEXT, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderImageFootprintFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderImageFootprintFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL, PhysicalDeviceFeatures2>{ enum { value = true }; }; @@ -89932,6 +92209,8 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PhysicalDeviceShaderSMBuiltinsPropertiesNV, PhysicalDeviceProperties2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderSubgroupExtendedTypesFeatures, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShaderSubgroupExtendedTypesFeatures, DeviceCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceShaderTerminateInvocationFeaturesKHR, PhysicalDeviceFeatures2>{ enum { value = true }; }; + template <> struct StructExtends<PhysicalDeviceShaderTerminateInvocationFeaturesKHR, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShadingRateImageFeaturesNV, PhysicalDeviceFeatures2>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShadingRateImageFeaturesNV, DeviceCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PhysicalDeviceShadingRateImagePropertiesNV, PhysicalDeviceProperties2>{ enum { value = true }; }; @@ -89976,10 +92255,10 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<PipelineCreationFeedbackCreateInfoEXT, GraphicsPipelineCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PipelineCreationFeedbackCreateInfoEXT, ComputePipelineCreateInfo>{ enum { value = true }; }; 
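A sketch of how the PhysicalDeviceRayQueryFeaturesKHR, PhysicalDeviceRayTracingPipelineFeaturesKHR and PhysicalDeviceRayTracingPipelinePropertiesKHR registrations added above are typically consumed, assuming physicalDevice comes from a Vulkan 1.1+ instance (or one with VK_KHR_get_physical_device_properties2) and that the driver exposes the KHR ray tracing extensions; variable names are illustrative:

  // Query the feature structs through the StructureChain-returning getFeatures2 overload.
  auto featureChain = physicalDevice.getFeatures2<vk::PhysicalDeviceFeatures2,
                                                  vk::PhysicalDeviceRayTracingPipelineFeaturesKHR,
                                                  vk::PhysicalDeviceRayQueryFeaturesKHR>();
  bool rayTracingPipelines = featureChain.get<vk::PhysicalDeviceRayTracingPipelineFeaturesKHR>().rayTracingPipeline == VK_TRUE;
  bool rayQueries          = featureChain.get<vk::PhysicalDeviceRayQueryFeaturesKHR>().rayQuery == VK_TRUE;
  // The matching properties struct carries limits such as the shader group handle size.
  auto propertyChain = physicalDevice.getProperties2<vk::PhysicalDeviceProperties2,
                                                     vk::PhysicalDeviceRayTracingPipelinePropertiesKHR>();
  uint32_t handleSize = propertyChain.get<vk::PhysicalDeviceRayTracingPipelinePropertiesKHR>().shaderGroupHandleSize;
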
template <> struct StructExtends<PipelineCreationFeedbackCreateInfoEXT, RayTracingPipelineCreateInfoNV>{ enum { value = true }; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS template <> struct StructExtends<PipelineCreationFeedbackCreateInfoEXT, RayTracingPipelineCreateInfoKHR>{ enum { value = true }; }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <> struct StructExtends<PipelineDiscardRectangleStateCreateInfoEXT, GraphicsPipelineCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PipelineFragmentShadingRateEnumStateCreateInfoNV, GraphicsPipelineCreateInfo>{ enum { value = true }; }; + template <> struct StructExtends<PipelineFragmentShadingRateStateCreateInfoKHR, GraphicsPipelineCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PipelineRasterizationConservativeStateCreateInfoEXT, PipelineRasterizationStateCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PipelineRasterizationDepthClipStateCreateInfoEXT, PipelineRasterizationStateCreateInfo>{ enum { value = true }; }; template <> struct StructExtends<PipelineRasterizationLineStateCreateInfoEXT, PipelineRasterizationStateCreateInfo>{ enum { value = true }; }; @@ -90048,6 +92327,7 @@ namespace VULKAN_HPP_NAMESPACE template <> struct StructExtends<Win32KeyedMutexAcquireReleaseInfoNV, SubmitInfo>{ enum { value = true }; }; #endif /*VK_USE_PLATFORM_WIN32_KHR*/ template <> struct StructExtends<WriteDescriptorSetAccelerationStructureKHR, WriteDescriptorSet>{ enum { value = true }; }; + template <> struct StructExtends<WriteDescriptorSetAccelerationStructureNV, WriteDescriptorSet>{ enum { value = true }; }; template <> struct StructExtends<WriteDescriptorSetInlineUniformBlockEXT, WriteDescriptorSet>{ enum { value = true }; }; #if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL @@ -90055,9 +92335,9 @@ namespace VULKAN_HPP_NAMESPACE { public: # ifdef VULKAN_HPP_NO_EXCEPTIONS - DynamicLoader( std::string const & vulkanLibraryName = {} ) VULKAN_HPP_NOEXCEPT : m_success( false ) + DynamicLoader( std::string const & vulkanLibraryName = {} ) VULKAN_HPP_NOEXCEPT # else - DynamicLoader( std::string const & vulkanLibraryName = {} ) : m_success( false ) + DynamicLoader( std::string const & vulkanLibraryName = {} ) # endif { if ( !vulkanLibraryName.empty() ) @@ -90087,9 +92367,8 @@ namespace VULKAN_HPP_NAMESPACE # endif } - m_success = (m_library != nullptr); #ifndef VULKAN_HPP_NO_EXCEPTIONS - if ( !m_success ) + if ( m_library == nullptr ) { // NOTE there should be an InitializationFailedError, but msvc insists on the symbol does not exist within the scope of this function. throw std::runtime_error( "Failed to load vulkan library!" 
); @@ -90099,9 +92378,7 @@ namespace VULKAN_HPP_NAMESPACE DynamicLoader( DynamicLoader const& ) = delete; - DynamicLoader( DynamicLoader && other ) VULKAN_HPP_NOEXCEPT - : m_success(other.m_success) - , m_library(other.m_library) + DynamicLoader( DynamicLoader && other ) VULKAN_HPP_NOEXCEPT : m_library(other.m_library) { other.m_library = nullptr; } @@ -90110,7 +92387,6 @@ namespace VULKAN_HPP_NAMESPACE DynamicLoader &operator=( DynamicLoader && other ) VULKAN_HPP_NOEXCEPT { - m_success = other.m_success; std::swap(m_library, other.m_library); return *this; } @@ -90141,10 +92417,9 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool success() const VULKAN_HPP_NOEXCEPT { return m_success; } + bool success() const VULKAN_HPP_NOEXCEPT { return m_library != nullptr; } private: - bool m_success; # if defined( __linux__ ) || defined( __APPLE__ ) void * m_library; # elif defined( _WIN32 ) @@ -90174,18 +92449,13 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkAllocateMemory vkAllocateMemory = 0; PFN_vkBeginCommandBuffer vkBeginCommandBuffer = 0; PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkBindAccelerationStructureMemoryKHR vkBindAccelerationStructureMemoryKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkBindBufferMemory vkBindBufferMemory = 0; PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR = 0; PFN_vkBindBufferMemory2 vkBindBufferMemory2 = 0; PFN_vkBindImageMemory vkBindImageMemory = 0; PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR = 0; PFN_vkBindImageMemory2 vkBindImageMemory2 = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkBuildAccelerationStructureKHR vkBuildAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR = 0; PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT = 0; PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT = 0; PFN_vkCmdBeginQuery vkCmdBeginQuery = 0; @@ -90204,23 +92474,15 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT = 0; PFN_vkCmdBlitImage vkCmdBlitImage = 0; PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkCmdBuildAccelerationStructureIndirectKHR vkCmdBuildAccelerationStructureIndirectKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkCmdBuildAccelerationStructureKHR vkCmdBuildAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV = 0; + PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR = 0; + PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR = 0; PFN_vkCmdClearAttachments vkCmdClearAttachments = 0; PFN_vkCmdClearColorImage vkCmdClearColorImage = 0; PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCmdCopyBuffer vkCmdCopyBuffer = 0; PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR = 0; PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage = 0; @@ -90229,9 +92491,7 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCmdCopyImage2KHR 
vkCmdCopyImage2KHR = 0; PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer = 0; PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults = 0; PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT = 0; PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT = 0; @@ -90293,6 +92553,8 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT = 0; PFN_vkCmdSetEvent vkCmdSetEvent = 0; PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV = 0; + PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV = 0; + PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR = 0; PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT = 0; PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT = 0; PFN_vkCmdSetLineWidth vkCmdSetLineWidth = 0; @@ -90300,6 +92562,7 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL = 0; PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL = 0; PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT = 0; + PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR = 0; PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT = 0; PFN_vkCmdSetScissor vkCmdSetScissor = 0; PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT = 0; @@ -90312,34 +92575,20 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV = 0; PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV = 0; PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV = 0; PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer = 0; PFN_vkCmdWaitEvents vkCmdWaitEvents = 0; - PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV = 0; PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD = 0; PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp = 0; PFN_vkCompileDeferredNV vkCompileDeferredNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = 0; #ifdef VK_USE_PLATFORM_ANDROID_KHR PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR = 0; @@ -90350,9 +92599,7 @@ namespace 
VULKAN_HPP_NAMESPACE PFN_vkCreateComputePipelines vkCreateComputePipelines = 0; PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT = 0; PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCreateDescriptorPool vkCreateDescriptorPool = 0; PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout = 0; PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = 0; @@ -90388,9 +92635,7 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkCreatePipelineLayout vkCreatePipelineLayout = 0; PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT = 0; PFN_vkCreateQueryPool vkCreateQueryPool = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = 0; PFN_vkCreateRenderPass vkCreateRenderPass = 0; PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = 0; @@ -90424,21 +92669,15 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT = 0; PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT = 0; PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV = 0; PFN_vkDestroyBuffer vkDestroyBuffer = 0; PFN_vkDestroyBufferView vkDestroyBufferView = 0; PFN_vkDestroyCommandPool vkDestroyCommandPool = 0; PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT = 0; PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool = 0; PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout = 0; PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = 0; @@ -90481,13 +92720,9 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkFreeCommandBuffers vkFreeCommandBuffers = 0; PFN_vkFreeDescriptorSets vkFreeDescriptorSets = 0; PFN_vkFreeMemory vkFreeMemory = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS + PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR = 0; PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkGetAccelerationStructureMemoryRequirementsKHR vkGetAccelerationStructureMemoryRequirementsKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV = 0; #ifdef VK_USE_PLATFORM_ANDROID_KHR PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID = 0; @@ -90501,17 +92736,11 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR = 0; PFN_vkGetBufferOpaqueCaptureAddress 
vkGetBufferOpaqueCaptureAddress = 0; PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR = 0; PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR = 0; PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures = 0; PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR = 0; @@ -90587,6 +92816,7 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties = 0; PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR = 0; PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR = 0; PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties = 0; PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR = 0; PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2 = 0; @@ -90639,13 +92869,10 @@ namespace VULKAN_HPP_NAMESPACE #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT = 0; #endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR = 0; PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE = 0; PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity = 0; PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR = 0; @@ -90714,9 +92941,7 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkWaitForFences vkWaitForFences = 0; PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR = 0; PFN_vkWaitSemaphores vkWaitSemaphores = 0; -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR = 0; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ public: DispatchLoaderDynamic() VULKAN_HPP_NOEXCEPT = default; @@ -90870,6 +93095,7 @@ namespace VULKAN_HPP_NAMESPACE vkGetPhysicalDeviceFormatProperties2KHR = PFN_vkGetPhysicalDeviceFormatProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFormatProperties2KHR" ) ); vkGetPhysicalDeviceFormatProperties2 = PFN_vkGetPhysicalDeviceFormatProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFormatProperties2" ) ); if ( !vkGetPhysicalDeviceFormatProperties2 ) vkGetPhysicalDeviceFormatProperties2 = vkGetPhysicalDeviceFormatProperties2KHR; + 
vkGetPhysicalDeviceFragmentShadingRatesKHR = PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR" ) ); vkGetPhysicalDeviceImageFormatProperties = PFN_vkGetPhysicalDeviceImageFormatProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties" ) ); vkGetPhysicalDeviceImageFormatProperties2KHR = PFN_vkGetPhysicalDeviceImageFormatProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties2KHR" ) ); vkGetPhysicalDeviceImageFormatProperties2 = PFN_vkGetPhysicalDeviceImageFormatProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties2" ) ); @@ -90934,10 +93160,6 @@ namespace VULKAN_HPP_NAMESPACE vkAllocateMemory = PFN_vkAllocateMemory( vkGetInstanceProcAddr( instance, "vkAllocateMemory" ) ); vkBeginCommandBuffer = PFN_vkBeginCommandBuffer( vkGetInstanceProcAddr( instance, "vkBeginCommandBuffer" ) ); vkBindAccelerationStructureMemoryNV = PFN_vkBindAccelerationStructureMemoryNV( vkGetInstanceProcAddr( instance, "vkBindAccelerationStructureMemoryNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkBindAccelerationStructureMemoryKHR = PFN_vkBindAccelerationStructureMemoryKHR( vkGetInstanceProcAddr( instance, "vkBindAccelerationStructureMemoryKHR" ) ); - if ( !vkBindAccelerationStructureMemoryKHR ) vkBindAccelerationStructureMemoryKHR = vkBindAccelerationStructureMemoryNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkBindBufferMemory = PFN_vkBindBufferMemory( vkGetInstanceProcAddr( instance, "vkBindBufferMemory" ) ); vkBindBufferMemory2KHR = PFN_vkBindBufferMemory2KHR( vkGetInstanceProcAddr( instance, "vkBindBufferMemory2KHR" ) ); vkBindBufferMemory2 = PFN_vkBindBufferMemory2( vkGetInstanceProcAddr( instance, "vkBindBufferMemory2" ) ); @@ -90946,9 +93168,7 @@ namespace VULKAN_HPP_NAMESPACE vkBindImageMemory2KHR = PFN_vkBindImageMemory2KHR( vkGetInstanceProcAddr( instance, "vkBindImageMemory2KHR" ) ); vkBindImageMemory2 = PFN_vkBindImageMemory2( vkGetInstanceProcAddr( instance, "vkBindImageMemory2" ) ); if ( !vkBindImageMemory2 ) vkBindImageMemory2 = vkBindImageMemory2KHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkBuildAccelerationStructureKHR = PFN_vkBuildAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkBuildAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkBuildAccelerationStructuresKHR = PFN_vkBuildAccelerationStructuresKHR( vkGetInstanceProcAddr( instance, "vkBuildAccelerationStructuresKHR" ) ); vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginConditionalRenderingEXT" ) ); vkCmdBeginDebugUtilsLabelEXT = PFN_vkCmdBeginDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginDebugUtilsLabelEXT" ) ); vkCmdBeginQuery = PFN_vkCmdBeginQuery( vkGetInstanceProcAddr( instance, "vkCmdBeginQuery" ) ); @@ -90968,23 +93188,15 @@ namespace VULKAN_HPP_NAMESPACE vkCmdBindVertexBuffers2EXT = PFN_vkCmdBindVertexBuffers2EXT( vkGetInstanceProcAddr( instance, "vkCmdBindVertexBuffers2EXT" ) ); vkCmdBlitImage = PFN_vkCmdBlitImage( vkGetInstanceProcAddr( instance, "vkCmdBlitImage" ) ); vkCmdBlitImage2KHR = PFN_vkCmdBlitImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdBlitImage2KHR" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkCmdBuildAccelerationStructureIndirectKHR = PFN_vkCmdBuildAccelerationStructureIndirectKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructureIndirectKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - 
vkCmdBuildAccelerationStructureKHR = PFN_vkCmdBuildAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdBuildAccelerationStructureNV = PFN_vkCmdBuildAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructureNV" ) ); + vkCmdBuildAccelerationStructuresIndirectKHR = PFN_vkCmdBuildAccelerationStructuresIndirectKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructuresIndirectKHR" ) ); + vkCmdBuildAccelerationStructuresKHR = PFN_vkCmdBuildAccelerationStructuresKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructuresKHR" ) ); vkCmdClearAttachments = PFN_vkCmdClearAttachments( vkGetInstanceProcAddr( instance, "vkCmdClearAttachments" ) ); vkCmdClearColorImage = PFN_vkCmdClearColorImage( vkGetInstanceProcAddr( instance, "vkCmdClearColorImage" ) ); vkCmdClearDepthStencilImage = PFN_vkCmdClearDepthStencilImage( vkGetInstanceProcAddr( instance, "vkCmdClearDepthStencilImage" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyAccelerationStructureKHR = PFN_vkCmdCopyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyAccelerationStructureNV = PFN_vkCmdCopyAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyAccelerationStructureToMemoryKHR = PFN_vkCmdCopyAccelerationStructureToMemoryKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureToMemoryKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyBuffer = PFN_vkCmdCopyBuffer( vkGetInstanceProcAddr( instance, "vkCmdCopyBuffer" ) ); vkCmdCopyBuffer2KHR = PFN_vkCmdCopyBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyBuffer2KHR" ) ); vkCmdCopyBufferToImage = PFN_vkCmdCopyBufferToImage( vkGetInstanceProcAddr( instance, "vkCmdCopyBufferToImage" ) ); @@ -90993,9 +93205,7 @@ namespace VULKAN_HPP_NAMESPACE vkCmdCopyImage2KHR = PFN_vkCmdCopyImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyImage2KHR" ) ); vkCmdCopyImageToBuffer = PFN_vkCmdCopyImageToBuffer( vkGetInstanceProcAddr( instance, "vkCmdCopyImageToBuffer" ) ); vkCmdCopyImageToBuffer2KHR = PFN_vkCmdCopyImageToBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyImageToBuffer2KHR" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyMemoryToAccelerationStructureKHR = PFN_vkCmdCopyMemoryToAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyMemoryToAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyQueryPoolResults = PFN_vkCmdCopyQueryPoolResults( vkGetInstanceProcAddr( instance, "vkCmdCopyQueryPoolResults" ) ); vkCmdDebugMarkerBeginEXT = PFN_vkCmdDebugMarkerBeginEXT( vkGetInstanceProcAddr( instance, "vkCmdDebugMarkerBeginEXT" ) ); vkCmdDebugMarkerEndEXT = PFN_vkCmdDebugMarkerEndEXT( vkGetInstanceProcAddr( instance, "vkCmdDebugMarkerEndEXT" ) ); @@ -91065,6 +93275,8 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetDiscardRectangleEXT = PFN_vkCmdSetDiscardRectangleEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDiscardRectangleEXT" ) ); vkCmdSetEvent = PFN_vkCmdSetEvent( vkGetInstanceProcAddr( instance, "vkCmdSetEvent" ) ); vkCmdSetExclusiveScissorNV = PFN_vkCmdSetExclusiveScissorNV( vkGetInstanceProcAddr( instance, "vkCmdSetExclusiveScissorNV" ) ); + vkCmdSetFragmentShadingRateEnumNV = PFN_vkCmdSetFragmentShadingRateEnumNV( vkGetInstanceProcAddr( instance, "vkCmdSetFragmentShadingRateEnumNV" ) ); + 
vkCmdSetFragmentShadingRateKHR = PFN_vkCmdSetFragmentShadingRateKHR( vkGetInstanceProcAddr( instance, "vkCmdSetFragmentShadingRateKHR" ) ); vkCmdSetFrontFaceEXT = PFN_vkCmdSetFrontFaceEXT( vkGetInstanceProcAddr( instance, "vkCmdSetFrontFaceEXT" ) ); vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetInstanceProcAddr( instance, "vkCmdSetLineStippleEXT" ) ); vkCmdSetLineWidth = PFN_vkCmdSetLineWidth( vkGetInstanceProcAddr( instance, "vkCmdSetLineWidth" ) ); @@ -91072,6 +93284,7 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetPerformanceOverrideINTEL = PFN_vkCmdSetPerformanceOverrideINTEL( vkGetInstanceProcAddr( instance, "vkCmdSetPerformanceOverrideINTEL" ) ); vkCmdSetPerformanceStreamMarkerINTEL = PFN_vkCmdSetPerformanceStreamMarkerINTEL( vkGetInstanceProcAddr( instance, "vkCmdSetPerformanceStreamMarkerINTEL" ) ); vkCmdSetPrimitiveTopologyEXT = PFN_vkCmdSetPrimitiveTopologyEXT( vkGetInstanceProcAddr( instance, "vkCmdSetPrimitiveTopologyEXT" ) ); + vkCmdSetRayTracingPipelineStackSizeKHR = PFN_vkCmdSetRayTracingPipelineStackSizeKHR( vkGetInstanceProcAddr( instance, "vkCmdSetRayTracingPipelineStackSizeKHR" ) ); vkCmdSetSampleLocationsEXT = PFN_vkCmdSetSampleLocationsEXT( vkGetInstanceProcAddr( instance, "vkCmdSetSampleLocationsEXT" ) ); vkCmdSetScissor = PFN_vkCmdSetScissor( vkGetInstanceProcAddr( instance, "vkCmdSetScissor" ) ); vkCmdSetScissorWithCountEXT = PFN_vkCmdSetScissorWithCountEXT( vkGetInstanceProcAddr( instance, "vkCmdSetScissorWithCountEXT" ) ); @@ -91084,43 +93297,26 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetViewportShadingRatePaletteNV = PFN_vkCmdSetViewportShadingRatePaletteNV( vkGetInstanceProcAddr( instance, "vkCmdSetViewportShadingRatePaletteNV" ) ); vkCmdSetViewportWScalingNV = PFN_vkCmdSetViewportWScalingNV( vkGetInstanceProcAddr( instance, "vkCmdSetViewportWScalingNV" ) ); vkCmdSetViewportWithCountEXT = PFN_vkCmdSetViewportWithCountEXT( vkGetInstanceProcAddr( instance, "vkCmdSetViewportWithCountEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdTraceRaysIndirectKHR = PFN_vkCmdTraceRaysIndirectKHR( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysIndirectKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdTraceRaysKHR = PFN_vkCmdTraceRaysKHR( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdTraceRaysNV = PFN_vkCmdTraceRaysNV( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysNV" ) ); vkCmdUpdateBuffer = PFN_vkCmdUpdateBuffer( vkGetInstanceProcAddr( instance, "vkCmdUpdateBuffer" ) ); vkCmdWaitEvents = PFN_vkCmdWaitEvents( vkGetInstanceProcAddr( instance, "vkCmdWaitEvents" ) ); - vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetInstanceProcAddr( instance, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdWriteAccelerationStructuresPropertiesKHR = PFN_vkCmdWriteAccelerationStructuresPropertiesKHR( vkGetInstanceProcAddr( instance, "vkCmdWriteAccelerationStructuresPropertiesKHR" ) ); - if ( !vkCmdWriteAccelerationStructuresPropertiesKHR ) vkCmdWriteAccelerationStructuresPropertiesKHR = vkCmdWriteAccelerationStructuresPropertiesNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetInstanceProcAddr( instance, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); vkCmdWriteBufferMarkerAMD = PFN_vkCmdWriteBufferMarkerAMD( vkGetInstanceProcAddr( instance, "vkCmdWriteBufferMarkerAMD" ) ); vkCmdWriteTimestamp = 
PFN_vkCmdWriteTimestamp( vkGetInstanceProcAddr( instance, "vkCmdWriteTimestamp" ) ); vkCompileDeferredNV = PFN_vkCompileDeferredNV( vkGetInstanceProcAddr( instance, "vkCompileDeferredNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyAccelerationStructureKHR = PFN_vkCopyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCopyAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyAccelerationStructureToMemoryKHR = PFN_vkCopyAccelerationStructureToMemoryKHR( vkGetInstanceProcAddr( instance, "vkCopyAccelerationStructureToMemoryKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyMemoryToAccelerationStructureKHR = PFN_vkCopyMemoryToAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCopyMemoryToAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateAccelerationStructureKHR = PFN_vkCreateAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCreateAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateAccelerationStructureNV = PFN_vkCreateAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCreateAccelerationStructureNV" ) ); vkCreateBuffer = PFN_vkCreateBuffer( vkGetInstanceProcAddr( instance, "vkCreateBuffer" ) ); vkCreateBufferView = PFN_vkCreateBufferView( vkGetInstanceProcAddr( instance, "vkCreateBufferView" ) ); vkCreateCommandPool = PFN_vkCreateCommandPool( vkGetInstanceProcAddr( instance, "vkCreateCommandPool" ) ); vkCreateComputePipelines = PFN_vkCreateComputePipelines( vkGetInstanceProcAddr( instance, "vkCreateComputePipelines" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateDeferredOperationKHR = PFN_vkCreateDeferredOperationKHR( vkGetInstanceProcAddr( instance, "vkCreateDeferredOperationKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateDescriptorPool = PFN_vkCreateDescriptorPool( vkGetInstanceProcAddr( instance, "vkCreateDescriptorPool" ) ); vkCreateDescriptorSetLayout = PFN_vkCreateDescriptorSetLayout( vkGetInstanceProcAddr( instance, "vkCreateDescriptorSetLayout" ) ); vkCreateDescriptorUpdateTemplateKHR = PFN_vkCreateDescriptorUpdateTemplateKHR( vkGetInstanceProcAddr( instance, "vkCreateDescriptorUpdateTemplateKHR" ) ); @@ -91137,9 +93333,7 @@ namespace VULKAN_HPP_NAMESPACE vkCreatePipelineLayout = PFN_vkCreatePipelineLayout( vkGetInstanceProcAddr( instance, "vkCreatePipelineLayout" ) ); vkCreatePrivateDataSlotEXT = PFN_vkCreatePrivateDataSlotEXT( vkGetInstanceProcAddr( instance, "vkCreatePrivateDataSlotEXT" ) ); vkCreateQueryPool = PFN_vkCreateQueryPool( vkGetInstanceProcAddr( instance, "vkCreateQueryPool" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateRayTracingPipelinesKHR = PFN_vkCreateRayTracingPipelinesKHR( vkGetInstanceProcAddr( instance, "vkCreateRayTracingPipelinesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateRayTracingPipelinesNV = PFN_vkCreateRayTracingPipelinesNV( vkGetInstanceProcAddr( instance, "vkCreateRayTracingPipelinesNV" ) ); vkCreateRenderPass = PFN_vkCreateRenderPass( vkGetInstanceProcAddr( instance, "vkCreateRenderPass" ) ); vkCreateRenderPass2KHR = PFN_vkCreateRenderPass2KHR( vkGetInstanceProcAddr( instance, "vkCreateRenderPass2KHR" ) ); @@ -91156,20 +93350,13 @@ namespace VULKAN_HPP_NAMESPACE vkCreateValidationCacheEXT = PFN_vkCreateValidationCacheEXT( vkGetInstanceProcAddr( instance, "vkCreateValidationCacheEXT" ) ); vkDebugMarkerSetObjectNameEXT = PFN_vkDebugMarkerSetObjectNameEXT( vkGetInstanceProcAddr( instance, 
"vkDebugMarkerSetObjectNameEXT" ) ); vkDebugMarkerSetObjectTagEXT = PFN_vkDebugMarkerSetObjectTagEXT( vkGetInstanceProcAddr( instance, "vkDebugMarkerSetObjectTagEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDeferredOperationJoinKHR = PFN_vkDeferredOperationJoinKHR( vkGetInstanceProcAddr( instance, "vkDeferredOperationJoinKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - vkDestroyAccelerationStructureNV = PFN_vkDestroyAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkDestroyAccelerationStructureNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDestroyAccelerationStructureKHR = PFN_vkDestroyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkDestroyAccelerationStructureKHR" ) ); - if ( !vkDestroyAccelerationStructureKHR ) vkDestroyAccelerationStructureKHR = vkDestroyAccelerationStructureNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkDestroyAccelerationStructureNV = PFN_vkDestroyAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkDestroyAccelerationStructureNV" ) ); vkDestroyBuffer = PFN_vkDestroyBuffer( vkGetInstanceProcAddr( instance, "vkDestroyBuffer" ) ); vkDestroyBufferView = PFN_vkDestroyBufferView( vkGetInstanceProcAddr( instance, "vkDestroyBufferView" ) ); vkDestroyCommandPool = PFN_vkDestroyCommandPool( vkGetInstanceProcAddr( instance, "vkDestroyCommandPool" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDestroyDeferredOperationKHR = PFN_vkDestroyDeferredOperationKHR( vkGetInstanceProcAddr( instance, "vkDestroyDeferredOperationKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkDestroyDescriptorPool = PFN_vkDestroyDescriptorPool( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorPool" ) ); vkDestroyDescriptorSetLayout = PFN_vkDestroyDescriptorSetLayout( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorSetLayout" ) ); vkDestroyDescriptorUpdateTemplateKHR = PFN_vkDestroyDescriptorUpdateTemplateKHR( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorUpdateTemplateKHR" ) ); @@ -91203,13 +93390,9 @@ namespace VULKAN_HPP_NAMESPACE vkFreeCommandBuffers = PFN_vkFreeCommandBuffers( vkGetInstanceProcAddr( instance, "vkFreeCommandBuffers" ) ); vkFreeDescriptorSets = PFN_vkFreeDescriptorSets( vkGetInstanceProcAddr( instance, "vkFreeDescriptorSets" ) ); vkFreeMemory = PFN_vkFreeMemory( vkGetInstanceProcAddr( instance, "vkFreeMemory" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS + vkGetAccelerationStructureBuildSizesKHR = PFN_vkGetAccelerationStructureBuildSizesKHR( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureBuildSizesKHR" ) ); vkGetAccelerationStructureDeviceAddressKHR = PFN_vkGetAccelerationStructureDeviceAddressKHR( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureDeviceAddressKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetAccelerationStructureHandleNV = PFN_vkGetAccelerationStructureHandleNV( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureHandleNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkGetAccelerationStructureMemoryRequirementsKHR = PFN_vkGetAccelerationStructureMemoryRequirementsKHR( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureMemoryRequirementsKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetAccelerationStructureMemoryRequirementsNV = PFN_vkGetAccelerationStructureMemoryRequirementsNV( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureMemoryRequirementsNV" ) ); #ifdef VK_USE_PLATFORM_ANDROID_KHR vkGetAndroidHardwareBufferPropertiesANDROID = PFN_vkGetAndroidHardwareBufferPropertiesANDROID( vkGetInstanceProcAddr( instance, "vkGetAndroidHardwareBufferPropertiesANDROID" ) ); @@ -91227,18 +93410,12 
@@ namespace VULKAN_HPP_NAMESPACE vkGetBufferOpaqueCaptureAddress = PFN_vkGetBufferOpaqueCaptureAddress( vkGetInstanceProcAddr( instance, "vkGetBufferOpaqueCaptureAddress" ) ); if ( !vkGetBufferOpaqueCaptureAddress ) vkGetBufferOpaqueCaptureAddress = vkGetBufferOpaqueCaptureAddressKHR; vkGetCalibratedTimestampsEXT = PFN_vkGetCalibratedTimestampsEXT( vkGetInstanceProcAddr( instance, "vkGetCalibratedTimestampsEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeferredOperationMaxConcurrencyKHR = PFN_vkGetDeferredOperationMaxConcurrencyKHR( vkGetInstanceProcAddr( instance, "vkGetDeferredOperationMaxConcurrencyKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeferredOperationResultKHR = PFN_vkGetDeferredOperationResultKHR( vkGetInstanceProcAddr( instance, "vkGetDeferredOperationResultKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetDescriptorSetLayoutSupportKHR = PFN_vkGetDescriptorSetLayoutSupportKHR( vkGetInstanceProcAddr( instance, "vkGetDescriptorSetLayoutSupportKHR" ) ); vkGetDescriptorSetLayoutSupport = PFN_vkGetDescriptorSetLayoutSupport( vkGetInstanceProcAddr( instance, "vkGetDescriptorSetLayoutSupport" ) ); if ( !vkGetDescriptorSetLayoutSupport ) vkGetDescriptorSetLayoutSupport = vkGetDescriptorSetLayoutSupportKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeviceAccelerationStructureCompatibilityKHR = PFN_vkGetDeviceAccelerationStructureCompatibilityKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceAccelerationStructureCompatibilityKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetDeviceGroupPeerMemoryFeaturesKHR = PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupPeerMemoryFeaturesKHR" ) ); vkGetDeviceGroupPeerMemoryFeatures = PFN_vkGetDeviceGroupPeerMemoryFeatures( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupPeerMemoryFeatures" ) ); if ( !vkGetDeviceGroupPeerMemoryFeatures ) vkGetDeviceGroupPeerMemoryFeatures = vkGetDeviceGroupPeerMemoryFeaturesKHR; @@ -91297,14 +93474,11 @@ namespace VULKAN_HPP_NAMESPACE vkGetPrivateDataEXT = PFN_vkGetPrivateDataEXT( vkGetInstanceProcAddr( instance, "vkGetPrivateDataEXT" ) ); vkGetQueryPoolResults = PFN_vkGetQueryPoolResults( vkGetInstanceProcAddr( instance, "vkGetQueryPoolResults" ) ); vkGetQueueCheckpointDataNV = PFN_vkGetQueueCheckpointDataNV( vkGetInstanceProcAddr( instance, "vkGetQueueCheckpointDataNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetRayTracingShaderGroupHandlesNV = PFN_vkGetRayTracingShaderGroupHandlesNV( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupHandlesNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetRayTracingShaderGroupHandlesKHR = PFN_vkGetRayTracingShaderGroupHandlesKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupHandlesKHR" ) ); if ( !vkGetRayTracingShaderGroupHandlesKHR ) vkGetRayTracingShaderGroupHandlesKHR = vkGetRayTracingShaderGroupHandlesNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkGetRayTracingShaderGroupStackSizeKHR = PFN_vkGetRayTracingShaderGroupStackSizeKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupStackSizeKHR" ) ); vkGetRefreshCycleDurationGOOGLE = PFN_vkGetRefreshCycleDurationGOOGLE( vkGetInstanceProcAddr( instance, "vkGetRefreshCycleDurationGOOGLE" ) ); vkGetRenderAreaGranularity = PFN_vkGetRenderAreaGranularity( vkGetInstanceProcAddr( 
instance, "vkGetRenderAreaGranularity" ) ); vkGetSemaphoreCounterValueKHR = PFN_vkGetSemaphoreCounterValueKHR( vkGetInstanceProcAddr( instance, "vkGetSemaphoreCounterValueKHR" ) ); @@ -91377,9 +93551,7 @@ namespace VULKAN_HPP_NAMESPACE vkWaitSemaphoresKHR = PFN_vkWaitSemaphoresKHR( vkGetInstanceProcAddr( instance, "vkWaitSemaphoresKHR" ) ); vkWaitSemaphores = PFN_vkWaitSemaphores( vkGetInstanceProcAddr( instance, "vkWaitSemaphores" ) ); if ( !vkWaitSemaphores ) vkWaitSemaphores = vkWaitSemaphoresKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS vkWriteAccelerationStructuresPropertiesKHR = PFN_vkWriteAccelerationStructuresPropertiesKHR( vkGetInstanceProcAddr( instance, "vkWriteAccelerationStructuresPropertiesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ } void init( VULKAN_HPP_NAMESPACE::Device deviceCpp ) VULKAN_HPP_NOEXCEPT @@ -91397,10 +93569,6 @@ namespace VULKAN_HPP_NAMESPACE vkAllocateMemory = PFN_vkAllocateMemory( vkGetDeviceProcAddr( device, "vkAllocateMemory" ) ); vkBeginCommandBuffer = PFN_vkBeginCommandBuffer( vkGetDeviceProcAddr( device, "vkBeginCommandBuffer" ) ); vkBindAccelerationStructureMemoryNV = PFN_vkBindAccelerationStructureMemoryNV( vkGetDeviceProcAddr( device, "vkBindAccelerationStructureMemoryNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkBindAccelerationStructureMemoryKHR = PFN_vkBindAccelerationStructureMemoryKHR( vkGetDeviceProcAddr( device, "vkBindAccelerationStructureMemoryKHR" ) ); - if ( !vkBindAccelerationStructureMemoryKHR ) vkBindAccelerationStructureMemoryKHR = vkBindAccelerationStructureMemoryNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkBindBufferMemory = PFN_vkBindBufferMemory( vkGetDeviceProcAddr( device, "vkBindBufferMemory" ) ); vkBindBufferMemory2KHR = PFN_vkBindBufferMemory2KHR( vkGetDeviceProcAddr( device, "vkBindBufferMemory2KHR" ) ); vkBindBufferMemory2 = PFN_vkBindBufferMemory2( vkGetDeviceProcAddr( device, "vkBindBufferMemory2" ) ); @@ -91409,9 +93577,7 @@ namespace VULKAN_HPP_NAMESPACE vkBindImageMemory2KHR = PFN_vkBindImageMemory2KHR( vkGetDeviceProcAddr( device, "vkBindImageMemory2KHR" ) ); vkBindImageMemory2 = PFN_vkBindImageMemory2( vkGetDeviceProcAddr( device, "vkBindImageMemory2" ) ); if ( !vkBindImageMemory2 ) vkBindImageMemory2 = vkBindImageMemory2KHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkBuildAccelerationStructureKHR = PFN_vkBuildAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkBuildAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkBuildAccelerationStructuresKHR = PFN_vkBuildAccelerationStructuresKHR( vkGetDeviceProcAddr( device, "vkBuildAccelerationStructuresKHR" ) ); vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetDeviceProcAddr( device, "vkCmdBeginConditionalRenderingEXT" ) ); vkCmdBeginDebugUtilsLabelEXT = PFN_vkCmdBeginDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkCmdBeginDebugUtilsLabelEXT" ) ); vkCmdBeginQuery = PFN_vkCmdBeginQuery( vkGetDeviceProcAddr( device, "vkCmdBeginQuery" ) ); @@ -91431,23 +93597,15 @@ namespace VULKAN_HPP_NAMESPACE vkCmdBindVertexBuffers2EXT = PFN_vkCmdBindVertexBuffers2EXT( vkGetDeviceProcAddr( device, "vkCmdBindVertexBuffers2EXT" ) ); vkCmdBlitImage = PFN_vkCmdBlitImage( vkGetDeviceProcAddr( device, "vkCmdBlitImage" ) ); vkCmdBlitImage2KHR = PFN_vkCmdBlitImage2KHR( vkGetDeviceProcAddr( device, "vkCmdBlitImage2KHR" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkCmdBuildAccelerationStructureIndirectKHR = PFN_vkCmdBuildAccelerationStructureIndirectKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructureIndirectKHR" ) ); -#endif 
/*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkCmdBuildAccelerationStructureKHR = PFN_vkCmdBuildAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdBuildAccelerationStructureNV = PFN_vkCmdBuildAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructureNV" ) ); + vkCmdBuildAccelerationStructuresIndirectKHR = PFN_vkCmdBuildAccelerationStructuresIndirectKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructuresIndirectKHR" ) ); + vkCmdBuildAccelerationStructuresKHR = PFN_vkCmdBuildAccelerationStructuresKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructuresKHR" ) ); vkCmdClearAttachments = PFN_vkCmdClearAttachments( vkGetDeviceProcAddr( device, "vkCmdClearAttachments" ) ); vkCmdClearColorImage = PFN_vkCmdClearColorImage( vkGetDeviceProcAddr( device, "vkCmdClearColorImage" ) ); vkCmdClearDepthStencilImage = PFN_vkCmdClearDepthStencilImage( vkGetDeviceProcAddr( device, "vkCmdClearDepthStencilImage" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyAccelerationStructureKHR = PFN_vkCmdCopyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyAccelerationStructureNV = PFN_vkCmdCopyAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyAccelerationStructureToMemoryKHR = PFN_vkCmdCopyAccelerationStructureToMemoryKHR( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureToMemoryKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyBuffer = PFN_vkCmdCopyBuffer( vkGetDeviceProcAddr( device, "vkCmdCopyBuffer" ) ); vkCmdCopyBuffer2KHR = PFN_vkCmdCopyBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyBuffer2KHR" ) ); vkCmdCopyBufferToImage = PFN_vkCmdCopyBufferToImage( vkGetDeviceProcAddr( device, "vkCmdCopyBufferToImage" ) ); @@ -91456,9 +93614,7 @@ namespace VULKAN_HPP_NAMESPACE vkCmdCopyImage2KHR = PFN_vkCmdCopyImage2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyImage2KHR" ) ); vkCmdCopyImageToBuffer = PFN_vkCmdCopyImageToBuffer( vkGetDeviceProcAddr( device, "vkCmdCopyImageToBuffer" ) ); vkCmdCopyImageToBuffer2KHR = PFN_vkCmdCopyImageToBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyImageToBuffer2KHR" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdCopyMemoryToAccelerationStructureKHR = PFN_vkCmdCopyMemoryToAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCmdCopyMemoryToAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdCopyQueryPoolResults = PFN_vkCmdCopyQueryPoolResults( vkGetDeviceProcAddr( device, "vkCmdCopyQueryPoolResults" ) ); vkCmdDebugMarkerBeginEXT = PFN_vkCmdDebugMarkerBeginEXT( vkGetDeviceProcAddr( device, "vkCmdDebugMarkerBeginEXT" ) ); vkCmdDebugMarkerEndEXT = PFN_vkCmdDebugMarkerEndEXT( vkGetDeviceProcAddr( device, "vkCmdDebugMarkerEndEXT" ) ); @@ -91528,6 +93684,8 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetDiscardRectangleEXT = PFN_vkCmdSetDiscardRectangleEXT( vkGetDeviceProcAddr( device, "vkCmdSetDiscardRectangleEXT" ) ); vkCmdSetEvent = PFN_vkCmdSetEvent( vkGetDeviceProcAddr( device, "vkCmdSetEvent" ) ); vkCmdSetExclusiveScissorNV = PFN_vkCmdSetExclusiveScissorNV( vkGetDeviceProcAddr( device, "vkCmdSetExclusiveScissorNV" ) ); + vkCmdSetFragmentShadingRateEnumNV = PFN_vkCmdSetFragmentShadingRateEnumNV( vkGetDeviceProcAddr( device, "vkCmdSetFragmentShadingRateEnumNV" ) ); + vkCmdSetFragmentShadingRateKHR = 
PFN_vkCmdSetFragmentShadingRateKHR( vkGetDeviceProcAddr( device, "vkCmdSetFragmentShadingRateKHR" ) ); vkCmdSetFrontFaceEXT = PFN_vkCmdSetFrontFaceEXT( vkGetDeviceProcAddr( device, "vkCmdSetFrontFaceEXT" ) ); vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleEXT" ) ); vkCmdSetLineWidth = PFN_vkCmdSetLineWidth( vkGetDeviceProcAddr( device, "vkCmdSetLineWidth" ) ); @@ -91535,6 +93693,7 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetPerformanceOverrideINTEL = PFN_vkCmdSetPerformanceOverrideINTEL( vkGetDeviceProcAddr( device, "vkCmdSetPerformanceOverrideINTEL" ) ); vkCmdSetPerformanceStreamMarkerINTEL = PFN_vkCmdSetPerformanceStreamMarkerINTEL( vkGetDeviceProcAddr( device, "vkCmdSetPerformanceStreamMarkerINTEL" ) ); vkCmdSetPrimitiveTopologyEXT = PFN_vkCmdSetPrimitiveTopologyEXT( vkGetDeviceProcAddr( device, "vkCmdSetPrimitiveTopologyEXT" ) ); + vkCmdSetRayTracingPipelineStackSizeKHR = PFN_vkCmdSetRayTracingPipelineStackSizeKHR( vkGetDeviceProcAddr( device, "vkCmdSetRayTracingPipelineStackSizeKHR" ) ); vkCmdSetSampleLocationsEXT = PFN_vkCmdSetSampleLocationsEXT( vkGetDeviceProcAddr( device, "vkCmdSetSampleLocationsEXT" ) ); vkCmdSetScissor = PFN_vkCmdSetScissor( vkGetDeviceProcAddr( device, "vkCmdSetScissor" ) ); vkCmdSetScissorWithCountEXT = PFN_vkCmdSetScissorWithCountEXT( vkGetDeviceProcAddr( device, "vkCmdSetScissorWithCountEXT" ) ); @@ -91547,43 +93706,26 @@ namespace VULKAN_HPP_NAMESPACE vkCmdSetViewportShadingRatePaletteNV = PFN_vkCmdSetViewportShadingRatePaletteNV( vkGetDeviceProcAddr( device, "vkCmdSetViewportShadingRatePaletteNV" ) ); vkCmdSetViewportWScalingNV = PFN_vkCmdSetViewportWScalingNV( vkGetDeviceProcAddr( device, "vkCmdSetViewportWScalingNV" ) ); vkCmdSetViewportWithCountEXT = PFN_vkCmdSetViewportWithCountEXT( vkGetDeviceProcAddr( device, "vkCmdSetViewportWithCountEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdTraceRaysIndirectKHR = PFN_vkCmdTraceRaysIndirectKHR( vkGetDeviceProcAddr( device, "vkCmdTraceRaysIndirectKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdTraceRaysKHR = PFN_vkCmdTraceRaysKHR( vkGetDeviceProcAddr( device, "vkCmdTraceRaysKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCmdTraceRaysNV = PFN_vkCmdTraceRaysNV( vkGetDeviceProcAddr( device, "vkCmdTraceRaysNV" ) ); vkCmdUpdateBuffer = PFN_vkCmdUpdateBuffer( vkGetDeviceProcAddr( device, "vkCmdUpdateBuffer" ) ); vkCmdWaitEvents = PFN_vkCmdWaitEvents( vkGetDeviceProcAddr( device, "vkCmdWaitEvents" ) ); - vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetDeviceProcAddr( device, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCmdWriteAccelerationStructuresPropertiesKHR = PFN_vkCmdWriteAccelerationStructuresPropertiesKHR( vkGetDeviceProcAddr( device, "vkCmdWriteAccelerationStructuresPropertiesKHR" ) ); - if ( !vkCmdWriteAccelerationStructuresPropertiesKHR ) vkCmdWriteAccelerationStructuresPropertiesKHR = vkCmdWriteAccelerationStructuresPropertiesNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetDeviceProcAddr( device, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); vkCmdWriteBufferMarkerAMD = PFN_vkCmdWriteBufferMarkerAMD( vkGetDeviceProcAddr( device, "vkCmdWriteBufferMarkerAMD" ) ); vkCmdWriteTimestamp = PFN_vkCmdWriteTimestamp( vkGetDeviceProcAddr( device, "vkCmdWriteTimestamp" ) ); vkCompileDeferredNV = PFN_vkCompileDeferredNV( 
vkGetDeviceProcAddr( device, "vkCompileDeferredNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyAccelerationStructureKHR = PFN_vkCopyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCopyAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyAccelerationStructureToMemoryKHR = PFN_vkCopyAccelerationStructureToMemoryKHR( vkGetDeviceProcAddr( device, "vkCopyAccelerationStructureToMemoryKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCopyMemoryToAccelerationStructureKHR = PFN_vkCopyMemoryToAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCopyMemoryToAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateAccelerationStructureKHR = PFN_vkCreateAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCreateAccelerationStructureKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateAccelerationStructureNV = PFN_vkCreateAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCreateAccelerationStructureNV" ) ); vkCreateBuffer = PFN_vkCreateBuffer( vkGetDeviceProcAddr( device, "vkCreateBuffer" ) ); vkCreateBufferView = PFN_vkCreateBufferView( vkGetDeviceProcAddr( device, "vkCreateBufferView" ) ); vkCreateCommandPool = PFN_vkCreateCommandPool( vkGetDeviceProcAddr( device, "vkCreateCommandPool" ) ); vkCreateComputePipelines = PFN_vkCreateComputePipelines( vkGetDeviceProcAddr( device, "vkCreateComputePipelines" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateDeferredOperationKHR = PFN_vkCreateDeferredOperationKHR( vkGetDeviceProcAddr( device, "vkCreateDeferredOperationKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateDescriptorPool = PFN_vkCreateDescriptorPool( vkGetDeviceProcAddr( device, "vkCreateDescriptorPool" ) ); vkCreateDescriptorSetLayout = PFN_vkCreateDescriptorSetLayout( vkGetDeviceProcAddr( device, "vkCreateDescriptorSetLayout" ) ); vkCreateDescriptorUpdateTemplateKHR = PFN_vkCreateDescriptorUpdateTemplateKHR( vkGetDeviceProcAddr( device, "vkCreateDescriptorUpdateTemplateKHR" ) ); @@ -91600,9 +93742,7 @@ namespace VULKAN_HPP_NAMESPACE vkCreatePipelineLayout = PFN_vkCreatePipelineLayout( vkGetDeviceProcAddr( device, "vkCreatePipelineLayout" ) ); vkCreatePrivateDataSlotEXT = PFN_vkCreatePrivateDataSlotEXT( vkGetDeviceProcAddr( device, "vkCreatePrivateDataSlotEXT" ) ); vkCreateQueryPool = PFN_vkCreateQueryPool( vkGetDeviceProcAddr( device, "vkCreateQueryPool" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkCreateRayTracingPipelinesKHR = PFN_vkCreateRayTracingPipelinesKHR( vkGetDeviceProcAddr( device, "vkCreateRayTracingPipelinesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkCreateRayTracingPipelinesNV = PFN_vkCreateRayTracingPipelinesNV( vkGetDeviceProcAddr( device, "vkCreateRayTracingPipelinesNV" ) ); vkCreateRenderPass = PFN_vkCreateRenderPass( vkGetDeviceProcAddr( device, "vkCreateRenderPass" ) ); vkCreateRenderPass2KHR = PFN_vkCreateRenderPass2KHR( vkGetDeviceProcAddr( device, "vkCreateRenderPass2KHR" ) ); @@ -91619,20 +93759,13 @@ namespace VULKAN_HPP_NAMESPACE vkCreateValidationCacheEXT = PFN_vkCreateValidationCacheEXT( vkGetDeviceProcAddr( device, "vkCreateValidationCacheEXT" ) ); vkDebugMarkerSetObjectNameEXT = PFN_vkDebugMarkerSetObjectNameEXT( vkGetDeviceProcAddr( device, "vkDebugMarkerSetObjectNameEXT" ) ); vkDebugMarkerSetObjectTagEXT = PFN_vkDebugMarkerSetObjectTagEXT( vkGetDeviceProcAddr( device, "vkDebugMarkerSetObjectTagEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDeferredOperationJoinKHR = 
PFN_vkDeferredOperationJoinKHR( vkGetDeviceProcAddr( device, "vkDeferredOperationJoinKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - vkDestroyAccelerationStructureNV = PFN_vkDestroyAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkDestroyAccelerationStructureNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDestroyAccelerationStructureKHR = PFN_vkDestroyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkDestroyAccelerationStructureKHR" ) ); - if ( !vkDestroyAccelerationStructureKHR ) vkDestroyAccelerationStructureKHR = vkDestroyAccelerationStructureNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkDestroyAccelerationStructureNV = PFN_vkDestroyAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkDestroyAccelerationStructureNV" ) ); vkDestroyBuffer = PFN_vkDestroyBuffer( vkGetDeviceProcAddr( device, "vkDestroyBuffer" ) ); vkDestroyBufferView = PFN_vkDestroyBufferView( vkGetDeviceProcAddr( device, "vkDestroyBufferView" ) ); vkDestroyCommandPool = PFN_vkDestroyCommandPool( vkGetDeviceProcAddr( device, "vkDestroyCommandPool" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkDestroyDeferredOperationKHR = PFN_vkDestroyDeferredOperationKHR( vkGetDeviceProcAddr( device, "vkDestroyDeferredOperationKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkDestroyDescriptorPool = PFN_vkDestroyDescriptorPool( vkGetDeviceProcAddr( device, "vkDestroyDescriptorPool" ) ); vkDestroyDescriptorSetLayout = PFN_vkDestroyDescriptorSetLayout( vkGetDeviceProcAddr( device, "vkDestroyDescriptorSetLayout" ) ); vkDestroyDescriptorUpdateTemplateKHR = PFN_vkDestroyDescriptorUpdateTemplateKHR( vkGetDeviceProcAddr( device, "vkDestroyDescriptorUpdateTemplateKHR" ) ); @@ -91666,13 +93799,9 @@ namespace VULKAN_HPP_NAMESPACE vkFreeCommandBuffers = PFN_vkFreeCommandBuffers( vkGetDeviceProcAddr( device, "vkFreeCommandBuffers" ) ); vkFreeDescriptorSets = PFN_vkFreeDescriptorSets( vkGetDeviceProcAddr( device, "vkFreeDescriptorSets" ) ); vkFreeMemory = PFN_vkFreeMemory( vkGetDeviceProcAddr( device, "vkFreeMemory" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS + vkGetAccelerationStructureBuildSizesKHR = PFN_vkGetAccelerationStructureBuildSizesKHR( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureBuildSizesKHR" ) ); vkGetAccelerationStructureDeviceAddressKHR = PFN_vkGetAccelerationStructureDeviceAddressKHR( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureDeviceAddressKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetAccelerationStructureHandleNV = PFN_vkGetAccelerationStructureHandleNV( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureHandleNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS - vkGetAccelerationStructureMemoryRequirementsKHR = PFN_vkGetAccelerationStructureMemoryRequirementsKHR( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureMemoryRequirementsKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetAccelerationStructureMemoryRequirementsNV = PFN_vkGetAccelerationStructureMemoryRequirementsNV( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureMemoryRequirementsNV" ) ); #ifdef VK_USE_PLATFORM_ANDROID_KHR vkGetAndroidHardwareBufferPropertiesANDROID = PFN_vkGetAndroidHardwareBufferPropertiesANDROID( vkGetDeviceProcAddr( device, "vkGetAndroidHardwareBufferPropertiesANDROID" ) ); @@ -91690,18 +93819,12 @@ namespace VULKAN_HPP_NAMESPACE vkGetBufferOpaqueCaptureAddress = PFN_vkGetBufferOpaqueCaptureAddress( vkGetDeviceProcAddr( device, "vkGetBufferOpaqueCaptureAddress" ) ); if ( !vkGetBufferOpaqueCaptureAddress ) vkGetBufferOpaqueCaptureAddress = vkGetBufferOpaqueCaptureAddressKHR; vkGetCalibratedTimestampsEXT = 
PFN_vkGetCalibratedTimestampsEXT( vkGetDeviceProcAddr( device, "vkGetCalibratedTimestampsEXT" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeferredOperationMaxConcurrencyKHR = PFN_vkGetDeferredOperationMaxConcurrencyKHR( vkGetDeviceProcAddr( device, "vkGetDeferredOperationMaxConcurrencyKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeferredOperationResultKHR = PFN_vkGetDeferredOperationResultKHR( vkGetDeviceProcAddr( device, "vkGetDeferredOperationResultKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetDescriptorSetLayoutSupportKHR = PFN_vkGetDescriptorSetLayoutSupportKHR( vkGetDeviceProcAddr( device, "vkGetDescriptorSetLayoutSupportKHR" ) ); vkGetDescriptorSetLayoutSupport = PFN_vkGetDescriptorSetLayoutSupport( vkGetDeviceProcAddr( device, "vkGetDescriptorSetLayoutSupport" ) ); if ( !vkGetDescriptorSetLayoutSupport ) vkGetDescriptorSetLayoutSupport = vkGetDescriptorSetLayoutSupportKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetDeviceAccelerationStructureCompatibilityKHR = PFN_vkGetDeviceAccelerationStructureCompatibilityKHR( vkGetDeviceProcAddr( device, "vkGetDeviceAccelerationStructureCompatibilityKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetDeviceGroupPeerMemoryFeaturesKHR = PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR( vkGetDeviceProcAddr( device, "vkGetDeviceGroupPeerMemoryFeaturesKHR" ) ); vkGetDeviceGroupPeerMemoryFeatures = PFN_vkGetDeviceGroupPeerMemoryFeatures( vkGetDeviceProcAddr( device, "vkGetDeviceGroupPeerMemoryFeatures" ) ); if ( !vkGetDeviceGroupPeerMemoryFeatures ) vkGetDeviceGroupPeerMemoryFeatures = vkGetDeviceGroupPeerMemoryFeaturesKHR; @@ -91760,14 +93883,11 @@ namespace VULKAN_HPP_NAMESPACE vkGetPrivateDataEXT = PFN_vkGetPrivateDataEXT( vkGetDeviceProcAddr( device, "vkGetPrivateDataEXT" ) ); vkGetQueryPoolResults = PFN_vkGetQueryPoolResults( vkGetDeviceProcAddr( device, "vkGetQueryPoolResults" ) ); vkGetQueueCheckpointDataNV = PFN_vkGetQueueCheckpointDataNV( vkGetDeviceProcAddr( device, "vkGetQueueCheckpointDataNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ vkGetRayTracingShaderGroupHandlesNV = PFN_vkGetRayTracingShaderGroupHandlesNV( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupHandlesNV" ) ); -#ifdef VK_ENABLE_BETA_EXTENSIONS vkGetRayTracingShaderGroupHandlesKHR = PFN_vkGetRayTracingShaderGroupHandlesKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupHandlesKHR" ) ); if ( !vkGetRayTracingShaderGroupHandlesKHR ) vkGetRayTracingShaderGroupHandlesKHR = vkGetRayTracingShaderGroupHandlesNV; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + vkGetRayTracingShaderGroupStackSizeKHR = PFN_vkGetRayTracingShaderGroupStackSizeKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupStackSizeKHR" ) ); vkGetRefreshCycleDurationGOOGLE = PFN_vkGetRefreshCycleDurationGOOGLE( vkGetDeviceProcAddr( device, "vkGetRefreshCycleDurationGOOGLE" ) ); vkGetRenderAreaGranularity = PFN_vkGetRenderAreaGranularity( vkGetDeviceProcAddr( device, "vkGetRenderAreaGranularity" ) ); vkGetSemaphoreCounterValueKHR = PFN_vkGetSemaphoreCounterValueKHR( vkGetDeviceProcAddr( device, "vkGetSemaphoreCounterValueKHR" ) ); @@ -91840,9 +93960,7 @@ namespace VULKAN_HPP_NAMESPACE vkWaitSemaphoresKHR = PFN_vkWaitSemaphoresKHR( vkGetDeviceProcAddr( device, "vkWaitSemaphoresKHR" ) ); vkWaitSemaphores = PFN_vkWaitSemaphores( 
vkGetDeviceProcAddr( device, "vkWaitSemaphores" ) ); if ( !vkWaitSemaphores ) vkWaitSemaphores = vkWaitSemaphoresKHR; -#ifdef VK_ENABLE_BETA_EXTENSIONS vkWriteAccelerationStructuresPropertiesKHR = PFN_vkWriteAccelerationStructuresPropertiesKHR( vkGetDeviceProcAddr( device, "vkWriteAccelerationStructuresPropertiesKHR" ) ); -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ } }; @@ -91859,6 +93977,14 @@ namespace std } }; + template <> struct hash<VULKAN_HPP_NAMESPACE::AccelerationStructureNV> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::AccelerationStructureNV const& accelerationStructureNV) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkAccelerationStructureNV>{}(static_cast<VkAccelerationStructureNV>(accelerationStructureNV)); + } + }; + template <> struct hash<VULKAN_HPP_NAMESPACE::Buffer> { std::size_t operator()(VULKAN_HPP_NAMESPACE::Buffer const& buffer) const VULKAN_HPP_NOEXCEPT @@ -91907,7 +94033,6 @@ namespace std } }; -#ifdef VK_ENABLE_BETA_EXTENSIONS template <> struct hash<VULKAN_HPP_NAMESPACE::DeferredOperationKHR> { std::size_t operator()(VULKAN_HPP_NAMESPACE::DeferredOperationKHR const& deferredOperationKHR) const VULKAN_HPP_NOEXCEPT @@ -91915,7 +94040,6 @@ namespace std return std::hash<VkDeferredOperationKHR>{}(static_cast<VkDeferredOperationKHR>(deferredOperationKHR)); } }; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <> struct hash<VULKAN_HPP_NAMESPACE::DescriptorPool> { diff --git a/thirdparty/vulkan/include/vulkan/vulkan_beta.h b/thirdparty/vulkan/include/vulkan/vulkan_beta.h index 4b7f2b2993..23513b3270 100644 --- a/thirdparty/vulkan/include/vulkan/vulkan_beta.h +++ b/thirdparty/vulkan/include/vulkan/vulkan_beta.h @@ -49,409 +49,6 @@ typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR { } VkPhysicalDevicePortabilitySubsetPropertiesKHR; - -#define VK_KHR_deferred_host_operations 1 -VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) -#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 3 -#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" -typedef struct VkDeferredOperationInfoKHR { - VkStructureType sType; - const void* pNext; - VkDeferredOperationKHR operationHandle; -} VkDeferredOperationInfoKHR; - -typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); -typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); -typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); -typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); -typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); - -#ifndef VK_NO_PROTOTYPES -VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( - VkDevice device, - const VkAllocationCallbacks* pAllocator, - VkDeferredOperationKHR* pDeferredOperation); - -VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( - VkDevice device, - VkDeferredOperationKHR operation, - const VkAllocationCallbacks* pAllocator); - -VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( - VkDevice device, - VkDeferredOperationKHR operation); - -VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( - VkDevice device, - VkDeferredOperationKHR operation); - -VKAPI_ATTR VkResult VKAPI_CALL 
vkDeferredOperationJoinKHR( - VkDevice device, - VkDeferredOperationKHR operation); -#endif - - -#define VK_KHR_pipeline_library 1 -#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 -#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" -typedef struct VkPipelineLibraryCreateInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t libraryCount; - const VkPipeline* pLibraries; -} VkPipelineLibraryCreateInfoKHR; - - - -#define VK_KHR_ray_tracing 1 -#define VK_KHR_RAY_TRACING_SPEC_VERSION 8 -#define VK_KHR_RAY_TRACING_EXTENSION_NAME "VK_KHR_ray_tracing" - -typedef enum VkAccelerationStructureBuildTypeKHR { - VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, - VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, - VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, - VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF -} VkAccelerationStructureBuildTypeKHR; -typedef union VkDeviceOrHostAddressKHR { - VkDeviceAddress deviceAddress; - void* hostAddress; -} VkDeviceOrHostAddressKHR; - -typedef union VkDeviceOrHostAddressConstKHR { - VkDeviceAddress deviceAddress; - const void* hostAddress; -} VkDeviceOrHostAddressConstKHR; - -typedef struct VkAccelerationStructureBuildOffsetInfoKHR { - uint32_t primitiveCount; - uint32_t primitiveOffset; - uint32_t firstVertex; - uint32_t transformOffset; -} VkAccelerationStructureBuildOffsetInfoKHR; - -typedef struct VkRayTracingShaderGroupCreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkRayTracingShaderGroupTypeKHR type; - uint32_t generalShader; - uint32_t closestHitShader; - uint32_t anyHitShader; - uint32_t intersectionShader; - const void* pShaderGroupCaptureReplayHandle; -} VkRayTracingShaderGroupCreateInfoKHR; - -typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t maxPayloadSize; - uint32_t maxAttributeSize; - uint32_t maxCallableSize; -} VkRayTracingPipelineInterfaceCreateInfoKHR; - -typedef struct VkRayTracingPipelineCreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkPipelineCreateFlags flags; - uint32_t stageCount; - const VkPipelineShaderStageCreateInfo* pStages; - uint32_t groupCount; - const VkRayTracingShaderGroupCreateInfoKHR* pGroups; - uint32_t maxRecursionDepth; - VkPipelineLibraryCreateInfoKHR libraries; - const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; - VkPipelineLayout layout; - VkPipeline basePipelineHandle; - int32_t basePipelineIndex; -} VkRayTracingPipelineCreateInfoKHR; - -typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { - VkStructureType sType; - const void* pNext; - VkFormat vertexFormat; - VkDeviceOrHostAddressConstKHR vertexData; - VkDeviceSize vertexStride; - VkIndexType indexType; - VkDeviceOrHostAddressConstKHR indexData; - VkDeviceOrHostAddressConstKHR transformData; -} VkAccelerationStructureGeometryTrianglesDataKHR; - -typedef struct VkAccelerationStructureGeometryAabbsDataKHR { - VkStructureType sType; - const void* pNext; - VkDeviceOrHostAddressConstKHR data; - VkDeviceSize stride; -} VkAccelerationStructureGeometryAabbsDataKHR; - -typedef struct VkAccelerationStructureGeometryInstancesDataKHR { - VkStructureType sType; - const void* pNext; - VkBool32 arrayOfPointers; - VkDeviceOrHostAddressConstKHR data; -} VkAccelerationStructureGeometryInstancesDataKHR; - -typedef union VkAccelerationStructureGeometryDataKHR { - VkAccelerationStructureGeometryTrianglesDataKHR triangles; - VkAccelerationStructureGeometryAabbsDataKHR aabbs; - 
VkAccelerationStructureGeometryInstancesDataKHR instances; -} VkAccelerationStructureGeometryDataKHR; - -typedef struct VkAccelerationStructureGeometryKHR { - VkStructureType sType; - const void* pNext; - VkGeometryTypeKHR geometryType; - VkAccelerationStructureGeometryDataKHR geometry; - VkGeometryFlagsKHR flags; -} VkAccelerationStructureGeometryKHR; - -typedef struct VkAccelerationStructureBuildGeometryInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureTypeKHR type; - VkBuildAccelerationStructureFlagsKHR flags; - VkBool32 update; - VkAccelerationStructureKHR srcAccelerationStructure; - VkAccelerationStructureKHR dstAccelerationStructure; - VkBool32 geometryArrayOfPointers; - uint32_t geometryCount; - const VkAccelerationStructureGeometryKHR* const* ppGeometries; - VkDeviceOrHostAddressKHR scratchData; -} VkAccelerationStructureBuildGeometryInfoKHR; - -typedef struct VkAccelerationStructureCreateGeometryTypeInfoKHR { - VkStructureType sType; - const void* pNext; - VkGeometryTypeKHR geometryType; - uint32_t maxPrimitiveCount; - VkIndexType indexType; - uint32_t maxVertexCount; - VkFormat vertexFormat; - VkBool32 allowsTransforms; -} VkAccelerationStructureCreateGeometryTypeInfoKHR; - -typedef struct VkAccelerationStructureCreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkDeviceSize compactedSize; - VkAccelerationStructureTypeKHR type; - VkBuildAccelerationStructureFlagsKHR flags; - uint32_t maxGeometryCount; - const VkAccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos; - VkDeviceAddress deviceAddress; -} VkAccelerationStructureCreateInfoKHR; - -typedef struct VkAccelerationStructureMemoryRequirementsInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureMemoryRequirementsTypeKHR type; - VkAccelerationStructureBuildTypeKHR buildType; - VkAccelerationStructureKHR accelerationStructure; -} VkAccelerationStructureMemoryRequirementsInfoKHR; - -typedef struct VkPhysicalDeviceRayTracingFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 rayTracing; - VkBool32 rayTracingShaderGroupHandleCaptureReplay; - VkBool32 rayTracingShaderGroupHandleCaptureReplayMixed; - VkBool32 rayTracingAccelerationStructureCaptureReplay; - VkBool32 rayTracingIndirectTraceRays; - VkBool32 rayTracingIndirectAccelerationStructureBuild; - VkBool32 rayTracingHostAccelerationStructureCommands; - VkBool32 rayQuery; - VkBool32 rayTracingPrimitiveCulling; -} VkPhysicalDeviceRayTracingFeaturesKHR; - -typedef struct VkPhysicalDeviceRayTracingPropertiesKHR { - VkStructureType sType; - void* pNext; - uint32_t shaderGroupHandleSize; - uint32_t maxRecursionDepth; - uint32_t maxShaderGroupStride; - uint32_t shaderGroupBaseAlignment; - uint64_t maxGeometryCount; - uint64_t maxInstanceCount; - uint64_t maxPrimitiveCount; - uint32_t maxDescriptorSetAccelerationStructures; - uint32_t shaderGroupHandleCaptureReplaySize; -} VkPhysicalDeviceRayTracingPropertiesKHR; - -typedef struct VkAccelerationStructureDeviceAddressInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureKHR accelerationStructure; -} VkAccelerationStructureDeviceAddressInfoKHR; - -typedef struct VkAccelerationStructureVersionKHR { - VkStructureType sType; - const void* pNext; - const uint8_t* versionData; -} VkAccelerationStructureVersionKHR; - -typedef struct VkStridedBufferRegionKHR { - VkBuffer buffer; - VkDeviceSize offset; - VkDeviceSize stride; - VkDeviceSize size; -} VkStridedBufferRegionKHR; - -typedef struct VkTraceRaysIndirectCommandKHR { - 
uint32_t width; - uint32_t height; - uint32_t depth; -} VkTraceRaysIndirectCommandKHR; - -typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureKHR src; - VkDeviceOrHostAddressKHR dst; - VkCopyAccelerationStructureModeKHR mode; -} VkCopyAccelerationStructureToMemoryInfoKHR; - -typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { - VkStructureType sType; - const void* pNext; - VkDeviceOrHostAddressConstKHR src; - VkAccelerationStructureKHR dst; - VkCopyAccelerationStructureModeKHR mode; -} VkCopyMemoryToAccelerationStructureInfoKHR; - -typedef struct VkCopyAccelerationStructureInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureKHR src; - VkAccelerationStructureKHR dst; - VkCopyAccelerationStructureModeKHR mode; -} VkCopyAccelerationStructureInfoKHR; - -typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); -typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsKHR)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements); -typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); -typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureIndirectKHR)(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride); -typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructureKHR)(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); -typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo); -typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); -typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); -typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); -typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); -typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); -typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); -typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, 
uint32_t depth); -typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); -typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); -typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); -typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset); -typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionKHR* version); - -#ifndef VK_NO_PROTOTYPES -VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( - VkDevice device, - const VkAccelerationStructureCreateInfoKHR* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkAccelerationStructureKHR* pAccelerationStructure); - -VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsKHR( - VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, - VkMemoryRequirements2* pMemoryRequirements); - -VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureKHR( - VkCommandBuffer commandBuffer, - uint32_t infoCount, - const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, - const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); - -VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureIndirectKHR( - VkCommandBuffer commandBuffer, - const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, - VkBuffer indirectBuffer, - VkDeviceSize indirectOffset, - uint32_t indirectStride); - -VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructureKHR( - VkDevice device, - uint32_t infoCount, - const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, - const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos); - -VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( - VkDevice device, - const VkCopyAccelerationStructureInfoKHR* pInfo); - -VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( - VkDevice device, - const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); - -VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( - VkDevice device, - const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); - -VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( - VkDevice device, - uint32_t accelerationStructureCount, - const VkAccelerationStructureKHR* pAccelerationStructures, - VkQueryType queryType, - size_t dataSize, - void* pData, - size_t stride); - -VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( - VkCommandBuffer commandBuffer, - const VkCopyAccelerationStructureInfoKHR* pInfo); - -VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( - VkCommandBuffer commandBuffer, - const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); - -VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( - VkCommandBuffer commandBuffer, - const 
VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); - -VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( - VkCommandBuffer commandBuffer, - const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, - const VkStridedBufferRegionKHR* pMissShaderBindingTable, - const VkStridedBufferRegionKHR* pHitShaderBindingTable, - const VkStridedBufferRegionKHR* pCallableShaderBindingTable, - uint32_t width, - uint32_t height, - uint32_t depth); - -VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( - VkDevice device, - VkPipelineCache pipelineCache, - uint32_t createInfoCount, - const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, - const VkAllocationCallbacks* pAllocator, - VkPipeline* pPipelines); - -VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( - VkDevice device, - const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); - -VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( - VkDevice device, - VkPipeline pipeline, - uint32_t firstGroup, - uint32_t groupCount, - size_t dataSize, - void* pData); - -VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( - VkCommandBuffer commandBuffer, - const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, - const VkStridedBufferRegionKHR* pMissShaderBindingTable, - const VkStridedBufferRegionKHR* pHitShaderBindingTable, - const VkStridedBufferRegionKHR* pCallableShaderBindingTable, - VkBuffer buffer, - VkDeviceSize offset); - -VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( - VkDevice device, - const VkAccelerationStructureVersionKHR* version); -#endif - #ifdef __cplusplus } #endif diff --git a/thirdparty/vulkan/include/vulkan/vulkan_core.h b/thirdparty/vulkan/include/vulkan/vulkan_core.h index ac904bca21..cc0851f77c 100644 --- a/thirdparty/vulkan/include/vulkan/vulkan_core.h +++ b/thirdparty/vulkan/include/vulkan/vulkan_core.h @@ -43,7 +43,7 @@ extern "C" { #define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 // Version of this file -#define VK_HEADER_VERSION 154 +#define VK_HEADER_VERSION 162 // Complete version of this file #define VK_HEADER_VERSION_COMPLETE VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION) @@ -131,7 +131,6 @@ typedef enum VkResult { VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, VK_ERROR_INVALID_SHADER_NV = -1000012000, - VK_ERROR_INCOMPATIBLE_VERSION_KHR = -1000150000, VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, VK_ERROR_NOT_PERMITTED_EXT = -1000174001, VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, @@ -438,31 +437,31 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, - VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR = 1000165006, - VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000165007, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR = 1000150001, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR = 1000150008, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR = 1000150009, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009, VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR = 1000150013, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001, VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013, VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, - VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, @@ -480,6 +479,8 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, @@ -522,6 +523,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = 1000215000, VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, @@ -529,8 +531,14 @@ typedef enum 
VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002, + VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, @@ -558,7 +566,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000, - VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR = 1000268000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, @@ -578,6 +585,9 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000, + VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001, VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, @@ -590,8 +600,12 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001, + VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = 1000335000, VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 1000337000, VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = 1000337001, @@ -689,8 +703,6 @@ typedef enum VkStructureType { 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, - VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR, - VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, @@ -747,6 +759,7 @@ typedef enum VkImageLayout { VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, @@ -789,15 +802,15 @@ typedef enum VkObjectType { VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, - VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000165000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = 1000295000, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, - VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF } VkObjectType; @@ -1139,10 +1152,10 @@ typedef enum VkQueryType { VK_QUERY_TYPE_TIMESTAMP = 2, VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000, - VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000165000, - VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000, - VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR, VK_QUERY_TYPE_MAX_ENUM = 
0x7FFFFFFF } VkQueryType; @@ -1277,9 +1290,11 @@ typedef enum VkDynamicState { VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000, VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, + VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000, VK_DYNAMIC_STATE_CULL_MODE_EXT = 1000267000, VK_DYNAMIC_STATE_FRONT_FACE_EXT = 1000267001, @@ -1412,8 +1427,8 @@ typedef enum VkDescriptorType { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, - VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000165000, - VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF } VkDescriptorType; @@ -1491,6 +1506,7 @@ typedef enum VkAccessFlagBits { VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000, VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkAccessFlagBits; typedef VkFlags VkAccessFlags; @@ -1541,6 +1557,7 @@ typedef enum VkFormatFeatureFlagBits { VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, + VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, @@ -1605,6 +1622,7 @@ typedef enum VkImageUsageFlagBits { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00000100, VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, + VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageUsageFlagBits; typedef VkFlags VkImageUsageFlags; @@ -1668,8 +1686,8 @@ typedef enum VkPipelineStageFlagBits { VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, - VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, @@ -1677,6 +1695,7 @@ typedef enum VkPipelineStageFlagBits { VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, 
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineStageFlagBits; typedef VkFlags VkPipelineStageFlags; @@ -1756,8 +1775,10 @@ typedef enum VkBufferUsageFlagBits { VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, - VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR = 0x00000400, - VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000, + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF @@ -1804,6 +1825,7 @@ typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, + VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000, VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, @@ -6673,8 +6695,10 @@ typedef enum VkPerformanceCounterStorageKHR { } VkPerformanceCounterStorageKHR; typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { - VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = 0x00000001, - VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterDescriptionFlagBitsKHR; typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; @@ -7191,6 +7215,96 @@ typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryMo +#define VK_KHR_shader_terminate_invocation 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation" +typedef struct VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderTerminateInvocation; +} VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR; + + + +#define VK_KHR_fragment_shading_rate 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate" + +typedef enum VkFragmentShadingRateCombinerOpKHR { + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0, + 
VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF +} VkFragmentShadingRateCombinerOpKHR; +typedef struct VkFragmentShadingRateAttachmentInfoKHR { + VkStructureType sType; + const void* pNext; + const VkAttachmentReference2* pFragmentShadingRateAttachment; + VkExtent2D shadingRateAttachmentTexelSize; +} VkFragmentShadingRateAttachmentInfoKHR; + +typedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExtent2D fragmentSize; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateStateCreateInfoKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineFragmentShadingRate; + VkBool32 primitiveFragmentShadingRate; + VkBool32 attachmentFragmentShadingRate; +} VkPhysicalDeviceFragmentShadingRateFeaturesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRatePropertiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentShadingRateAttachmentTexelSize; + VkExtent2D maxFragmentShadingRateAttachmentTexelSize; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio; + VkBool32 primitiveFragmentShadingRateWithMultipleViewports; + VkBool32 layeredShadingRateAttachments; + VkBool32 fragmentShadingRateNonTrivialCombinerOps; + VkExtent2D maxFragmentSize; + uint32_t maxFragmentSizeAspectRatio; + uint32_t maxFragmentShadingRateCoverageSamples; + VkSampleCountFlagBits maxFragmentShadingRateRasterizationSamples; + VkBool32 fragmentShadingRateWithShaderDepthStencilWrites; + VkBool32 fragmentShadingRateWithSampleMask; + VkBool32 fragmentShadingRateWithShaderSampleMask; + VkBool32 fragmentShadingRateWithConservativeRasterization; + VkBool32 fragmentShadingRateWithFragmentShaderInterlock; + VkBool32 fragmentShadingRateWithCustomSampleLocations; + VkBool32 fragmentShadingRateStrictMultiplyCombiner; +} VkPhysicalDeviceFragmentShadingRatePropertiesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateKHR { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleCounts; + VkExtent2D fragmentSize; +} VkPhysicalDeviceFragmentShadingRateKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + #define VK_KHR_spirv_1_4 1 #define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 #define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" @@ -7257,6 +7371,41 @@ VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( #endif +#define VK_KHR_deferred_host_operations 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) 
+#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4 +#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" +typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); +typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); +typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + + #define VK_KHR_pipeline_executable_properties 1 #define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 #define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" @@ -7347,6 +7496,18 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR #endif +#define VK_KHR_pipeline_library 1 +#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" +typedef struct VkPipelineLibraryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t libraryCount; + const VkPipeline* pLibraries; +} VkPipelineLibraryCreateInfoKHR; + + + #define VK_KHR_shader_non_semantic_info 1 #define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1 #define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info" @@ -7540,12 +7701,12 @@ typedef enum VkDebugReportObjectTypeEXT { VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, - VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, - VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF } 
VkDebugReportObjectTypeEXT; @@ -8168,7 +8329,8 @@ VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( #define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" typedef enum VkSurfaceCounterFlagBitsEXT { - VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001, + VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, + VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT, VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkSurfaceCounterFlagBitsEXT; typedef VkFlags VkSurfaceCounterFlagsEXT; @@ -9135,9 +9297,7 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( #define VK_NV_ray_tracing 1 -VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) -typedef VkAccelerationStructureKHR VkAccelerationStructureNV; - +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) #define VK_NV_RAY_TRACING_SPEC_VERSION 3 #define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" #define VK_SHADER_UNUSED_KHR (~0U) @@ -9158,7 +9318,7 @@ typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; typedef enum VkGeometryTypeKHR { VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, VK_GEOMETRY_TYPE_AABBS_KHR = 1, - VK_GEOMETRY_TYPE_INSTANCES_KHR = 1000150000, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF @@ -9169,6 +9329,7 @@ typedef VkGeometryTypeKHR VkGeometryTypeNV; typedef enum VkAccelerationStructureTypeKHR { VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, + VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2, VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF @@ -9188,17 +9349,12 @@ typedef enum VkCopyAccelerationStructureModeKHR { typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; -typedef enum VkAccelerationStructureMemoryRequirementsTypeKHR { - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR = 0, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR = 1, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR = 2, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR, - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF -} VkAccelerationStructureMemoryRequirementsTypeKHR; -typedef VkAccelerationStructureMemoryRequirementsTypeKHR VkAccelerationStructureMemoryRequirementsTypeNV; - +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; typedef enum VkGeometryFlagBitsKHR { VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, @@ -9327,26 +9483,22 @@ typedef struct 
VkAccelerationStructureCreateInfoNV { VkAccelerationStructureInfoNV info; } VkAccelerationStructureCreateInfoNV; -typedef struct VkBindAccelerationStructureMemoryInfoKHR { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureKHR accelerationStructure; - VkDeviceMemory memory; - VkDeviceSize memoryOffset; - uint32_t deviceIndexCount; - const uint32_t* pDeviceIndices; -} VkBindAccelerationStructureMemoryInfoKHR; - -typedef VkBindAccelerationStructureMemoryInfoKHR VkBindAccelerationStructureMemoryInfoNV; - -typedef struct VkWriteDescriptorSetAccelerationStructureKHR { - VkStructureType sType; - const void* pNext; - uint32_t accelerationStructureCount; - const VkAccelerationStructureKHR* pAccelerationStructures; -} VkWriteDescriptorSetAccelerationStructureKHR; - -typedef VkWriteDescriptorSetAccelerationStructureKHR VkWriteDescriptorSetAccelerationStructureNV; +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { VkStructureType sType; @@ -9397,20 +9549,17 @@ typedef struct VkAccelerationStructureInstanceKHR { typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); -typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); -typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); -typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryKHR)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); -typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); -typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkBuffer scratch, VkDeviceSize scratchOffset); -typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkCopyAccelerationStructureModeKHR mode); +typedef VkResult (VKAPI_PTR 
*PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); -typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData); -typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); -typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); #ifndef VK_NO_PROTOTYPES @@ -9420,14 +9569,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); -VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( - VkDevice device, - VkAccelerationStructureKHR accelerationStructure, - const VkAllocationCallbacks* pAllocator); - VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( 
VkDevice device, - VkAccelerationStructureKHR accelerationStructure, + VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( @@ -9435,15 +9579,10 @@ VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); -VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryKHR( - VkDevice device, - uint32_t bindInfoCount, - const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); - VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, - const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos); + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, @@ -9451,15 +9590,15 @@ VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, - VkAccelerationStructureKHR dst, - VkAccelerationStructureKHR src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureKHR dst, - VkAccelerationStructureKHR src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( @@ -9505,22 +9644,14 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( VkDevice device, - VkAccelerationStructureKHR accelerationStructure, + VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); -VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( - VkCommandBuffer commandBuffer, - uint32_t accelerationStructureCount, - const VkAccelerationStructureKHR* pAccelerationStructures, - VkQueryType queryType, - VkQueryPool queryPool, - uint32_t firstQuery); - VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, - const VkAccelerationStructureKHR* pAccelerationStructures, + const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); @@ -10233,6 +10364,18 @@ typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { +#define VK_EXT_shader_image_atomic_int64 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME "VK_EXT_shader_image_atomic_int64" +typedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderImageInt64Atomics; + VkBool32 sparseImageInt64Atomics; +} VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + + + #define VK_EXT_memory_budget 1 #define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 #define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" @@ -10947,6 +11090,51 @@ typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM { +#define VK_EXT_device_memory_report 1 +#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 1 +#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME "VK_EXT_device_memory_report" + +typedef enum VkDeviceMemoryReportEventTypeEXT { + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT 
= 0, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceMemoryReportEventTypeEXT; +typedef VkFlags VkDeviceMemoryReportFlagsEXT; +typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 deviceMemoryReport; +} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT; + +typedef struct VkDeviceMemoryReportCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + VkDeviceMemoryReportEventTypeEXT type; + uint64_t memoryObjectId; + VkDeviceSize size; + VkObjectType objectType; + uint64_t objectHandle; + uint32_t heapIndex; +} VkDeviceMemoryReportCallbackDataEXT; + +typedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)( + const VkDeviceMemoryReportCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDeviceDeviceMemoryReportCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback; + void* pUserData; +} VkDeviceDeviceMemoryReportCreateInfoEXT; + + + #define VK_EXT_robustness2 1 #define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1 #define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2" @@ -11098,6 +11286,63 @@ typedef struct VkDeviceDiagnosticsConfigCreateInfoNV { #define VK_QCOM_render_pass_store_ops_EXTENSION_NAME "VK_QCOM_render_pass_store_ops" +#define VK_NV_fragment_shading_rate_enums 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums" + +typedef enum VkFragmentShadingRateTypeNV { + VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0, + VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1, + VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateTypeNV; + +typedef enum VkFragmentShadingRateNV { + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10, + VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11, + VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12, + VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13, + VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14, + VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15, + VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateNV; +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShadingRateEnums; + VkBool32 supersampleFragmentShadingRates; + VkBool32 noInvocationFragmentShadingRates; +} VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV { + VkStructureType sType; + void* pNext; + VkSampleCountFlagBits maxFragmentShadingRateInvocationCount; +} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + +typedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + 
VkFragmentShadingRateTypeNV shadingRateType; + VkFragmentShadingRateNV shadingRate; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateEnumStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV( + VkCommandBuffer commandBuffer, + VkFragmentShadingRateNV shadingRate, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + #define VK_EXT_fragment_density_map2 1 #define VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1 #define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME "VK_EXT_fragment_density_map2" @@ -11118,6 +11363,17 @@ typedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT { +#define VK_QCOM_rotated_copy_commands 1 +#define VK_QCOM_rotated_copy_commands_SPEC_VERSION 0 +#define VK_QCOM_rotated_copy_commands_EXTENSION_NAME "VK_QCOM_rotated_copy_commands" +typedef struct VkCopyCommandTransformInfoQCOM { + VkStructureType sType; + const void* pNext; + VkSurfaceTransformFlagBitsKHR transform; +} VkCopyCommandTransformInfoQCOM; + + + #define VK_EXT_image_robustness 1 #define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1 #define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness" @@ -11140,6 +11396,450 @@ typedef struct VkPhysicalDevice4444FormatsFeaturesEXT { } VkPhysicalDevice4444FormatsFeaturesEXT; + +#define VK_KHR_acceleration_structure 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) +#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 11 +#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure" + +typedef enum VkBuildAccelerationStructureModeKHR { + VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureModeKHR; + +typedef enum VkAccelerationStructureBuildTypeKHR { + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureBuildTypeKHR; + +typedef enum VkAccelerationStructureCompatibilityKHR { + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCompatibilityKHR; + +typedef enum VkAccelerationStructureCreateFlagBitsKHR { + VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001, + VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCreateFlagBitsKHR; +typedef VkFlags VkAccelerationStructureCreateFlagsKHR; +typedef union VkDeviceOrHostAddressKHR { + VkDeviceAddress deviceAddress; + void* hostAddress; +} VkDeviceOrHostAddressKHR; + +typedef union VkDeviceOrHostAddressConstKHR { + VkDeviceAddress deviceAddress; + const void* hostAddress; +} VkDeviceOrHostAddressConstKHR; + +typedef struct VkAccelerationStructureBuildRangeInfoKHR { + uint32_t primitiveCount; + uint32_t primitiveOffset; + uint32_t firstVertex; + uint32_t transformOffset; +} VkAccelerationStructureBuildRangeInfoKHR; + +typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { + VkStructureType 
sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + uint32_t maxVertex; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceOrHostAddressConstKHR transformData; +} VkAccelerationStructureGeometryTrianglesDataKHR; + +typedef struct VkAccelerationStructureGeometryAabbsDataKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR data; + VkDeviceSize stride; +} VkAccelerationStructureGeometryAabbsDataKHR; + +typedef struct VkAccelerationStructureGeometryInstancesDataKHR { + VkStructureType sType; + const void* pNext; + VkBool32 arrayOfPointers; + VkDeviceOrHostAddressConstKHR data; +} VkAccelerationStructureGeometryInstancesDataKHR; + +typedef union VkAccelerationStructureGeometryDataKHR { + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +} VkAccelerationStructureGeometryDataKHR; + +typedef struct VkAccelerationStructureGeometryKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkAccelerationStructureGeometryDataKHR geometry; + VkGeometryFlagsKHR flags; +} VkAccelerationStructureGeometryKHR; + +typedef struct VkAccelerationStructureBuildGeometryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + VkBuildAccelerationStructureModeKHR mode; + VkAccelerationStructureKHR srcAccelerationStructure; + VkAccelerationStructureKHR dstAccelerationStructure; + uint32_t geometryCount; + const VkAccelerationStructureGeometryKHR* pGeometries; + const VkAccelerationStructureGeometryKHR* const* ppGeometries; + VkDeviceOrHostAddressKHR scratchData; +} VkAccelerationStructureBuildGeometryInfoKHR; + +typedef struct VkAccelerationStructureCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureCreateFlagsKHR createFlags; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; + VkAccelerationStructureTypeKHR type; + VkDeviceAddress deviceAddress; +} VkAccelerationStructureCreateInfoKHR; + +typedef struct VkWriteDescriptorSetAccelerationStructureKHR { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureKHR* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureKHR; + +typedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 accelerationStructure; + VkBool32 accelerationStructureCaptureReplay; + VkBool32 accelerationStructureIndirectBuild; + VkBool32 accelerationStructureHostCommands; + VkBool32 descriptorBindingAccelerationStructureUpdateAfterBind; +} VkPhysicalDeviceAccelerationStructureFeaturesKHR; + +typedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { + VkStructureType sType; + void* pNext; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxPrimitiveCount; + uint32_t maxPerStageDescriptorAccelerationStructures; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures; + uint32_t maxDescriptorSetAccelerationStructures; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures; + uint32_t minAccelerationStructureScratchOffsetAlignment; +} VkPhysicalDeviceAccelerationStructurePropertiesKHR; + +typedef struct VkAccelerationStructureDeviceAddressInfoKHR { + VkStructureType sType; + const void* pNext; + 
VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureDeviceAddressInfoKHR; + +typedef struct VkAccelerationStructureVersionInfoKHR { + VkStructureType sType; + const void* pNext; + const uint8_t* pVersionData; +} VkAccelerationStructureVersionInfoKHR; + +typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkDeviceOrHostAddressKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureToMemoryInfoKHR; + +typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyMemoryToAccelerationStructureInfoKHR; + +typedef struct VkCopyAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureInfoKHR; + +typedef struct VkAccelerationStructureBuildSizesInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize accelerationStructureSize; + VkDeviceSize updateScratchSize; + VkDeviceSize buildScratchSize; +} VkAccelerationStructureBuildSizesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); +typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef 
void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts); + +VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const 
VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif + + +#define VK_KHR_ray_tracing_pipeline 1 +#define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME "VK_KHR_ray_tracing_pipeline" + +typedef enum VkShaderGroupShaderKHR { + VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, + VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, + VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, + VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, + VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF +} VkShaderGroupShaderKHR; +typedef struct VkRayTracingShaderGroupCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; + const void* pShaderGroupCaptureReplayHandle; +} VkRayTracingShaderGroupCreateInfoKHR; + +typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxPipelineRayPayloadSize; + uint32_t maxPipelineRayHitAttributeSize; +} VkRayTracingPipelineInterfaceCreateInfoKHR; + +typedef struct VkRayTracingPipelineCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoKHR* pGroups; + uint32_t maxPipelineRayRecursionDepth; + const VkPipelineLibraryCreateInfoKHR* pLibraryInfo; + const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingPipeline; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplay; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed; + VkBool32 rayTracingPipelineTraceRaysIndirect; + VkBool32 rayTraversalPrimitiveCulling; +} VkPhysicalDeviceRayTracingPipelineFeaturesKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t 
shaderGroupHandleSize; + uint32_t maxRayRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint32_t shaderGroupHandleCaptureReplaySize; + uint32_t maxRayDispatchInvocationCount; + uint32_t shaderGroupHandleAlignment; + uint32_t maxRayHitAttributeSize; +} VkPhysicalDeviceRayTracingPipelinePropertiesKHR; + +typedef struct VkStridedDeviceAddressRegionKHR { + VkDeviceAddress deviceAddress; + VkDeviceSize stride; + VkDeviceSize size; +} VkStridedDeviceAddressRegionKHR; + +typedef struct VkTraceRaysIndirectCommandKHR { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommandKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); +typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader); +typedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + 
VkDeviceAddress indirectDeviceAddress); + +VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize); +#endif + + +#define VK_KHR_ray_query 1 +#define VK_KHR_RAY_QUERY_SPEC_VERSION 1 +#define VK_KHR_RAY_QUERY_EXTENSION_NAME "VK_KHR_ray_query" +typedef struct VkPhysicalDeviceRayQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayQuery; +} VkPhysicalDeviceRayQueryFeaturesKHR; + + #ifdef __cplusplus } #endif diff --git a/thirdparty/vulkan/loader/loader.c b/thirdparty/vulkan/loader/loader.c index 86ac79d963..158097a8aa 100644 --- a/thirdparty/vulkan/loader/loader.c +++ b/thirdparty/vulkan/loader/loader.c @@ -253,7 +253,7 @@ void *loader_device_heap_realloc(const struct loader_device *device, void *pMemo } // Environment variables -#if defined(__linux__) || defined(__APPLE__) +#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) static inline bool IsHighIntegrity() { return geteuid() != getuid() || getegid() != getgid(); @@ -277,6 +277,8 @@ static inline char *loader_secure_getenv(const char *name, const struct loader_i // This algorithm is derived from glibc code that sets an internal // variable (__libc_enable_secure) if the process is running under setuid or setgid. return IsHighIntegrity() ? NULL : loader_getenv(name, inst); +#elif defined(__Fuchsia__) + return loader_getenv(name, inst); #else // Linux #if defined(HAVE_SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH) @@ -287,13 +289,12 @@ static inline char *loader_secure_getenv(const char *name, const struct loader_i out = __secure_getenv(name); #else out = loader_getenv(name, inst); +#if !defined(USE_UNSAFE_FILE_SEARCH) + loader_log(inst, LOADER_INFO_BIT, 0, "Loader is using non-secure environment variable lookup for %s", name); #endif #endif - if (out == NULL) { - loader_log(inst, LOADER_INFO_BIT, 0, - "Loader is running with elevated permissions. Environment variable %s will be ignored.", name); - } return out; +#endif } static inline void loader_free_getenv(char *val, const struct loader_instance *inst) { @@ -355,8 +356,8 @@ static inline char *loader_getenv(const char *name, const struct loader_instance static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) { #if !defined(USE_UNSAFE_FILE_SEARCH) if (IsHighIntegrity()) { - loader_log(inst, LOADER_INFO_BIT, 0, - "Loader is running with elevated permissions. Environment variable %s will be ignored.", name); + loader_log(inst, LOADER_INFO_BIT, 0, "Loader is running with elevated permissions. Environment variable %s will be ignored", + name); return NULL; } #endif @@ -2317,7 +2318,11 @@ static VkResult loader_scanned_icd_add(const struct loader_instance *inst, struc // TODO implement smarter opening/closing of libraries. 
For now this // function leaves libraries open and the scanned_icd_clear closes them +#if defined(__Fuchsia__) + handle = loader_platform_open_driver(filename); +#else handle = loader_platform_open_library(filename); +#endif if (NULL == handle) { loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(filename)); goto out; @@ -2647,7 +2652,12 @@ static VkResult loader_get_json(const struct loader_instance *inst, const char * res = VK_ERROR_INITIALIZATION_FAILED; goto out; } - fseek(file, 0, SEEK_END); + // NOTE: We can't just use fseek(file, 0, SEEK_END) because that isn't guaranteed to be supported on all systems + do { + // We're just seeking the end of the file, so this buffer is never used + char buffer[256]; + fread(buffer, 1, sizeof(buffer), file); + } while (!feof(file)); len = ftell(file); fseek(file, 0, SEEK_SET); json_buf = (char *)loader_stack_alloc(len + 1); @@ -3189,30 +3199,31 @@ static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct l name); } else { props->num_blacklist_layers = cJSON_GetArraySize(blacklisted_layers); - - // Allocate the blacklist array - props->blacklist_layer_names = loader_instance_heap_alloc( - inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); - if (props->blacklist_layer_names == NULL) { - result = VK_ERROR_OUT_OF_HOST_MEMORY; - goto out; - } - - // Copy the blacklisted layers into the array - for (i = 0; i < (int)props->num_blacklist_layers; ++i) { - cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i); - if (black_layer == NULL) { - continue; - } - temp = cJSON_Print(black_layer); - if (temp == NULL) { + if (props->num_blacklist_layers > 0) { + // Allocate the blacklist array + props->blacklist_layer_names = loader_instance_heap_alloc( + inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (props->blacklist_layer_names == NULL) { result = VK_ERROR_OUT_OF_HOST_MEMORY; goto out; } - temp[strlen(temp) - 1] = '\0'; - strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1); - props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0'; - cJSON_Free(temp); + + // Copy the blacklisted layers into the array + for (i = 0; i < (int)props->num_blacklist_layers; ++i) { + cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i); + if (black_layer == NULL) { + continue; + } + temp = cJSON_Print(black_layer); + if (temp == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1); + props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); + } } } } @@ -3226,28 +3237,29 @@ static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct l } int count = cJSON_GetArraySize(override_paths); props->num_override_paths = count; + if (count > 0) { + // Allocate buffer for override paths + props->override_paths = + loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == props->override_paths) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } - // Allocate buffer for override paths - props->override_paths = - loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); - if (NULL == props->override_paths) { - result = VK_ERROR_OUT_OF_HOST_MEMORY; - goto out; - } - - // Copy the override paths into the array - 
for (i = 0; i < count; i++) { - cJSON *override_path = cJSON_GetArrayItem(override_paths, i); - if (NULL != override_path) { - temp = cJSON_Print(override_path); - if (NULL == temp) { - result = VK_ERROR_OUT_OF_HOST_MEMORY; - goto out; + // Copy the override paths into the array + for (i = 0; i < count; i++) { + cJSON *override_path = cJSON_GetArrayItem(override_paths, i); + if (NULL != override_path) { + temp = cJSON_Print(override_path); + if (NULL == temp) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1); + props->override_paths[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); } - temp[strlen(temp) - 1] = '\0'; - strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1); - props->override_paths[i][MAX_STRING_SIZE - 1] = '\0'; - cJSON_Free(temp); } } } @@ -3953,12 +3965,14 @@ static VkResult ReadDataFilesInSearchPaths(const struct loader_instance *inst, e if (xdgdatadirs == NULL) { xdgdata_alloc = false; } +#if !defined(__Fuchsia__) if (xdgconfdirs == NULL || xdgconfdirs[0] == '\0') { xdgconfdirs = FALLBACK_CONFIG_DIRS; } if (xdgdatadirs == NULL || xdgdatadirs[0] == '\0') { xdgdatadirs = FALLBACK_DATA_DIRS; } +#endif // Only use HOME if XDG_DATA_HOME is not present on the system if (NULL == xdgdatahome) { @@ -4306,7 +4320,6 @@ out: static VkResult ReadDataFilesInRegistry(const struct loader_instance *inst, enum loader_data_files_type data_file_type, bool warn_if_not_present, char *registry_location, struct loader_data_files *out_files) { VkResult vk_result = VK_SUCCESS; - bool is_icd = (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD); char *search_path = NULL; // These calls look at the PNP/Device section of the registry. @@ -6044,7 +6057,7 @@ VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, c VkLoaderFeatureFlags feature_flags = 0; #if defined(_WIN32) IDXGIFactory6* dxgi_factory = NULL; - HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, &dxgi_factory); + HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory); if (hres == S_OK) { feature_flags |= VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING; dxgi_factory->lpVtbl->Release(dxgi_factory); @@ -7073,7 +7086,7 @@ VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSo uint32_t sorted_alloc = 0; struct loader_icd_term *icd_term = NULL; IDXGIFactory6* dxgi_factory = NULL; - HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, &dxgi_factory); + HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory); if (hres != S_OK) { loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to create DXGI factory 6. 
Physical devices will not be sorted"); } @@ -7090,7 +7103,7 @@ VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSo *sorted_count = 0; for (uint32_t i = 0; ; ++i) { IDXGIAdapter1* adapter; - hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, &adapter); + hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, (void **)&adapter); if (hres == DXGI_ERROR_NOT_FOUND) { break; // No more adapters } diff --git a/thirdparty/vulkan/loader/loader.h b/thirdparty/vulkan/loader/loader.h index a3c1f6b635..847bc6101e 100644 --- a/thirdparty/vulkan/loader/loader.h +++ b/thirdparty/vulkan/loader/loader.h @@ -338,10 +338,16 @@ struct loader_instance { #ifdef VK_USE_PLATFORM_IOS_MVK bool wsi_ios_surface_enabled; #endif +#ifdef VK_USE_PLATFORM_GGP + bool wsi_ggp_surface_enabled; +#endif bool wsi_headless_surface_enabled; #if defined(VK_USE_PLATFORM_METAL_EXT) bool wsi_metal_surface_enabled; #endif +#ifdef VK_USE_PLATFORM_FUCHSIA + bool wsi_imagepipe_surface_enabled; +#endif bool wsi_display_enabled; bool wsi_display_props2_enabled; }; diff --git a/thirdparty/vulkan/loader/vk_dispatch_table_helper.h b/thirdparty/vulkan/loader/vk_dispatch_table_helper.h index e6026aa698..0eabfdb79c 100644 --- a/thirdparty/vulkan/loader/vk_dispatch_table_helper.h +++ b/thirdparty/vulkan/loader/vk_dispatch_table_helper.h @@ -91,24 +91,15 @@ static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndexedIndirectCountKHR(VkCommandBu static VKAPI_ATTR VkResult VKAPI_CALL StubGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t* pValue) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { }; static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; }; static VKAPI_ATTR uint64_t VKAPI_CALL StubGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; }; static VKAPI_ATTR uint64_t VKAPI_CALL StubGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) { return 0L; }; -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR VkResult VKAPI_CALL StubCreateDeferredOperationKHR(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR void VKAPI_CALL StubDestroyDeferredOperationKHR(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR uint32_t VKAPI_CALL StubGetDeferredOperationMaxConcurrencyKHR(VkDevice device, VkDeferredOperationKHR operation) { return 0; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeferredOperationResultKHR(VkDevice device, VkDeferredOperationKHR 
operation) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR VkResult VKAPI_CALL StubDeferredOperationJoinKHR(VkDevice device, VkDeferredOperationKHR operation) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableInternalRepresentationsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) { return VK_SUCCESS; }; @@ -164,20 +155,17 @@ static VKAPI_ATTR void VKAPI_CALL StubCmdBindShadingRateImageNV(VkCommandBuffer static VKAPI_ATTR void VKAPI_CALL StubCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes) { }; static VKAPI_ATTR void VKAPI_CALL StubCmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) { }; static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure) { return VK_SUCCESS; }; -static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; -static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureMemoryRequirementsNV(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) { }; -static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryKHR(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { return VK_SUCCESS; }; -static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { return VK_SUCCESS; }; -static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkBuffer scratch, VkDeviceSize scratchOffset) { }; -static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, 
VkCopyAccelerationStructureModeKHR mode) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode) { }; static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) { }; static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesNV(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; -static VKAPI_ATTR VkResult VKAPI_CALL StubGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData) { return VK_SUCCESS; }; -static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; -static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; static VKAPI_ATTR VkResult VKAPI_CALL StubCompileDeferredNV(VkDevice device, VkPipeline pipeline, uint32_t shader) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryHostPointerPropertiesEXT(VkDevice device, 
VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) { return VK_SUCCESS; }; static VKAPI_ATTR void VKAPI_CALL StubCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) { }; @@ -232,60 +220,29 @@ static VKAPI_ATTR VkResult VKAPI_CALL StubCreatePrivateDataSlotEXT(VkDevice devi static VKAPI_ATTR void VKAPI_CALL StubDestroyPrivateDataSlotEXT(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator) { }; static VKAPI_ATTR VkResult VKAPI_CALL StubSetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data) { return VK_SUCCESS; }; static VKAPI_ATTR void VKAPI_CALL StubGetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData) { }; -#ifdef VK_ENABLE_BETA_EXTENSIONS +static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateEnumNV(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { }; static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureMemoryRequirementsKHR(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureIndirectKHR(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubBuildAccelerationStructureKHR(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureKHR(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureToMemoryKHR(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubCopyMemoryToAccelerationStructureKHR(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS 
-#ifdef VK_ENABLE_BETA_EXTENSIONS +static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBuildAccelerationStructuresKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureToMemoryKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyMemoryToAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureToMemoryKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR void VKAPI_CALL StubCmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesKHR(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return 
VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetAccelerationStructureDeviceAddressKHR(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { return 0L; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS +static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetDeviceAccelerationStructureCompatibilityKHR(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureBuildSizesKHR(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; }; static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset) { }; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeviceAccelerationStructureCompatibilityKHR(VkDevice device, const VkAccelerationStructureVersionKHR* version) { return VK_SUCCESS; }; -#endif // VK_ENABLE_BETA_EXTENSIONS +static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) { }; +static VKAPI_ATTR VkDeviceSize VKAPI_CALL StubGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) { return 0L; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) 
{ }; @@ -554,32 +511,24 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp if (table->WaitSemaphoresKHR == nullptr) { table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)StubWaitSemaphoresKHR; } table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR) gpa(device, "vkSignalSemaphoreKHR"); if (table->SignalSemaphoreKHR == nullptr) { table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)StubSignalSemaphoreKHR; } + table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR) gpa(device, "vkCmdSetFragmentShadingRateKHR"); + if (table->CmdSetFragmentShadingRateKHR == nullptr) { table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)StubCmdSetFragmentShadingRateKHR; } table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR) gpa(device, "vkGetBufferDeviceAddressKHR"); if (table->GetBufferDeviceAddressKHR == nullptr) { table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)StubGetBufferDeviceAddressKHR; } table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR) gpa(device, "vkGetBufferOpaqueCaptureAddressKHR"); if (table->GetBufferOpaqueCaptureAddressKHR == nullptr) { table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)StubGetBufferOpaqueCaptureAddressKHR; } table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR) gpa(device, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); if (table->GetDeviceMemoryOpaqueCaptureAddressKHR == nullptr) { table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)StubGetDeviceMemoryOpaqueCaptureAddressKHR; } -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR) gpa(device, "vkCreateDeferredOperationKHR"); if (table->CreateDeferredOperationKHR == nullptr) { table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)StubCreateDeferredOperationKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR) gpa(device, "vkDestroyDeferredOperationKHR"); if (table->DestroyDeferredOperationKHR == nullptr) { table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)StubDestroyDeferredOperationKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR) gpa(device, "vkGetDeferredOperationMaxConcurrencyKHR"); if (table->GetDeferredOperationMaxConcurrencyKHR == nullptr) { table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)StubGetDeferredOperationMaxConcurrencyKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR) gpa(device, "vkGetDeferredOperationResultKHR"); if (table->GetDeferredOperationResultKHR == nullptr) { table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)StubGetDeferredOperationResultKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR) gpa(device, "vkDeferredOperationJoinKHR"); if (table->DeferredOperationJoinKHR == nullptr) { table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)StubDeferredOperationJoinKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS table->GetPipelineExecutablePropertiesKHR = 
(PFN_vkGetPipelineExecutablePropertiesKHR) gpa(device, "vkGetPipelineExecutablePropertiesKHR"); if (table->GetPipelineExecutablePropertiesKHR == nullptr) { table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)StubGetPipelineExecutablePropertiesKHR; } table->GetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR) gpa(device, "vkGetPipelineExecutableStatisticsKHR"); @@ -692,14 +641,10 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp if (table->CmdSetCoarseSampleOrderNV == nullptr) { table->CmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)StubCmdSetCoarseSampleOrderNV; } table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV) gpa(device, "vkCreateAccelerationStructureNV"); if (table->CreateAccelerationStructureNV == nullptr) { table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)StubCreateAccelerationStructureNV; } - table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR) gpa(device, "vkDestroyAccelerationStructureKHR"); - if (table->DestroyAccelerationStructureKHR == nullptr) { table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)StubDestroyAccelerationStructureKHR; } table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV) gpa(device, "vkDestroyAccelerationStructureNV"); if (table->DestroyAccelerationStructureNV == nullptr) { table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)StubDestroyAccelerationStructureNV; } table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV) gpa(device, "vkGetAccelerationStructureMemoryRequirementsNV"); if (table->GetAccelerationStructureMemoryRequirementsNV == nullptr) { table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)StubGetAccelerationStructureMemoryRequirementsNV; } - table->BindAccelerationStructureMemoryKHR = (PFN_vkBindAccelerationStructureMemoryKHR) gpa(device, "vkBindAccelerationStructureMemoryKHR"); - if (table->BindAccelerationStructureMemoryKHR == nullptr) { table->BindAccelerationStructureMemoryKHR = (PFN_vkBindAccelerationStructureMemoryKHR)StubBindAccelerationStructureMemoryKHR; } table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV) gpa(device, "vkBindAccelerationStructureMemoryNV"); if (table->BindAccelerationStructureMemoryNV == nullptr) { table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)StubBindAccelerationStructureMemoryNV; } table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV) gpa(device, "vkCmdBuildAccelerationStructureNV"); @@ -716,8 +661,6 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp if (table->GetRayTracingShaderGroupHandlesNV == nullptr) { table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)StubGetRayTracingShaderGroupHandlesNV; } table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV) gpa(device, "vkGetAccelerationStructureHandleNV"); if (table->GetAccelerationStructureHandleNV == nullptr) { table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)StubGetAccelerationStructureHandleNV; } - table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesKHR"); - if 
(table->CmdWriteAccelerationStructuresPropertiesKHR == nullptr) { table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)StubCmdWriteAccelerationStructuresPropertiesKHR; } table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesNV"); if (table->CmdWriteAccelerationStructuresPropertiesNV == nullptr) { table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)StubCmdWriteAccelerationStructuresPropertiesNV; } table->CompileDeferredNV = (PFN_vkCompileDeferredNV) gpa(device, "vkCompileDeferredNV"); @@ -822,78 +765,52 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp if (table->SetPrivateDataEXT == nullptr) { table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)StubSetPrivateDataEXT; } table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT) gpa(device, "vkGetPrivateDataEXT"); if (table->GetPrivateDataEXT == nullptr) { table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)StubGetPrivateDataEXT; } -#ifdef VK_ENABLE_BETA_EXTENSIONS + table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV) gpa(device, "vkCmdSetFragmentShadingRateEnumNV"); + if (table->CmdSetFragmentShadingRateEnumNV == nullptr) { table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)StubCmdSetFragmentShadingRateEnumNV; } table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR) gpa(device, "vkCreateAccelerationStructureKHR"); if (table->CreateAccelerationStructureKHR == nullptr) { table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)StubCreateAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetAccelerationStructureMemoryRequirementsKHR = (PFN_vkGetAccelerationStructureMemoryRequirementsKHR) gpa(device, "vkGetAccelerationStructureMemoryRequirementsKHR"); - if (table->GetAccelerationStructureMemoryRequirementsKHR == nullptr) { table->GetAccelerationStructureMemoryRequirementsKHR = (PFN_vkGetAccelerationStructureMemoryRequirementsKHR)StubGetAccelerationStructureMemoryRequirementsKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->CmdBuildAccelerationStructureKHR = (PFN_vkCmdBuildAccelerationStructureKHR) gpa(device, "vkCmdBuildAccelerationStructureKHR"); - if (table->CmdBuildAccelerationStructureKHR == nullptr) { table->CmdBuildAccelerationStructureKHR = (PFN_vkCmdBuildAccelerationStructureKHR)StubCmdBuildAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->CmdBuildAccelerationStructureIndirectKHR = (PFN_vkCmdBuildAccelerationStructureIndirectKHR) gpa(device, "vkCmdBuildAccelerationStructureIndirectKHR"); - if (table->CmdBuildAccelerationStructureIndirectKHR == nullptr) { table->CmdBuildAccelerationStructureIndirectKHR = (PFN_vkCmdBuildAccelerationStructureIndirectKHR)StubCmdBuildAccelerationStructureIndirectKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->BuildAccelerationStructureKHR = (PFN_vkBuildAccelerationStructureKHR) gpa(device, "vkBuildAccelerationStructureKHR"); - if (table->BuildAccelerationStructureKHR == nullptr) { table->BuildAccelerationStructureKHR = (PFN_vkBuildAccelerationStructureKHR)StubBuildAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + 
table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR) gpa(device, "vkDestroyAccelerationStructureKHR"); + if (table->DestroyAccelerationStructureKHR == nullptr) { table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)StubDestroyAccelerationStructureKHR; } + table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR) gpa(device, "vkCmdBuildAccelerationStructuresKHR"); + if (table->CmdBuildAccelerationStructuresKHR == nullptr) { table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)StubCmdBuildAccelerationStructuresKHR; } + table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR) gpa(device, "vkCmdBuildAccelerationStructuresIndirectKHR"); + if (table->CmdBuildAccelerationStructuresIndirectKHR == nullptr) { table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)StubCmdBuildAccelerationStructuresIndirectKHR; } + table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR) gpa(device, "vkBuildAccelerationStructuresKHR"); + if (table->BuildAccelerationStructuresKHR == nullptr) { table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)StubBuildAccelerationStructuresKHR; } table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR) gpa(device, "vkCopyAccelerationStructureKHR"); if (table->CopyAccelerationStructureKHR == nullptr) { table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)StubCopyAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR) gpa(device, "vkCopyAccelerationStructureToMemoryKHR"); if (table->CopyAccelerationStructureToMemoryKHR == nullptr) { table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)StubCopyAccelerationStructureToMemoryKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCopyMemoryToAccelerationStructureKHR"); if (table->CopyMemoryToAccelerationStructureKHR == nullptr) { table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)StubCopyMemoryToAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkWriteAccelerationStructuresPropertiesKHR"); if (table->WriteAccelerationStructuresPropertiesKHR == nullptr) { table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)StubWriteAccelerationStructuresPropertiesKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR) gpa(device, "vkCmdCopyAccelerationStructureKHR"); if (table->CmdCopyAccelerationStructureKHR == nullptr) { table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)StubCmdCopyAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR) gpa(device, "vkCmdCopyAccelerationStructureToMemoryKHR"); if (table->CmdCopyAccelerationStructureToMemoryKHR == nullptr) { 
table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)StubCmdCopyAccelerationStructureToMemoryKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCmdCopyMemoryToAccelerationStructureKHR"); if (table->CmdCopyMemoryToAccelerationStructureKHR == nullptr) { table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)StubCmdCopyMemoryToAccelerationStructureKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR) gpa(device, "vkGetAccelerationStructureDeviceAddressKHR"); + if (table->GetAccelerationStructureDeviceAddressKHR == nullptr) { table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)StubGetAccelerationStructureDeviceAddressKHR; } + table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesKHR"); + if (table->CmdWriteAccelerationStructuresPropertiesKHR == nullptr) { table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)StubCmdWriteAccelerationStructuresPropertiesKHR; } + table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR) gpa(device, "vkGetDeviceAccelerationStructureCompatibilityKHR"); + if (table->GetDeviceAccelerationStructureCompatibilityKHR == nullptr) { table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)StubGetDeviceAccelerationStructureCompatibilityKHR; } + table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR) gpa(device, "vkGetAccelerationStructureBuildSizesKHR"); + if (table->GetAccelerationStructureBuildSizesKHR == nullptr) { table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)StubGetAccelerationStructureBuildSizesKHR; } table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR) gpa(device, "vkCmdTraceRaysKHR"); if (table->CmdTraceRaysKHR == nullptr) { table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)StubCmdTraceRaysKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR) gpa(device, "vkCreateRayTracingPipelinesKHR"); if (table->CreateRayTracingPipelinesKHR == nullptr) { table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)StubCreateRayTracingPipelinesKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR) gpa(device, "vkGetAccelerationStructureDeviceAddressKHR"); - if (table->GetAccelerationStructureDeviceAddressKHR == nullptr) { table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)StubGetAccelerationStructureDeviceAddressKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR) gpa(device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); if (table->GetRayTracingCaptureReplayShaderGroupHandlesKHR == nullptr) { 
table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)StubGetRayTracingCaptureReplayShaderGroupHandlesKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR) gpa(device, "vkCmdTraceRaysIndirectKHR"); if (table->CmdTraceRaysIndirectKHR == nullptr) { table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)StubCmdTraceRaysIndirectKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR) gpa(device, "vkGetDeviceAccelerationStructureCompatibilityKHR"); - if (table->GetDeviceAccelerationStructureCompatibilityKHR == nullptr) { table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)StubGetDeviceAccelerationStructureCompatibilityKHR; } -#endif // VK_ENABLE_BETA_EXTENSIONS + table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR) gpa(device, "vkGetRayTracingShaderGroupStackSizeKHR"); + if (table->GetRayTracingShaderGroupStackSizeKHR == nullptr) { table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)StubGetRayTracingShaderGroupStackSizeKHR; } + table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR) gpa(device, "vkCmdSetRayTracingPipelineStackSizeKHR"); + if (table->CmdSetRayTracingPipelineStackSizeKHR == nullptr) { table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)StubCmdSetRayTracingPipelineStackSizeKHR; } } @@ -982,6 +899,7 @@ static inline void layer_init_instance_dispatch_table(VkInstance instance, VkLay table->GetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR) gpa(instance, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"); table->GetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR) gpa(instance, "vkGetDisplayModeProperties2KHR"); table->GetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR) gpa(instance, "vkGetDisplayPlaneCapabilities2KHR"); + table->GetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR) gpa(instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR"); table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) gpa(instance, "vkCreateDebugReportCallbackEXT"); table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) gpa(instance, "vkDestroyDebugReportCallbackEXT"); table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) gpa(instance, "vkDebugReportMessageEXT"); diff --git a/thirdparty/vulkan/loader/vk_layer_dispatch_table.h b/thirdparty/vulkan/loader/vk_layer_dispatch_table.h index d774098d66..45b20db45c 100644 --- a/thirdparty/vulkan/loader/vk_layer_dispatch_table.h +++ b/thirdparty/vulkan/loader/vk_layer_dispatch_table.h @@ -154,6 +154,9 @@ typedef struct VkLayerInstanceDispatchTable_ { PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR; PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR; + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR; + // ---- VK_EXT_debug_report extension commands PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT; PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT; @@ 
-506,27 +509,20 @@ typedef struct VkLayerDispatchTable_ { PFN_vkWaitSemaphoresKHR WaitSemaphoresKHR; PFN_vkSignalSemaphoreKHR SignalSemaphoreKHR; + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkCmdSetFragmentShadingRateKHR CmdSetFragmentShadingRateKHR; + // ---- VK_KHR_buffer_device_address extension commands PFN_vkGetBufferDeviceAddressKHR GetBufferDeviceAddressKHR; PFN_vkGetBufferOpaqueCaptureAddressKHR GetBufferOpaqueCaptureAddressKHR; PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR GetDeviceMemoryOpaqueCaptureAddressKHR; // ---- VK_KHR_deferred_host_operations extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCreateDeferredOperationKHR CreateDeferredOperationKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkDestroyDeferredOperationKHR DestroyDeferredOperationKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetDeferredOperationMaxConcurrencyKHR GetDeferredOperationMaxConcurrencyKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetDeferredOperationResultKHR GetDeferredOperationResultKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkDeferredOperationJoinKHR DeferredOperationJoinKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS // ---- VK_KHR_pipeline_executable_properties extension commands PFN_vkGetPipelineExecutablePropertiesKHR GetPipelineExecutablePropertiesKHR; @@ -632,10 +628,8 @@ typedef struct VkLayerDispatchTable_ { // ---- VK_NV_ray_tracing extension commands PFN_vkCreateAccelerationStructureNV CreateAccelerationStructureNV; - PFN_vkDestroyAccelerationStructureKHR DestroyAccelerationStructureKHR; PFN_vkDestroyAccelerationStructureNV DestroyAccelerationStructureNV; PFN_vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV; - PFN_vkBindAccelerationStructureMemoryKHR BindAccelerationStructureMemoryKHR; PFN_vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV; PFN_vkCmdBuildAccelerationStructureNV CmdBuildAccelerationStructureNV; PFN_vkCmdCopyAccelerationStructureNV CmdCopyAccelerationStructureNV; @@ -644,7 +638,6 @@ typedef struct VkLayerDispatchTable_ { PFN_vkGetRayTracingShaderGroupHandlesKHR GetRayTracingShaderGroupHandlesKHR; PFN_vkGetRayTracingShaderGroupHandlesNV GetRayTracingShaderGroupHandlesNV; PFN_vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV; - PFN_vkCmdWriteAccelerationStructuresPropertiesKHR CmdWriteAccelerationStructuresPropertiesKHR; PFN_vkCmdWriteAccelerationStructuresPropertiesNV CmdWriteAccelerationStructuresPropertiesNV; PFN_vkCompileDeferredNV CompileDeferredNV; @@ -731,61 +724,34 @@ typedef struct VkLayerDispatchTable_ { PFN_vkSetPrivateDataEXT SetPrivateDataEXT; PFN_vkGetPrivateDataEXT GetPrivateDataEXT; - // ---- VK_KHR_ray_tracing extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS + // ---- VK_NV_fragment_shading_rate_enums extension commands + PFN_vkCmdSetFragmentShadingRateEnumNV CmdSetFragmentShadingRateEnumNV; + + // ---- VK_KHR_acceleration_structure extension commands PFN_vkCreateAccelerationStructureKHR CreateAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkGetAccelerationStructureMemoryRequirementsKHR GetAccelerationStructureMemoryRequirementsKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkCmdBuildAccelerationStructureKHR CmdBuildAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - 
PFN_vkCmdBuildAccelerationStructureIndirectKHR CmdBuildAccelerationStructureIndirectKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkBuildAccelerationStructureKHR BuildAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + PFN_vkDestroyAccelerationStructureKHR DestroyAccelerationStructureKHR; + PFN_vkCmdBuildAccelerationStructuresKHR CmdBuildAccelerationStructuresKHR; + PFN_vkCmdBuildAccelerationStructuresIndirectKHR CmdBuildAccelerationStructuresIndirectKHR; + PFN_vkBuildAccelerationStructuresKHR BuildAccelerationStructuresKHR; PFN_vkCopyAccelerationStructureKHR CopyAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCopyAccelerationStructureToMemoryKHR CopyAccelerationStructureToMemoryKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCopyMemoryToAccelerationStructureKHR CopyMemoryToAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkWriteAccelerationStructuresPropertiesKHR WriteAccelerationStructuresPropertiesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyAccelerationStructureKHR CmdCopyAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyAccelerationStructureToMemoryKHR CmdCopyAccelerationStructureToMemoryKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdCopyMemoryToAccelerationStructureKHR CmdCopyMemoryToAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + PFN_vkGetAccelerationStructureDeviceAddressKHR GetAccelerationStructureDeviceAddressKHR; + PFN_vkCmdWriteAccelerationStructuresPropertiesKHR CmdWriteAccelerationStructuresPropertiesKHR; + PFN_vkGetDeviceAccelerationStructureCompatibilityKHR GetDeviceAccelerationStructureCompatibilityKHR; + PFN_vkGetAccelerationStructureBuildSizesKHR GetAccelerationStructureBuildSizesKHR; + + // ---- VK_KHR_ray_tracing_pipeline extension commands PFN_vkCmdTraceRaysKHR CmdTraceRaysKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCreateRayTracingPipelinesKHR CreateRayTracingPipelinesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkGetAccelerationStructureDeviceAddressKHR GetAccelerationStructureDeviceAddressKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR GetRayTracingCaptureReplayShaderGroupHandlesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS PFN_vkCmdTraceRaysIndirectKHR CmdTraceRaysIndirectKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - PFN_vkGetDeviceAccelerationStructureCompatibilityKHR GetDeviceAccelerationStructureCompatibilityKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS + PFN_vkGetRayTracingShaderGroupStackSizeKHR GetRayTracingShaderGroupStackSizeKHR; + PFN_vkCmdSetRayTracingPipelineStackSizeKHR CmdSetRayTracingPipelineStackSizeKHR; } VkLayerDispatchTable; diff --git a/thirdparty/vulkan/loader/vk_loader_extensions.c b/thirdparty/vulkan/loader/vk_loader_extensions.c index 72d4027281..7a37d0ec8b 100644 --- a/thirdparty/vulkan/loader/vk_loader_extensions.c +++ b/thirdparty/vulkan/loader/vk_loader_extensions.c @@ -186,6 +186,9 @@ VKAPI_ATTR bool VKAPI_CALL loader_icd_init_entries(struct loader_icd_term *icd_t LOOKUP_GIPA(GetDisplayModeProperties2KHR, false); 
LOOKUP_GIPA(GetDisplayPlaneCapabilities2KHR, false); + // ---- VK_KHR_fragment_shading_rate extension commands + LOOKUP_GIPA(GetPhysicalDeviceFragmentShadingRatesKHR, false); + // ---- VK_EXT_debug_report extension commands LOOKUP_GIPA(CreateDebugReportCallbackEXT, false); LOOKUP_GIPA(DestroyDebugReportCallbackEXT, false); @@ -569,27 +572,20 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)gdpa(dev, "vkWaitSemaphoresKHR"); table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)gdpa(dev, "vkSignalSemaphoreKHR"); + // ---- VK_KHR_fragment_shading_rate extension commands + table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)gdpa(dev, "vkCmdSetFragmentShadingRateKHR"); + // ---- VK_KHR_buffer_device_address extension commands table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)gdpa(dev, "vkGetBufferDeviceAddressKHR"); table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)gdpa(dev, "vkGetBufferOpaqueCaptureAddressKHR"); table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)gdpa(dev, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); // ---- VK_KHR_deferred_host_operations extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)gdpa(dev, "vkCreateDeferredOperationKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)gdpa(dev, "vkDestroyDeferredOperationKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)gdpa(dev, "vkGetDeferredOperationMaxConcurrencyKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)gdpa(dev, "vkGetDeferredOperationResultKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)gdpa(dev, "vkDeferredOperationJoinKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS // ---- VK_KHR_pipeline_executable_properties extension commands table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)gdpa(dev, "vkGetPipelineExecutablePropertiesKHR"); @@ -695,10 +691,8 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo // ---- VK_NV_ray_tracing extension commands table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)gdpa(dev, "vkCreateAccelerationStructureNV"); - table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)gdpa(dev, "vkDestroyAccelerationStructureKHR"); table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)gdpa(dev, "vkDestroyAccelerationStructureNV"); table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)gdpa(dev, "vkGetAccelerationStructureMemoryRequirementsNV"); - table->BindAccelerationStructureMemoryKHR = (PFN_vkBindAccelerationStructureMemoryKHR)gdpa(dev, "vkBindAccelerationStructureMemoryKHR"); table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)gdpa(dev, "vkBindAccelerationStructureMemoryNV"); table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)gdpa(dev, "vkCmdBuildAccelerationStructureNV"); 
table->CmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)gdpa(dev, "vkCmdCopyAccelerationStructureNV"); @@ -707,7 +701,6 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo table->GetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)gdpa(dev, "vkGetRayTracingShaderGroupHandlesKHR"); table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)gdpa(dev, "vkGetRayTracingShaderGroupHandlesNV"); table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)gdpa(dev, "vkGetAccelerationStructureHandleNV"); - table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)gdpa(dev, "vkCmdWriteAccelerationStructuresPropertiesKHR"); table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)gdpa(dev, "vkCmdWriteAccelerationStructuresPropertiesNV"); table->CompileDeferredNV = (PFN_vkCompileDeferredNV)gdpa(dev, "vkCompileDeferredNV"); @@ -794,61 +787,34 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)gdpa(dev, "vkSetPrivateDataEXT"); table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)gdpa(dev, "vkGetPrivateDataEXT"); - // ---- VK_KHR_ray_tracing extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS + // ---- VK_NV_fragment_shading_rate_enums extension commands + table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)gdpa(dev, "vkCmdSetFragmentShadingRateEnumNV"); + + // ---- VK_KHR_acceleration_structure extension commands table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)gdpa(dev, "vkCreateAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetAccelerationStructureMemoryRequirementsKHR = (PFN_vkGetAccelerationStructureMemoryRequirementsKHR)gdpa(dev, "vkGetAccelerationStructureMemoryRequirementsKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->CmdBuildAccelerationStructureKHR = (PFN_vkCmdBuildAccelerationStructureKHR)gdpa(dev, "vkCmdBuildAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->CmdBuildAccelerationStructureIndirectKHR = (PFN_vkCmdBuildAccelerationStructureIndirectKHR)gdpa(dev, "vkCmdBuildAccelerationStructureIndirectKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->BuildAccelerationStructureKHR = (PFN_vkBuildAccelerationStructureKHR)gdpa(dev, "vkBuildAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)gdpa(dev, "vkDestroyAccelerationStructureKHR"); + table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)gdpa(dev, "vkCmdBuildAccelerationStructuresKHR"); + table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)gdpa(dev, "vkCmdBuildAccelerationStructuresIndirectKHR"); + table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)gdpa(dev, "vkBuildAccelerationStructuresKHR"); table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)gdpa(dev, "vkCopyAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CopyAccelerationStructureToMemoryKHR = 
(PFN_vkCopyAccelerationStructureToMemoryKHR)gdpa(dev, "vkCopyAccelerationStructureToMemoryKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)gdpa(dev, "vkCopyMemoryToAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)gdpa(dev, "vkWriteAccelerationStructuresPropertiesKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)gdpa(dev, "vkCmdCopyAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)gdpa(dev, "vkCmdCopyAccelerationStructureToMemoryKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)gdpa(dev, "vkCmdCopyMemoryToAccelerationStructureKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)gdpa(dev, "vkGetAccelerationStructureDeviceAddressKHR"); + table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)gdpa(dev, "vkCmdWriteAccelerationStructuresPropertiesKHR"); + table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)gdpa(dev, "vkGetDeviceAccelerationStructureCompatibilityKHR"); + table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)gdpa(dev, "vkGetAccelerationStructureBuildSizesKHR"); + + // ---- VK_KHR_ray_tracing_pipeline extension commands table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)gdpa(dev, "vkCmdTraceRaysKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)gdpa(dev, "vkCreateRayTracingPipelinesKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)gdpa(dev, "vkGetAccelerationStructureDeviceAddressKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)gdpa(dev, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)gdpa(dev, "vkCmdTraceRaysIndirectKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)gdpa(dev, "vkGetDeviceAccelerationStructureCompatibilityKHR"); -#endif // VK_ENABLE_BETA_EXTENSIONS + table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)gdpa(dev, "vkGetRayTracingShaderGroupStackSizeKHR"); + table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)gdpa(dev, "vkCmdSetRayTracingPipelineStackSizeKHR"); } // Init Instance function pointer dispatch table with core commands @@ -978,6 +944,9 @@ VKAPI_ATTR 
void VKAPI_CALL loader_init_instance_extension_dispatch_table(VkLayer table->GetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR)gpa(inst, "vkGetDisplayModeProperties2KHR"); table->GetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR)gpa(inst, "vkGetDisplayPlaneCapabilities2KHR"); + // ---- VK_KHR_fragment_shading_rate extension commands + table->GetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)gpa(inst, "vkGetPhysicalDeviceFragmentShadingRatesKHR"); + // ---- VK_EXT_debug_report extension commands table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)gpa(inst, "vkCreateDebugReportCallbackEXT"); table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)gpa(inst, "vkDestroyDebugReportCallbackEXT"); @@ -1333,27 +1302,20 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis if (!strcmp(name, "WaitSemaphoresKHR")) return (void *)table->WaitSemaphoresKHR; if (!strcmp(name, "SignalSemaphoreKHR")) return (void *)table->SignalSemaphoreKHR; + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp(name, "CmdSetFragmentShadingRateKHR")) return (void *)table->CmdSetFragmentShadingRateKHR; + // ---- VK_KHR_buffer_device_address extension commands if (!strcmp(name, "GetBufferDeviceAddressKHR")) return (void *)table->GetBufferDeviceAddressKHR; if (!strcmp(name, "GetBufferOpaqueCaptureAddressKHR")) return (void *)table->GetBufferOpaqueCaptureAddressKHR; if (!strcmp(name, "GetDeviceMemoryOpaqueCaptureAddressKHR")) return (void *)table->GetDeviceMemoryOpaqueCaptureAddressKHR; // ---- VK_KHR_deferred_host_operations extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CreateDeferredOperationKHR")) return (void *)table->CreateDeferredOperationKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "DestroyDeferredOperationKHR")) return (void *)table->DestroyDeferredOperationKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "GetDeferredOperationMaxConcurrencyKHR")) return (void *)table->GetDeferredOperationMaxConcurrencyKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "GetDeferredOperationResultKHR")) return (void *)table->GetDeferredOperationResultKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "DeferredOperationJoinKHR")) return (void *)table->DeferredOperationJoinKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS // ---- VK_KHR_pipeline_executable_properties extension commands if (!strcmp(name, "GetPipelineExecutablePropertiesKHR")) return (void *)table->GetPipelineExecutablePropertiesKHR; @@ -1459,10 +1421,8 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis // ---- VK_NV_ray_tracing extension commands if (!strcmp(name, "CreateAccelerationStructureNV")) return (void *)table->CreateAccelerationStructureNV; - if (!strcmp(name, "DestroyAccelerationStructureKHR")) return (void *)table->DestroyAccelerationStructureKHR; if (!strcmp(name, "DestroyAccelerationStructureNV")) return (void *)table->DestroyAccelerationStructureNV; if (!strcmp(name, "GetAccelerationStructureMemoryRequirementsNV")) return (void *)table->GetAccelerationStructureMemoryRequirementsNV; - if (!strcmp(name, "BindAccelerationStructureMemoryKHR")) return (void *)table->BindAccelerationStructureMemoryKHR; if (!strcmp(name, "BindAccelerationStructureMemoryNV")) return (void 
*)table->BindAccelerationStructureMemoryNV; if (!strcmp(name, "CmdBuildAccelerationStructureNV")) return (void *)table->CmdBuildAccelerationStructureNV; if (!strcmp(name, "CmdCopyAccelerationStructureNV")) return (void *)table->CmdCopyAccelerationStructureNV; @@ -1471,7 +1431,6 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis if (!strcmp(name, "GetRayTracingShaderGroupHandlesKHR")) return (void *)table->GetRayTracingShaderGroupHandlesKHR; if (!strcmp(name, "GetRayTracingShaderGroupHandlesNV")) return (void *)table->GetRayTracingShaderGroupHandlesNV; if (!strcmp(name, "GetAccelerationStructureHandleNV")) return (void *)table->GetAccelerationStructureHandleNV; - if (!strcmp(name, "CmdWriteAccelerationStructuresPropertiesKHR")) return (void *)table->CmdWriteAccelerationStructuresPropertiesKHR; if (!strcmp(name, "CmdWriteAccelerationStructuresPropertiesNV")) return (void *)table->CmdWriteAccelerationStructuresPropertiesNV; if (!strcmp(name, "CompileDeferredNV")) return (void *)table->CompileDeferredNV; @@ -1558,61 +1517,34 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis if (!strcmp(name, "SetPrivateDataEXT")) return (void *)table->SetPrivateDataEXT; if (!strcmp(name, "GetPrivateDataEXT")) return (void *)table->GetPrivateDataEXT; - // ---- VK_KHR_ray_tracing extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS + // ---- VK_NV_fragment_shading_rate_enums extension commands + if (!strcmp(name, "CmdSetFragmentShadingRateEnumNV")) return (void *)table->CmdSetFragmentShadingRateEnumNV; + + // ---- VK_KHR_acceleration_structure extension commands if (!strcmp(name, "CreateAccelerationStructureKHR")) return (void *)table->CreateAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "GetAccelerationStructureMemoryRequirementsKHR")) return (void *)table->GetAccelerationStructureMemoryRequirementsKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "CmdBuildAccelerationStructureKHR")) return (void *)table->CmdBuildAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "CmdBuildAccelerationStructureIndirectKHR")) return (void *)table->CmdBuildAccelerationStructureIndirectKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "BuildAccelerationStructureKHR")) return (void *)table->BuildAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + if (!strcmp(name, "DestroyAccelerationStructureKHR")) return (void *)table->DestroyAccelerationStructureKHR; + if (!strcmp(name, "CmdBuildAccelerationStructuresKHR")) return (void *)table->CmdBuildAccelerationStructuresKHR; + if (!strcmp(name, "CmdBuildAccelerationStructuresIndirectKHR")) return (void *)table->CmdBuildAccelerationStructuresIndirectKHR; + if (!strcmp(name, "BuildAccelerationStructuresKHR")) return (void *)table->BuildAccelerationStructuresKHR; if (!strcmp(name, "CopyAccelerationStructureKHR")) return (void *)table->CopyAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CopyAccelerationStructureToMemoryKHR")) return (void *)table->CopyAccelerationStructureToMemoryKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CopyMemoryToAccelerationStructureKHR")) return (void *)table->CopyMemoryToAccelerationStructureKHR; -#endif // 
VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "WriteAccelerationStructuresPropertiesKHR")) return (void *)table->WriteAccelerationStructuresPropertiesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CmdCopyAccelerationStructureKHR")) return (void *)table->CmdCopyAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CmdCopyAccelerationStructureToMemoryKHR")) return (void *)table->CmdCopyAccelerationStructureToMemoryKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CmdCopyMemoryToAccelerationStructureKHR")) return (void *)table->CmdCopyMemoryToAccelerationStructureKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + if (!strcmp(name, "GetAccelerationStructureDeviceAddressKHR")) return (void *)table->GetAccelerationStructureDeviceAddressKHR; + if (!strcmp(name, "CmdWriteAccelerationStructuresPropertiesKHR")) return (void *)table->CmdWriteAccelerationStructuresPropertiesKHR; + if (!strcmp(name, "GetDeviceAccelerationStructureCompatibilityKHR")) return (void *)table->GetDeviceAccelerationStructureCompatibilityKHR; + if (!strcmp(name, "GetAccelerationStructureBuildSizesKHR")) return (void *)table->GetAccelerationStructureBuildSizesKHR; + + // ---- VK_KHR_ray_tracing_pipeline extension commands if (!strcmp(name, "CmdTraceRaysKHR")) return (void *)table->CmdTraceRaysKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CreateRayTracingPipelinesKHR")) return (void *)table->CreateRayTracingPipelinesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "GetAccelerationStructureDeviceAddressKHR")) return (void *)table->GetAccelerationStructureDeviceAddressKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "GetRayTracingCaptureReplayShaderGroupHandlesKHR")) return (void *)table->GetRayTracingCaptureReplayShaderGroupHandlesKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp(name, "CmdTraceRaysIndirectKHR")) return (void *)table->CmdTraceRaysIndirectKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp(name, "GetDeviceAccelerationStructureCompatibilityKHR")) return (void *)table->GetDeviceAccelerationStructureCompatibilityKHR; -#endif // VK_ENABLE_BETA_EXTENSIONS + if (!strcmp(name, "GetRayTracingShaderGroupStackSizeKHR")) return (void *)table->GetRayTracingShaderGroupStackSizeKHR; + if (!strcmp(name, "CmdSetRayTracingPipelineStackSizeKHR")) return (void *)table->CmdSetRayTracingPipelineStackSizeKHR; return NULL; } @@ -1746,6 +1678,9 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_instance_dispatch_table(const VkLayerI if (!strcmp(name, "GetDisplayModeProperties2KHR")) return (void *)table->GetDisplayModeProperties2KHR; if (!strcmp(name, "GetDisplayPlaneCapabilities2KHR")) return (void *)table->GetDisplayPlaneCapabilities2KHR; + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp(name, "GetPhysicalDeviceFragmentShadingRatesKHR")) return (void *)table->GetPhysicalDeviceFragmentShadingRatesKHR; + // ---- VK_EXT_debug_report extension commands if (!strcmp(name, "CreateDebugReportCallbackEXT")) return (void *)table->CreateDebugReportCallbackEXT; if (!strcmp(name, "DestroyDebugReportCallbackEXT")) return (void *)table->DestroyDebugReportCallbackEXT; @@ -2301,6 +2236,40 @@ VKAPI_ATTR VkResult 
VKAPI_CALL SignalSemaphoreKHR( } +// ---- VK_KHR_fragment_shading_rate extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceFragmentShadingRatesKHR(unwrapped_phys_dev, pFragmentShadingRateCount, pFragmentShadingRates); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceFragmentShadingRatesKHR) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceFragmentShadingRatesKHR"); + } + return icd_term->dispatch.GetPhysicalDeviceFragmentShadingRatesKHR(phys_dev_term->phys_dev, pFragmentShadingRateCount, pFragmentShadingRates); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetFragmentShadingRateKHR(commandBuffer, pFragmentSize, combinerOps); +} + + // ---- VK_KHR_buffer_device_address extension trampoline/terminators VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressKHR( @@ -2327,7 +2296,6 @@ VKAPI_ATTR uint64_t VKAPI_CALL GetDeviceMemoryOpaqueCaptureAddressKHR( // ---- VK_KHR_deferred_host_operations extension trampoline/terminators -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CreateDeferredOperationKHR( VkDevice device, const VkAllocationCallbacks* pAllocator, @@ -2336,8 +2304,6 @@ VKAPI_ATTR VkResult VKAPI_CALL CreateDeferredOperationKHR( return disp->CreateDeferredOperationKHR(device, pAllocator, pDeferredOperation); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR void VKAPI_CALL DestroyDeferredOperationKHR( VkDevice device, VkDeferredOperationKHR operation, @@ -2346,8 +2312,6 @@ VKAPI_ATTR void VKAPI_CALL DestroyDeferredOperationKHR( disp->DestroyDeferredOperationKHR(device, operation, pAllocator); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR uint32_t VKAPI_CALL GetDeferredOperationMaxConcurrencyKHR( VkDevice device, VkDeferredOperationKHR operation) { @@ -2355,8 +2319,6 @@ VKAPI_ATTR uint32_t VKAPI_CALL GetDeferredOperationMaxConcurrencyKHR( return disp->GetDeferredOperationMaxConcurrencyKHR(device, operation); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL GetDeferredOperationResultKHR( VkDevice device, VkDeferredOperationKHR operation) { @@ -2364,8 +2326,6 @@ VKAPI_ATTR VkResult VKAPI_CALL GetDeferredOperationResultKHR( return disp->GetDeferredOperationResultKHR(device, operation); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL 
DeferredOperationJoinKHR( VkDevice device, VkDeferredOperationKHR operation) { @@ -2373,7 +2333,6 @@ VKAPI_ATTR VkResult VKAPI_CALL DeferredOperationJoinKHR( return disp->DeferredOperationJoinKHR(device, operation); } -#endif // VK_ENABLE_BETA_EXTENSIONS // ---- VK_KHR_pipeline_executable_properties extension trampoline/terminators @@ -2681,28 +2640,6 @@ VKAPI_ATTR VkResult VKAPI_CALL GetShaderInfoAMD( } -// ---- VK_GGP_stream_descriptor_surface extension trampoline/terminators - -#ifdef VK_USE_PLATFORM_GGP -VKAPI_ATTR VkResult VKAPI_CALL CreateStreamDescriptorSurfaceGGP( - VkInstance instance, - const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkSurfaceKHR* pSurface) { -#error("Not implemented. Likely needs to be manually generated!"); - return disp->CreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface); -} - -VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateStreamDescriptorSurfaceGGP( - VkInstance instance, - const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkSurfaceKHR* pSurface) { -#error("Not implemented. Likely needs to be manually generated!"); -} - -#endif // VK_USE_PLATFORM_GGP - // ---- VK_NV_external_memory_win32 extension trampoline/terminators #ifdef VK_USE_PLATFORM_WIN32_KHR @@ -3206,17 +3143,9 @@ VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureNV( return disp->CreateAccelerationStructureNV(device, pCreateInfo, pAllocator, pAccelerationStructure); } -VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureKHR( - VkDevice device, - VkAccelerationStructureKHR accelerationStructure, - const VkAllocationCallbacks* pAllocator) { - const VkLayerDispatchTable *disp = loader_get_dispatch(device); - disp->DestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator); -} - VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureNV( VkDevice device, - VkAccelerationStructureKHR accelerationStructure, + VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); disp->DestroyAccelerationStructureNV(device, accelerationStructure, pAllocator); @@ -3230,18 +3159,10 @@ VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureMemoryRequirementsNV( disp->GetAccelerationStructureMemoryRequirementsNV(device, pInfo, pMemoryRequirements); } -VKAPI_ATTR VkResult VKAPI_CALL BindAccelerationStructureMemoryKHR( - VkDevice device, - uint32_t bindInfoCount, - const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { - const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->BindAccelerationStructureMemoryKHR(device, bindInfoCount, pBindInfos); -} - VKAPI_ATTR VkResult VKAPI_CALL BindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, - const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); return disp->BindAccelerationStructureMemoryNV(device, bindInfoCount, pBindInfos); } @@ -3252,8 +3173,8 @@ VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureNV( VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, - VkAccelerationStructureKHR dst, - VkAccelerationStructureKHR src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) { const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); @@ -3262,8 +3183,8 
@@ VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureNV( VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureKHR dst, - VkAccelerationStructureKHR src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode) { const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); disp->CmdCopyAccelerationStructureNV(commandBuffer, dst, src, mode); @@ -3324,28 +3245,17 @@ VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingShaderGroupHandlesNV( VKAPI_ATTR VkResult VKAPI_CALL GetAccelerationStructureHandleNV( VkDevice device, - VkAccelerationStructureKHR accelerationStructure, + VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); return disp->GetAccelerationStructureHandleNV(device, accelerationStructure, dataSize, pData); } -VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesKHR( - VkCommandBuffer commandBuffer, - uint32_t accelerationStructureCount, - const VkAccelerationStructureKHR* pAccelerationStructures, - VkQueryType queryType, - VkQueryPool queryPool, - uint32_t firstQuery) { - const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); - disp->CmdWriteAccelerationStructuresPropertiesKHR(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); -} - VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, - const VkAccelerationStructureKHR* pAccelerationStructures, + const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { @@ -3564,28 +3474,6 @@ VKAPI_ATTR void VKAPI_CALL SetLocalDimmingAMD( } -// ---- VK_FUCHSIA_imagepipe_surface extension trampoline/terminators - -#ifdef VK_USE_PLATFORM_FUCHSIA -VKAPI_ATTR VkResult VKAPI_CALL CreateImagePipeSurfaceFUCHSIA( - VkInstance instance, - const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkSurfaceKHR* pSurface) { -#error("Not implemented. Likely needs to be manually generated!"); - return disp->CreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface); -} - -VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA( - VkInstance instance, - const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkSurfaceKHR* pSurface) { -#error("Not implemented. 
Likely needs to be manually generated!"); -} - -#endif // VK_USE_PLATFORM_FUCHSIA - // ---- VK_EXT_buffer_device_address extension trampoline/terminators VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressEXT( @@ -3882,9 +3770,19 @@ VKAPI_ATTR void VKAPI_CALL GetPrivateDataEXT( } -// ---- VK_KHR_ray_tracing extension trampoline/terminators +// ---- VK_NV_fragment_shading_rate_enums extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetFragmentShadingRateEnumNV( + VkCommandBuffer commandBuffer, + VkFragmentShadingRateNV shadingRate, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetFragmentShadingRateEnumNV(commandBuffer, shadingRate, combinerOps); +} + + +// ---- VK_KHR_acceleration_structure extension trampoline/terminators -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureKHR( VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, @@ -3894,79 +3792,68 @@ VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureKHR( return disp->CreateAccelerationStructureKHR(device, pCreateInfo, pAllocator, pAccelerationStructure); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureMemoryRequirementsKHR( +VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureKHR( VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, - VkMemoryRequirements2* pMemoryRequirements) { + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - disp->GetAccelerationStructureMemoryRequirementsKHR(device, pInfo, pMemoryRequirements); + disp->DestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureKHR( +VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, - const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); - disp->CmdBuildAccelerationStructureKHR(commandBuffer, infoCount, pInfos, ppOffsetInfos); + disp->CmdBuildAccelerationStructuresKHR(commandBuffer, infoCount, pInfos, ppBuildRangeInfos); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureIndirectKHR( +VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructuresIndirectKHR( VkCommandBuffer commandBuffer, - const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, - VkBuffer indirectBuffer, - VkDeviceSize indirectOffset, - uint32_t indirectStride) { + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts) { const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); - disp->CmdBuildAccelerationStructureIndirectKHR(commandBuffer, pInfo, indirectBuffer, indirectOffset, indirectStride); + disp->CmdBuildAccelerationStructuresIndirectKHR(commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, 
ppMaxPrimitiveCounts); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR VkResult VKAPI_CALL BuildAccelerationStructureKHR( +VKAPI_ATTR VkResult VKAPI_CALL BuildAccelerationStructuresKHR( VkDevice device, + VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, - const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->BuildAccelerationStructureKHR(device, infoCount, pInfos, ppOffsetInfos); + return disp->BuildAccelerationStructuresKHR(device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CopyAccelerationStructureKHR( VkDevice device, + VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->CopyAccelerationStructureKHR(device, pInfo); + return disp->CopyAccelerationStructureKHR(device, deferredOperation, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CopyAccelerationStructureToMemoryKHR( VkDevice device, + VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->CopyAccelerationStructureToMemoryKHR(device, pInfo); + return disp->CopyAccelerationStructureToMemoryKHR(device, deferredOperation, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CopyMemoryToAccelerationStructureKHR( VkDevice device, + VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->CopyMemoryToAccelerationStructureKHR(device, pInfo); + return disp->CopyMemoryToAccelerationStructureKHR(device, deferredOperation, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL WriteAccelerationStructuresPropertiesKHR( VkDevice device, uint32_t accelerationStructureCount, @@ -3979,8 +3866,6 @@ VKAPI_ATTR VkResult VKAPI_CALL WriteAccelerationStructuresPropertiesKHR( return disp->WriteAccelerationStructuresPropertiesKHR(device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo) { @@ -3988,8 +3873,6 @@ VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureKHR( disp->CmdCopyAccelerationStructureKHR(commandBuffer, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { @@ -3997,8 +3880,6 @@ VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureToMemoryKHR( disp->CmdCopyAccelerationStructureToMemoryKHR(commandBuffer, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR void VKAPI_CALL CmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer 
commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { @@ -4006,14 +3887,51 @@ VKAPI_ATTR void VKAPI_CALL CmdCopyMemoryToAccelerationStructureKHR( disp->CmdCopyMemoryToAccelerationStructureKHR(commandBuffer, pInfo); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS +VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetAccelerationStructureDeviceAddressKHR(device, pInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdWriteAccelerationStructuresPropertiesKHR(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); +} + +VKAPI_ATTR void VKAPI_CALL GetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDeviceAccelerationStructureCompatibilityKHR(device, pVersionInfo, pCompatibility); +} + +VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo); +} + + +// ---- VK_KHR_ray_tracing_pipeline extension trampoline/terminators + VKAPI_ATTR void VKAPI_CALL CmdTraceRaysKHR( VkCommandBuffer commandBuffer, - const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, - const VkStridedBufferRegionKHR* pMissShaderBindingTable, - const VkStridedBufferRegionKHR* pHitShaderBindingTable, - const VkStridedBufferRegionKHR* pCallableShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { @@ -4021,30 +3939,18 @@ VKAPI_ATTR void VKAPI_CALL CmdTraceRaysKHR( disp->CmdTraceRaysKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesKHR( VkDevice device, + VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->CreateRayTracingPipelinesKHR(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); -} - -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef 
VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetAccelerationStructureDeviceAddressKHR( - VkDevice device, - const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { - const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->GetAccelerationStructureDeviceAddressKHR(device, pInfo); + return disp->CreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingCaptureReplayShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, @@ -4056,30 +3962,33 @@ VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingCaptureReplayShaderGroupHandlesKHR( return disp->GetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS VKAPI_ATTR void VKAPI_CALL CmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, - const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, - const VkStridedBufferRegionKHR* pMissShaderBindingTable, - const VkStridedBufferRegionKHR* pHitShaderBindingTable, - const VkStridedBufferRegionKHR* pCallableShaderBindingTable, - VkBuffer buffer, - VkDeviceSize offset) { + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + VkDeviceAddress indirectDeviceAddress) { const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); - disp->CmdTraceRaysIndirectKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, buffer, offset); + disp->CmdTraceRaysIndirectKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress); } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS -VKAPI_ATTR VkResult VKAPI_CALL GetDeviceAccelerationStructureCompatibilityKHR( +VKAPI_ATTR VkDeviceSize VKAPI_CALL GetRayTracingShaderGroupStackSizeKHR( VkDevice device, - const VkAccelerationStructureVersionKHR* version) { + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader) { const VkLayerDispatchTable *disp = loader_get_dispatch(device); - return disp->GetDeviceAccelerationStructureCompatibilityKHR(device, version); + return disp->GetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetRayTracingPipelineStackSizeKHR(commandBuffer, pipelineStackSize); } -#endif // VK_ENABLE_BETA_EXTENSIONS // GPA helpers for extensions bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr) { *addr = NULL; @@ -4383,6 +4292,16 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na return true; } + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp("vkGetPhysicalDeviceFragmentShadingRatesKHR", name)) { + *addr = (void *)GetPhysicalDeviceFragmentShadingRatesKHR; + return true; + } + if (!strcmp("vkCmdSetFragmentShadingRateKHR", name)) { + *addr = (void 
*)CmdSetFragmentShadingRateKHR; + return true; + } + // ---- VK_KHR_buffer_device_address extension commands if (!strcmp("vkGetBufferDeviceAddressKHR", name)) { *addr = (void *)GetBufferDeviceAddressKHR; @@ -4398,36 +4317,26 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na } // ---- VK_KHR_deferred_host_operations extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCreateDeferredOperationKHR", name)) { *addr = (void *)CreateDeferredOperationKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkDestroyDeferredOperationKHR", name)) { *addr = (void *)DestroyDeferredOperationKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkGetDeferredOperationMaxConcurrencyKHR", name)) { *addr = (void *)GetDeferredOperationMaxConcurrencyKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkGetDeferredOperationResultKHR", name)) { *addr = (void *)GetDeferredOperationResultKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkDeferredOperationJoinKHR", name)) { *addr = (void *)DeferredOperationJoinKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS // ---- VK_KHR_pipeline_executable_properties extension commands if (!strcmp("vkGetPipelineExecutablePropertiesKHR", name)) { @@ -4543,16 +4452,6 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na return true; } - // ---- VK_GGP_stream_descriptor_surface extension commands -#ifdef VK_USE_PLATFORM_GGP - if (!strcmp("vkCreateStreamDescriptorSurfaceGGP", name)) { - *addr = (ptr_instance->enabled_known_extensions.ggp_stream_descriptor_surface == 1) - ? 
(void *)CreateStreamDescriptorSurfaceGGP - : NULL; - return true; - } -#endif // VK_USE_PLATFORM_GGP - // ---- VK_NV_external_memory_capabilities extension commands if (!strcmp("vkGetPhysicalDeviceExternalImageFormatPropertiesNV", name)) { *addr = (ptr_instance->enabled_known_extensions.nv_external_memory_capabilities == 1) @@ -4786,10 +4685,6 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na *addr = (void *)CreateAccelerationStructureNV; return true; } - if (!strcmp("vkDestroyAccelerationStructureKHR", name)) { - *addr = (void *)DestroyAccelerationStructureKHR; - return true; - } if (!strcmp("vkDestroyAccelerationStructureNV", name)) { *addr = (void *)DestroyAccelerationStructureNV; return true; @@ -4798,10 +4693,6 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na *addr = (void *)GetAccelerationStructureMemoryRequirementsNV; return true; } - if (!strcmp("vkBindAccelerationStructureMemoryKHR", name)) { - *addr = (void *)BindAccelerationStructureMemoryKHR; - return true; - } if (!strcmp("vkBindAccelerationStructureMemoryNV", name)) { *addr = (void *)BindAccelerationStructureMemoryNV; return true; @@ -4834,10 +4725,6 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na *addr = (void *)GetAccelerationStructureHandleNV; return true; } - if (!strcmp("vkCmdWriteAccelerationStructuresPropertiesKHR", name)) { - *addr = (void *)CmdWriteAccelerationStructuresPropertiesKHR; - return true; - } if (!strcmp("vkCmdWriteAccelerationStructuresPropertiesNV", name)) { *addr = (void *)CmdWriteAccelerationStructuresPropertiesNV; return true; @@ -4943,16 +4830,6 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na return true; } - // ---- VK_FUCHSIA_imagepipe_surface extension commands -#ifdef VK_USE_PLATFORM_FUCHSIA - if (!strcmp("vkCreateImagePipeSurfaceFUCHSIA", name)) { - *addr = (ptr_instance->enabled_known_extensions.fuchsia_imagepipe_surface == 1) - ? 
(void *)CreateImagePipeSurfaceFUCHSIA - : NULL; - return true; - } -#endif // VK_USE_PLATFORM_FUCHSIA - // ---- VK_EXT_buffer_device_address extension commands if (!strcmp("vkGetBufferDeviceAddressEXT", name)) { *addr = (void *)GetBufferDeviceAddressEXT; @@ -5109,115 +4986,103 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na return true; } - // ---- VK_KHR_ray_tracing extension commands -#ifdef VK_ENABLE_BETA_EXTENSIONS + // ---- VK_NV_fragment_shading_rate_enums extension commands + if (!strcmp("vkCmdSetFragmentShadingRateEnumNV", name)) { + *addr = (void *)CmdSetFragmentShadingRateEnumNV; + return true; + } + + // ---- VK_KHR_acceleration_structure extension commands if (!strcmp("vkCreateAccelerationStructureKHR", name)) { *addr = (void *)CreateAccelerationStructureKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkGetAccelerationStructureMemoryRequirementsKHR", name)) { - *addr = (void *)GetAccelerationStructureMemoryRequirementsKHR; + if (!strcmp("vkDestroyAccelerationStructureKHR", name)) { + *addr = (void *)DestroyAccelerationStructureKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkCmdBuildAccelerationStructureKHR", name)) { - *addr = (void *)CmdBuildAccelerationStructureKHR; + if (!strcmp("vkCmdBuildAccelerationStructuresKHR", name)) { + *addr = (void *)CmdBuildAccelerationStructuresKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkCmdBuildAccelerationStructureIndirectKHR", name)) { - *addr = (void *)CmdBuildAccelerationStructureIndirectKHR; + if (!strcmp("vkCmdBuildAccelerationStructuresIndirectKHR", name)) { + *addr = (void *)CmdBuildAccelerationStructuresIndirectKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkBuildAccelerationStructureKHR", name)) { - *addr = (void *)BuildAccelerationStructureKHR; + if (!strcmp("vkBuildAccelerationStructuresKHR", name)) { + *addr = (void *)BuildAccelerationStructuresKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCopyAccelerationStructureKHR", name)) { *addr = (void *)CopyAccelerationStructureKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCopyAccelerationStructureToMemoryKHR", name)) { *addr = (void *)CopyAccelerationStructureToMemoryKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCopyMemoryToAccelerationStructureKHR", name)) { *addr = (void *)CopyMemoryToAccelerationStructureKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkWriteAccelerationStructuresPropertiesKHR", name)) { *addr = (void *)WriteAccelerationStructuresPropertiesKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCmdCopyAccelerationStructureKHR", name)) { *addr = (void *)CmdCopyAccelerationStructureKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCmdCopyAccelerationStructureToMemoryKHR", name)) { *addr = (void *)CmdCopyAccelerationStructureToMemoryKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCmdCopyMemoryToAccelerationStructureKHR", name)) { *addr = (void *)CmdCopyMemoryToAccelerationStructureKHR; return true; } -#endif 
// VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS + if (!strcmp("vkGetAccelerationStructureDeviceAddressKHR", name)) { + *addr = (void *)GetAccelerationStructureDeviceAddressKHR; + return true; + } + if (!strcmp("vkCmdWriteAccelerationStructuresPropertiesKHR", name)) { + *addr = (void *)CmdWriteAccelerationStructuresPropertiesKHR; + return true; + } + if (!strcmp("vkGetDeviceAccelerationStructureCompatibilityKHR", name)) { + *addr = (void *)GetDeviceAccelerationStructureCompatibilityKHR; + return true; + } + if (!strcmp("vkGetAccelerationStructureBuildSizesKHR", name)) { + *addr = (void *)GetAccelerationStructureBuildSizesKHR; + return true; + } + + // ---- VK_KHR_ray_tracing_pipeline extension commands if (!strcmp("vkCmdTraceRaysKHR", name)) { *addr = (void *)CmdTraceRaysKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCreateRayTracingPipelinesKHR", name)) { *addr = (void *)CreateRayTracingPipelinesKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkGetAccelerationStructureDeviceAddressKHR", name)) { - *addr = (void *)GetAccelerationStructureDeviceAddressKHR; - return true; - } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkGetRayTracingCaptureReplayShaderGroupHandlesKHR", name)) { *addr = (void *)GetRayTracingCaptureReplayShaderGroupHandlesKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS if (!strcmp("vkCmdTraceRaysIndirectKHR", name)) { *addr = (void *)CmdTraceRaysIndirectKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS -#ifdef VK_ENABLE_BETA_EXTENSIONS - if (!strcmp("vkGetDeviceAccelerationStructureCompatibilityKHR", name)) { - *addr = (void *)GetDeviceAccelerationStructureCompatibilityKHR; + if (!strcmp("vkGetRayTracingShaderGroupStackSizeKHR", name)) { + *addr = (void *)GetRayTracingShaderGroupStackSizeKHR; + return true; + } + if (!strcmp("vkCmdSetRayTracingPipelineStackSizeKHR", name)) { + *addr = (void *)CmdSetRayTracingPipelineStackSizeKHR; return true; } -#endif // VK_ENABLE_BETA_EXTENSIONS return false; } @@ -5245,12 +5110,6 @@ void extensions_create_instance(struct loader_instance *ptr_instance, const VkIn } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { ptr_instance->enabled_known_extensions.khr_external_fence_capabilities = 1; - // ---- VK_GGP_stream_descriptor_surface extension commands -#ifdef VK_USE_PLATFORM_GGP - } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME)) { - ptr_instance->enabled_known_extensions.ggp_stream_descriptor_surface = 1; -#endif // VK_USE_PLATFORM_GGP - // ---- VK_NV_external_memory_capabilities extension commands } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME)) { ptr_instance->enabled_known_extensions.nv_external_memory_capabilities = 1; @@ -5278,12 +5137,6 @@ void extensions_create_instance(struct loader_instance *ptr_instance, const VkIn // ---- VK_EXT_debug_utils extension commands } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) { ptr_instance->enabled_known_extensions.ext_debug_utils = 1; - - // ---- VK_FUCHSIA_imagepipe_surface extension commands -#ifdef VK_USE_PLATFORM_FUCHSIA - } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME)) { 
- ptr_instance->enabled_known_extensions.fuchsia_imagepipe_surface = 1; -#endif // VK_USE_PLATFORM_FUCHSIA } } } @@ -5347,7 +5200,7 @@ PFN_vkVoidFunction get_extension_device_proc_terminator(struct loader_device *de addr = (PFN_vkVoidFunction)terminator_GetDeviceGroupSurfacePresentModes2EXT; } } -#endif // VK_ENABLE_BETA_EXTENSIONS +#endif // None return addr; } @@ -5474,6 +5327,9 @@ const VkLayerInstanceDispatchTable instance_disp = { .GetDisplayModeProperties2KHR = terminator_GetDisplayModeProperties2KHR, .GetDisplayPlaneCapabilities2KHR = terminator_GetDisplayPlaneCapabilities2KHR, + // ---- VK_KHR_fragment_shading_rate extension commands + .GetPhysicalDeviceFragmentShadingRatesKHR = terminator_GetPhysicalDeviceFragmentShadingRatesKHR, + // ---- VK_EXT_debug_report extension commands .CreateDebugReportCallbackEXT = terminator_CreateDebugReportCallbackEXT, .DestroyDebugReportCallbackEXT = terminator_DestroyDebugReportCallbackEXT, diff --git a/thirdparty/vulkan/loader/vk_loader_extensions.h b/thirdparty/vulkan/loader/vk_loader_extensions.h index 9835e16a07..84e2150e1d 100644 --- a/thirdparty/vulkan/loader/vk_loader_extensions.h +++ b/thirdparty/vulkan/loader/vk_loader_extensions.h @@ -334,6 +334,9 @@ struct loader_icd_term_dispatch { PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR; PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR; + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR; + // ---- VK_EXT_debug_report extension commands PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT; PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT; @@ -446,14 +449,12 @@ union loader_instance_extension_enables { uint8_t khr_external_semaphore_capabilities : 1; uint8_t khr_external_fence_capabilities : 1; uint8_t ext_debug_report : 1; - uint8_t ggp_stream_descriptor_surface : 1; uint8_t nv_external_memory_capabilities : 1; uint8_t nn_vi_surface : 1; uint8_t ext_direct_mode_display : 1; uint8_t ext_acquire_xlib_display : 1; uint8_t ext_display_surface_counter : 1; uint8_t ext_debug_utils : 1; - uint8_t fuchsia_imagepipe_surface : 1; }; uint64_t padding[4]; }; diff --git a/thirdparty/vulkan/loader/vk_loader_platform.h b/thirdparty/vulkan/loader/vk_loader_platform.h index b7664f50a1..36698f7fa4 100644 --- a/thirdparty/vulkan/loader/vk_loader_platform.h +++ b/thirdparty/vulkan/loader/vk_loader_platform.h @@ -28,10 +28,14 @@ #include <winsock2.h> #endif // _WIN32 +#if defined(__Fuchsia__) +#include "dlopen_fuchsia.h" +#endif // defined(__Fuchsia__) + #include "vulkan/vk_platform.h" #include "vulkan/vk_sdk_platform.h" -#if defined(__linux__) || defined(__APPLE__) +#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) /* Linux-specific common code: */ // Headers: @@ -118,6 +122,8 @@ static inline char *loader_platform_executable_path(char *buffer, size_t size) { buffer[ret] = '\0'; return buffer; } +#elif defined(__Fuchsia__) +static inline char *loader_platform_executable_path(char *buffer, size_t size) { return NULL; } #endif // defined (__APPLE__) // Compatability with compilers that don't support __has_feature @@ -131,14 +137,32 @@ static inline char *loader_platform_executable_path(char *buffer, size_t size) { // Dynamic Loading of libraries: typedef void *loader_platform_dl_handle; -static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) { // When loading the library, we use RTLD_LAZY so that not all 
symbols have to be // resolved at this time (which improves performance). Note that if not all symbols // can be resolved, this could cause crashes later. Use the LD_BIND_NOW environment // variable to force all symbols to be resolved here. - return dlopen(libPath, RTLD_LAZY | RTLD_LOCAL); +#define LOADER_DLOPEN_MODE (RTLD_LAZY | RTLD_LOCAL) + +#if defined(__Fuchsia__) +static inline loader_platform_dl_handle loader_platform_open_driver(const char *libPath) { + return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, true); +} +static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) { + return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, false); +} +#else +static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) { + return dlopen(libPath, LOADER_DLOPEN_MODE); +} +#endif + +static inline const char *loader_platform_open_library_error(const char *libPath) { +#ifdef __Fuchsia__ + return dlerror_fuchsia(); +#else + return dlerror(); +#endif } -static inline const char *loader_platform_open_library_error(const char *libPath) { return dlerror(); } static inline void loader_platform_close_library(loader_platform_dl_handle library) { dlclose(library); } static inline void *loader_platform_get_proc_address(loader_platform_dl_handle library, const char *name) { assert(library); diff --git a/thirdparty/vulkan/loader/vk_object_types.h b/thirdparty/vulkan/loader/vk_object_types.h index 16d22c8241..807ff7c61e 100644 --- a/thirdparty/vulkan/loader/vk_object_types.h +++ b/thirdparty/vulkan/loader/vk_object_types.h @@ -72,15 +72,15 @@ typedef enum VulkanObjectType { kVulkanObjectTypeDebugReportCallbackEXT = 33, kVulkanObjectTypeDebugUtilsMessengerEXT = 34, kVulkanObjectTypeValidationCacheEXT = 35, - kVulkanObjectTypeAccelerationStructureKHR = 36, + kVulkanObjectTypeAccelerationStructureNV = 36, kVulkanObjectTypePerformanceConfigurationINTEL = 37, kVulkanObjectTypeIndirectCommandsLayoutNV = 38, kVulkanObjectTypePrivateDataSlotEXT = 39, - kVulkanObjectTypeMax = 40, + kVulkanObjectTypeAccelerationStructureKHR = 40, + kVulkanObjectTypeMax = 41, // Aliases for backwards compatibilty of "promoted" types kVulkanObjectTypeDescriptorUpdateTemplateKHR = kVulkanObjectTypeDescriptorUpdateTemplate, kVulkanObjectTypeSamplerYcbcrConversionKHR = kVulkanObjectTypeSamplerYcbcrConversion, - kVulkanObjectTypeAccelerationStructureNV = kVulkanObjectTypeAccelerationStructureKHR, } VulkanObjectType; // Array of object name strings for OBJECT_TYPE enum conversion @@ -121,10 +121,11 @@ static const char * const object_string[kVulkanObjectTypeMax] = { "DebugReportCallbackEXT", "DebugUtilsMessengerEXT", "ValidationCacheEXT", - "AccelerationStructureKHR", + "AccelerationStructureNV", "PerformanceConfigurationINTEL", "IndirectCommandsLayoutNV", "PrivateDataSlotEXT", + "AccelerationStructureKHR", }; // Helper array to get Vulkan VK_EXT_debug_report object type enum from the internal layers version @@ -165,10 +166,11 @@ const VkDebugReportObjectTypeEXT get_debug_report_enum[] = { VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, // kVulkanObjectTypeDebugReportCallbackEXT VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, // kVulkanObjectTypeValidationCacheEXT - VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, // kVulkanObjectTypeAccelerationStructureKHR + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, // kVulkanObjectTypeAccelerationStructureNV 
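The vk_object_types.h hunks keep kVulkanObjectTypeAccelerationStructureNV at its old slot (36) and append the KHR type at the tail (40) because object_string, get_debug_report_enum and get_object_type_enum are parallel arrays indexed by that enum. A hedged usage sketch (print_object_info is illustrative; the three tables and VulkanObjectType come from the header shown above, which is assumed to be included):

    #include <stdio.h>

    static void print_object_info(VulkanObjectType t) {
        /* All three tables are kVulkanObjectTypeMax entries long and must stay
         * in the same order, which is why new entries are appended at the end. */
        printf("%s -> VkObjectType %d, VkDebugReportObjectTypeEXT %d\n",
               object_string[t], (int)get_object_type_enum[t],
               (int)get_debug_report_enum[t]);
    }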
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePerformanceConfigurationINTEL VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeIndirectCommandsLayoutNV VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePrivateDataSlotEXT + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, // kVulkanObjectTypeAccelerationStructureKHR }; // Helper array to get Official Vulkan VkObjectType enum from the internal layers version @@ -209,10 +211,11 @@ const VkObjectType get_object_type_enum[] = { VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT, // kVulkanObjectTypeDebugReportCallbackEXT VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, // kVulkanObjectTypeValidationCacheEXT - VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, // kVulkanObjectTypeAccelerationStructureKHR + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, // kVulkanObjectTypeAccelerationStructureNV VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL, // kVulkanObjectTypePerformanceConfigurationINTEL VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV, // kVulkanObjectTypeIndirectCommandsLayoutNV VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT, // kVulkanObjectTypePrivateDataSlotEXT + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, // kVulkanObjectTypeAccelerationStructureKHR }; // Helper function to convert from VkDebugReportObjectTypeEXT to VkObjectType diff --git a/thirdparty/vulkan/loader/wsi.c b/thirdparty/vulkan/loader/wsi.c index 804046cf95..523f16a280 100644 --- a/thirdparty/vulkan/loader/wsi.c +++ b/thirdparty/vulkan/loader/wsi.c @@ -64,6 +64,12 @@ void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceC #ifdef VK_USE_PLATFORM_IOS_MVK ptr_instance->wsi_ios_surface_enabled = false; #endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + ptr_instance->wsi_ggp_surface_enabled = false; +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + ptr_instance->wsi_imagepipe_surface_enabled = false; +#endif // VK_USE_PLATFORM_FUCHSIA ptr_instance->wsi_display_enabled = false; ptr_instance->wsi_display_props2_enabled = false; #ifdef VK_USE_PLATFORM_METAL_EXT @@ -123,6 +129,18 @@ void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceC continue; } #endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_ggp_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_imagepipe_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_FUCHSIA if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME) == 0) { ptr_instance->wsi_headless_surface_enabled = true; continue; @@ -1308,6 +1326,83 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instanc #endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + +// Functions for the VK_GGP_stream_descriptor_surface extension: + +// This is the trampoline entrypoint for CreateStreamDescriptorSurfaceGGP +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL +vkCreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = 
loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateStreamDescriptorSurfaceGGP +VKAPI_ATTR VkResult VKAPI_CALL +terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_ggp_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_GGP_stream_descriptor_surface extension not enabled. vkCreateStreamDescriptorSurfaceGGP not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->ggp_surf.base), sizeof(pIcdSurface->ggp_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->ggp_surf.base.platform = VK_ICD_WSI_PLATFORM_GGP; + pIcdSurface->ggp_surf.streamDescriptor = pCreateInfo->streamDescriptor; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateStreamDescriptorSurfaceGGP) { + vkRes = icd_term->dispatch.CreateStreamDescriptorSurfaceGGP(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + return vkRes; +} + +#endif // VK_USE_PLATFORM_GGP + #if defined(VK_USE_PLATFORM_METAL_EXT) LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(VkInstance instance, @@ -1936,6 +2031,86 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhys pDisplayPlaneInfo->planeIndex, &pCapabilities->capabilities); } +#ifdef VK_USE_PLATFORM_FUCHSIA + +// This is the trampoline entrypoint for CreateImagePipeSurfaceFUCHSIA +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance 
chain terminator function for CreateImagePipeSurfaceFUCHSIA +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // Initialize pSurface to NULL just to be safe. + *pSurface = VK_NULL_HANDLE; + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_imagepipe_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_FUCHSIA_imagepipe_surface extension not enabled. " + "vkCreateImagePipeSurfaceFUCHSIA not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = + AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->imagepipe_surf.base), sizeof(pIcdSurface->imagepipe_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->imagepipe_surf.base.platform = VK_ICD_WSI_PLATFORM_FUCHSIA; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA) { + vkRes = icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)(pIcdSurface); + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} +#endif // VK_USE_PLATFORM_FUCHSIA + LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, VkSurfaceCapabilities2KHR *pSurfaceCapabilities) { @@ -2261,6 +2436,23 @@ bool wsi_swapchain_instance_gpa(struct loader_instance *ptr_instance, const char return true; } #endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + + // Functions for the VK_GGP_stream_descriptor_surface extension: + if (!strcmp("vkCreateStreamDescriptorSurfaceGGP", name)) { + *addr = ptr_instance->wsi_ggp_surface_enabled ? (void *)vkCreateStreamDescriptorSurfaceGGP : NULL; + return true; + } +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + + // Functions for the VK_FUCHSIA_imagepipe_surface extension: + if (!strcmp("vkCreateImagePipeSurfaceFUCHSIA", name)) { + *addr = ptr_instance->wsi_imagepipe_surface_enabled ? 
(void *)vkCreateImagePipeSurfaceFUCHSIA : NULL; + return true; + } + +#endif // VK_USE_PLATFORM_FUCHSIA // Functions for the VK_EXT_headless_surface extension: if (!strcmp("vkCreateHeadlessSurfaceEXT", name)) { diff --git a/thirdparty/vulkan/loader/wsi.h b/thirdparty/vulkan/loader/wsi.h index e6c699f58a..c9f4654914 100644 --- a/thirdparty/vulkan/loader/wsi.h +++ b/thirdparty/vulkan/loader/wsi.h @@ -45,6 +45,12 @@ typedef struct { #ifdef VK_USE_PLATFORM_MACOS_MVK VkIcdSurfaceMacOS macos_surf; #endif // VK_USE_PLATFORM_MACOS_MVK +#ifdef VK_USE_PLATFORM_GGP + VkIcdSurfaceGgp ggp_surf; +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + VkIcdSurfaceImagePipe imagepipe_surf; +#endif // VK_USE_PLATFORM_FUCHSIA #ifdef VK_USE_PLATFORM_METAL_EXT VkIcdSurfaceMetal metal_surf; #endif // VK_USE_PLATFORM_METAL_EXT @@ -140,6 +146,11 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMacOSSurfaceMVK(VkInstance insta VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); #endif +#ifdef VK_USE_PLATFORM_GGP +VKAPI_ATTR VkResult VKAPI_CALL +terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif #if defined(VK_USE_PLATFORM_METAL_EXT) VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); @@ -191,6 +202,10 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModeProperties2KHR(VkPhysica VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR *pCapabilities); +#ifdef VK_USE_PLATFORM_FUCHSIA +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, diff --git a/thirdparty/vulkan/vk_enum_string_helper.h b/thirdparty/vulkan/vk_enum_string_helper.h index 39884deeed..dccb1d0968 100644 --- a/thirdparty/vulkan/vk_enum_string_helper.h +++ b/thirdparty/vulkan/vk_enum_string_helper.h @@ -61,8 +61,6 @@ static inline const char* string_VkResult(VkResult input_value) return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR"; case VK_ERROR_INCOMPATIBLE_DRIVER: return "VK_ERROR_INCOMPATIBLE_DRIVER"; - case VK_ERROR_INCOMPATIBLE_VERSION_KHR: - return "VK_ERROR_INCOMPATIBLE_VERSION_KHR"; case VK_ERROR_INITIALIZATION_FAILED: return "VK_ERROR_INITIALIZATION_FAILED"; case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: @@ -132,8 +130,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) { case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR: return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR"; - case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR: - return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR: + return 
"VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR"; case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR: return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR"; case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV: @@ -150,12 +148,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR"; case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV: return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV"; - case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR: - return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR"; case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV: return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV"; - case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR: - return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR"; case VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR: return "VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR"; case VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR: @@ -178,8 +174,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2"; case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT: return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT"; - case VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR: - return "VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR"; + case VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV: + return "VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV"; case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO: return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO"; case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO: @@ -244,6 +240,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR"; case VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR: return "VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM: + return "VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM"; case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: return "VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET"; case VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR: @@ -278,8 +276,6 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV"; case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV: return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV"; - case VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR: - return "VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR"; case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO: return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO"; case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: @@ -300,6 +296,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO"; case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT"; 
case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV: return "VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV"; case VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT: @@ -324,6 +322,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO"; case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD"; + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT"; case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT: return "VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT"; case VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO: @@ -356,8 +356,6 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR"; case VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR: return "VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR"; - case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT: - return "VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT"; case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT: return "VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT"; case VK_STRUCTURE_TYPE_EVENT_CREATE_INFO: @@ -404,6 +402,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT"; case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2: return "VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR: + return "VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR"; case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO: return "VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO"; case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO: @@ -562,6 +562,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT: @@ -606,6 +610,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: @@ -642,6 +648,16 @@ static inline const char* 
string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES: @@ -712,10 +728,12 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR"; - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR: - return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR"; - case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR: - return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV: @@ -750,6 +768,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: @@ -760,6 +780,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES: return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV"; case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV: @@ -840,6 +862,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR"; case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR: return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR"; case VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR: return "VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR"; case VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO: @@ -1036,6 +1062,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value) return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET"; case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR: return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR"; + case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV: + return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV"; case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT: return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT"; case VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR: @@ -1228,6 +1256,8 @@ static inline const char* string_VkObjectType(VkObjectType input_value) { case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR: return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR"; + case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV: + return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV"; case VK_OBJECT_TYPE_BUFFER: return "VK_OBJECT_TYPE_BUFFER"; case VK_OBJECT_TYPE_BUFFER_VIEW: @@ -1888,6 +1918,8 @@ static inline const char* string_VkFormatFeatureFlagBits(VkFormatFeatureFlagBits return "VK_FORMAT_FEATURE_DISJOINT_BIT"; case VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT: return "VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT"; + case VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR: + return "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"; case VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT: return "VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT"; case VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT: @@ -2478,6 +2510,8 @@ static inline const char* string_VkQueryType(VkQueryType input_value) { case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR"; + case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV: + return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV"; case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR"; case VK_QUERY_TYPE_OCCLUSION: @@ -2569,14 +2603,18 @@ static inline const char* string_VkBufferUsageFlagBits(VkBufferUsageFlagBits inp { switch ((VkBufferUsageFlagBits)input_value) { + case 
VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR: + return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR"; + case VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR: + return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR"; case VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT: return "VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT"; case VK_BUFFER_USAGE_INDEX_BUFFER_BIT: return "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"; case VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT: return "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"; - case VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR: - return "VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR"; + case VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR: + return "VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR"; case VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT: return "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT"; case VK_BUFFER_USAGE_STORAGE_BUFFER_BIT: @@ -2983,6 +3021,8 @@ static inline const char* string_VkPipelineCreateFlagBits(VkPipelineCreateFlagBi return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR"; case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR: return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR"; case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR: return "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR"; case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR: @@ -3153,6 +3193,8 @@ static inline const char* string_VkDynamicState(VkDynamicState input_value) return "VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT"; case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV: return "VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV"; + case VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR: + return "VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR"; case VK_DYNAMIC_STATE_FRONT_FACE_EXT: return "VK_DYNAMIC_STATE_FRONT_FACE_EXT"; case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT: @@ -3161,6 +3203,8 @@ static inline const char* string_VkDynamicState(VkDynamicState input_value) return "VK_DYNAMIC_STATE_LINE_WIDTH"; case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT: return "VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT"; + case VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR: + return "VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR"; case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT: return "VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT"; case VK_DYNAMIC_STATE_SCISSOR: @@ -3470,6 +3514,8 @@ static inline const char* string_VkDescriptorType(VkDescriptorType input_value) { case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR"; + case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: + return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV"; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: return "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: @@ -5200,10 +5246,10 @@ static inline const char* string_VkPerformanceCounterDescriptionFlagBitsKHR(VkPe { switch ((VkPerformanceCounterDescriptionFlagBitsKHR)input_value) { - case VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR: - return "VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR"; - case VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR: - return "VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR"; + case VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR: + return 
"VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR"; + case VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR: + return "VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR"; default: return "Unhandled VkPerformanceCounterDescriptionFlagBitsKHR"; } @@ -5423,6 +5469,25 @@ static inline std::string string_VkSemaphoreWaitFlagsKHR(VkSemaphoreWaitFlagsKHR return ret; } +static inline const char* string_VkFragmentShadingRateCombinerOpKHR(VkFragmentShadingRateCombinerOpKHR input_value) +{ + switch ((VkFragmentShadingRateCombinerOpKHR)input_value) + { + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR"; + default: + return "Unhandled VkFragmentShadingRateCombinerOpKHR"; + } +} + static inline const char* string_VkPipelineExecutableStatisticFormatKHR(VkPipelineExecutableStatisticFormatKHR input_value) { switch ((VkPipelineExecutableStatisticFormatKHR)input_value) @@ -5481,6 +5546,8 @@ static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectT { case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT: return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT"; case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT"; case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: @@ -5690,8 +5757,8 @@ static inline const char* string_VkSurfaceCounterFlagBitsEXT(VkSurfaceCounterFla { switch ((VkSurfaceCounterFlagBitsEXT)input_value) { - case VK_SURFACE_COUNTER_VBLANK_EXT: - return "VK_SURFACE_COUNTER_VBLANK_EXT"; + case VK_SURFACE_COUNTER_VBLANK_BIT_EXT: + return "VK_SURFACE_COUNTER_VBLANK_BIT_EXT"; default: return "Unhandled VkSurfaceCounterFlagBitsEXT"; } @@ -6074,6 +6141,8 @@ static inline const char* string_VkAccelerationStructureTypeKHR(VkAccelerationSt { case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR: return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR"; case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR: return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR"; default: @@ -6087,6 +6156,8 @@ static inline const char* string_VkAccelerationStructureTypeNV(VkAccelerationStr { case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR: return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR"; case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR: return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR"; default: @@ -6322,31 +6393,16 @@ static inline const char* string_VkCopyAccelerationStructureModeNV(VkCopyAcceler } } -static inline const char* string_VkAccelerationStructureMemoryRequirementsTypeKHR(VkAccelerationStructureMemoryRequirementsTypeKHR input_value) -{ - switch ((VkAccelerationStructureMemoryRequirementsTypeKHR)input_value) - { - case 
VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR"; - case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR"; - case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR"; - default: - return "Unhandled VkAccelerationStructureMemoryRequirementsTypeKHR"; - } -} - static inline const char* string_VkAccelerationStructureMemoryRequirementsTypeNV(VkAccelerationStructureMemoryRequirementsTypeNV input_value) { switch ((VkAccelerationStructureMemoryRequirementsTypeNV)input_value) { - case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR"; - case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR"; - case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR: - return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR"; + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV"; + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV"; + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV"; default: return "Unhandled VkAccelerationStructureMemoryRequirementsTypeNV"; } @@ -6762,6 +6818,25 @@ static inline std::string string_VkIndirectCommandsLayoutUsageFlagsNV(VkIndirect return ret; } +static inline const char* string_VkDeviceMemoryReportEventTypeEXT(VkDeviceMemoryReportEventTypeEXT input_value) +{ + switch ((VkDeviceMemoryReportEventTypeEXT)input_value) + { + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT"; + default: + return "Unhandled VkDeviceMemoryReportEventTypeEXT"; + } +} + static inline const char* string_VkDeviceDiagnosticsConfigFlagBitsNV(VkDeviceDiagnosticsConfigFlagBitsNV input_value) { switch ((VkDeviceDiagnosticsConfigFlagBitsNV)input_value) @@ -6793,8 +6868,64 @@ static inline std::string string_VkDeviceDiagnosticsConfigFlagsNV(VkDeviceDiagno return ret; } +static inline const char* string_VkFragmentShadingRateTypeNV(VkFragmentShadingRateTypeNV input_value) +{ + switch ((VkFragmentShadingRateTypeNV)input_value) + { + case VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV: + return "VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV"; + case VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV: + return "VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV"; + default: + return "Unhandled VkFragmentShadingRateTypeNV"; + } +} -#ifdef VK_ENABLE_BETA_EXTENSIONS +static inline const char* 
string_VkFragmentShadingRateNV(VkFragmentShadingRateNV input_value) +{ + switch ((VkFragmentShadingRateNV)input_value) + { + case VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV: + return "VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV"; + default: + return "Unhandled VkFragmentShadingRateNV"; + } +} + +static inline const char* string_VkBuildAccelerationStructureModeKHR(VkBuildAccelerationStructureModeKHR input_value) +{ + switch ((VkBuildAccelerationStructureModeKHR)input_value) + { + case VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR"; + default: + return "Unhandled VkBuildAccelerationStructureModeKHR"; + } +} static inline const char* string_VkAccelerationStructureBuildTypeKHR(VkAccelerationStructureBuildTypeKHR input_value) { @@ -6810,7 +6941,63 @@ static inline const char* string_VkAccelerationStructureBuildTypeKHR(VkAccelerat return "Unhandled VkAccelerationStructureBuildTypeKHR"; } } -#endif // VK_ENABLE_BETA_EXTENSIONS + +static inline const char* string_VkAccelerationStructureCreateFlagBitsKHR(VkAccelerationStructureCreateFlagBitsKHR input_value) +{ + switch ((VkAccelerationStructureCreateFlagBitsKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR: + return "VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR"; + default: + return "Unhandled VkAccelerationStructureCreateFlagBitsKHR"; + } +} + +static inline std::string string_VkAccelerationStructureCreateFlagsKHR(VkAccelerationStructureCreateFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast<VkAccelerationStructureCreateFlagBitsKHR>(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast<VkAccelerationStructureCreateFlagBitsKHR>(0))); + return ret; +} + +static 
inline const char* string_VkAccelerationStructureCompatibilityKHR(VkAccelerationStructureCompatibilityKHR input_value) +{ + switch ((VkAccelerationStructureCompatibilityKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR: + return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR"; + case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR: + return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR"; + default: + return "Unhandled VkAccelerationStructureCompatibilityKHR"; + } +} + +static inline const char* string_VkShaderGroupShaderKHR(VkShaderGroupShaderKHR input_value) +{ + switch ((VkShaderGroupShaderKHR)input_value) + { + case VK_SHADER_GROUP_SHADER_ANY_HIT_KHR: + return "VK_SHADER_GROUP_SHADER_ANY_HIT_KHR"; + case VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR: + return "VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR"; + case VK_SHADER_GROUP_SHADER_GENERAL_KHR: + return "VK_SHADER_GROUP_SHADER_GENERAL_KHR"; + case VK_SHADER_GROUP_SHADER_INTERSECTION_KHR: + return "VK_SHADER_GROUP_SHADER_INTERSECTION_KHR"; + default: + return "Unhandled VkShaderGroupShaderKHR"; + } +} static inline const char * GetPhysDevFeatureString(uint32_t index) { const char * IndexToPhysDevFeatureString[] = { |