Diffstat (limited to 'drivers/vulkan/rendering_device_vulkan.cpp')
-rw-r--r--	drivers/vulkan/rendering_device_vulkan.cpp	189
1 file changed, 142 insertions(+), 47 deletions(-)
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 0832a07f51..1ed6839dd7 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -54,9 +54,13 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
 		r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
 		r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
 		if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
-			if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
+			if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
 				r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-				r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+				r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+			}
+			if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+				r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+				r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 			}
 			if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
 				r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
@@ -68,8 +72,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
 		r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
 		buffer = index_buffer_owner.get_or_null(p_buffer);
 	} else if (uniform_buffer_owner.owns(p_buffer)) {
-		if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+		}
+		if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		}
 		if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
 			r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@@ -77,8 +84,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
 		r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
 		buffer = uniform_buffer_owner.get_or_null(p_buffer);
 	} else if (texture_buffer_owner.owns(p_buffer)) {
-		if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+			r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
+		}
+		if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 			r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
 		}
 		if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
@@ -89,8 +100,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
 		buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
 	} else if (storage_buffer_owner.owns(p_buffer)) {
 		buffer = storage_buffer_owner.get_or_null(p_buffer);
-		if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+		if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+			r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+		}
+		if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+			r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 			r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 		}
 		if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
@@ -2625,8 +2640,12 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -2874,6 +2893,29 @@ bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
 	return texture_owner.owns(p_texture);
 }
 
+RD::TextureFormat RenderingDeviceVulkan::texture_get_format(RID p_texture) {
+	_THREAD_SAFE_METHOD_
+
+	Texture *tex = texture_owner.get_or_null(p_texture);
+	ERR_FAIL_COND_V(!tex, TextureFormat());
+
+	TextureFormat tf;
+
+	tf.format = tex->format;
+	tf.width = tex->width;
+	tf.height = tex->height;
+	tf.depth = tex->depth;
+	tf.array_layers = tex->layers;
+	tf.mipmaps = tex->mipmaps;
+	tf.texture_type = tex->type;
+	tf.samples = tex->samples;
+	tf.usage_bits = tex->usage_flags;
+	tf.shareable_formats = tex->allowed_shared_formats;
+	tf.is_resolve_buffer = tex->is_resolve_buffer;
+
+	return tf;
+}
+
 Size2i RenderingDeviceVulkan::texture_size(RID p_texture) {
 	_THREAD_SAFE_METHOD_
@@ -3020,8 +3062,12 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3198,8 +3244,12 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3334,8 +3384,12 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -4633,7 +4687,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
 			"Number of uniform sets is larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
 
 	// Collect reflection data into binary data.
-	RenderingDeviceVulkanShaderBinaryData binary_data;
+	RenderingDeviceVulkanShaderBinaryData binary_data{};
 	Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; // Set bindings.
 	Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants;
 	{
@@ -4804,7 +4858,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
 	return ret;
 }
 
-RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) {
+RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
 	const uint8_t *binptr = p_shader_binary.ptr();
 	uint32_t binsize = p_shader_binary.size();
@@ -4989,17 +5043,24 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 	_THREAD_SAFE_METHOD_
 
-	Shader shader;
+	RID id;
+	if (p_placeholder.is_null()) {
+		id = shader_owner.make_rid();
+	} else {
+		id = p_placeholder;
+	}
+
+	Shader *shader = shader_owner.get_or_null(id);
 
-	shader.vertex_input_mask = vertex_input_mask;
-	shader.fragment_output_mask = fragment_output_mask;
-	shader.push_constant = push_constant;
-	shader.is_compute = is_compute;
-	shader.compute_local_size[0] = compute_local_size[0];
-	shader.compute_local_size[1] = compute_local_size[1];
-	shader.compute_local_size[2] = compute_local_size[2];
-	shader.specialization_constants = specialization_constants;
-	shader.name = name;
+	shader->vertex_input_mask = vertex_input_mask;
+	shader->fragment_output_mask = fragment_output_mask;
+	shader->push_constant = push_constant;
+	shader->is_compute = is_compute;
+	shader->compute_local_size[0] = compute_local_size[0];
+	shader->compute_local_size[1] = compute_local_size[1];
+	shader->compute_local_size[2] = compute_local_size[2];
+	shader->specialization_constants = specialization_constants;
+	shader->name = name;
 
 	String error_text;
@@ -5031,7 +5092,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 		shader_stage.pName = "main";
 		shader_stage.pSpecializationInfo = nullptr;
 
-		shader.pipeline_stages.push_back(shader_stage);
+		shader->pipeline_stages.push_back(shader_stage);
 	}
 
 	// Proceed to create descriptor sets.
@@ -5074,8 +5135,8 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 			}
 		}
 
-		shader.sets.push_back(set);
-		shader.set_formats.push_back(format);
+		shader->sets.push_back(set);
+		shader->set_formats.push_back(format);
 	}
 }
@@ -5085,13 +5146,13 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 		pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
 		pipeline_layout_create_info.pNext = nullptr;
 		pipeline_layout_create_info.flags = 0;
-		pipeline_layout_create_info.setLayoutCount = shader.sets.size();
+		pipeline_layout_create_info.setLayoutCount = shader->sets.size();
 
 		Vector<VkDescriptorSetLayout> layouts;
-		layouts.resize(shader.sets.size());
+		layouts.resize(shader->sets.size());
 
 		for (int i = 0; i < layouts.size(); i++) {
-			layouts.write[i] = shader.sets[i].descriptor_set_layout;
+			layouts.write[i] = shader->sets[i].descriptor_set_layout;
 		}
 
 		pipeline_layout_create_info.pSetLayouts = layouts.ptr();
@@ -5110,7 +5171,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 		pipeline_layout_create_info.pPushConstantRanges = nullptr;
 	}
 
-	VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, nullptr, &shader.pipeline_layout);
+	VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, nullptr, &shader->pipeline_layout);
 
 	if (err) {
 		error_text = "Error (" + itos(err) + ") creating pipeline layout.";
@@ -5120,24 +5181,30 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
 	if (!success) {
 		// Clean up if failed.
-		for (int i = 0; i < shader.pipeline_stages.size(); i++) {
-			vkDestroyShaderModule(device, shader.pipeline_stages[i].module, nullptr);
+		for (int i = 0; i < shader->pipeline_stages.size(); i++) {
+			vkDestroyShaderModule(device, shader->pipeline_stages[i].module, nullptr);
 		}
-		for (int i = 0; i < shader.sets.size(); i++) {
-			vkDestroyDescriptorSetLayout(device, shader.sets[i].descriptor_set_layout, nullptr);
+		for (int i = 0; i < shader->sets.size(); i++) {
+			vkDestroyDescriptorSetLayout(device, shader->sets[i].descriptor_set_layout, nullptr);
 		}
 
+		shader_owner.free(id);
+
 		ERR_FAIL_V_MSG(RID(), error_text);
 	}
 
-	RID id = shader_owner.make_rid(shader);
 #ifdef DEV_ENABLED
 	set_resource_name(id, "RID:" + itos(id.get_id()));
 #endif
 	return id;
 }
 
+RID RenderingDeviceVulkan::shader_create_placeholder() {
+	Shader shader;
+	return shader_owner.make_rid(shader);
+}
+
 uint32_t RenderingDeviceVulkan::shader_get_vertex_input_attribute_mask(RID p_shader) {
 	_THREAD_SAFE_METHOD_
@@ -7470,6 +7537,7 @@ uint32_t RenderingDeviceVulkan::draw_list_get_current_pass() {
 }
 
 RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_switch_to_next_pass() {
+	_THREAD_SAFE_METHOD_
 	ERR_FAIL_COND_V(draw_list == nullptr, INVALID_ID);
 	ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, INVALID_FORMAT_ID);
@@ -7485,6 +7553,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_switch_to_next_pass
 	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
 }
 
 Error RenderingDeviceVulkan::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
+	_THREAD_SAFE_METHOD_
 	ERR_FAIL_COND_V(draw_list == nullptr, ERR_INVALID_PARAMETER);
 	ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, ERR_INVALID_PARAMETER);
@@ -7649,10 +7718,14 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
 	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
+	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
 		barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
 		access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -7729,6 +7802,8 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
 /***********************/
 
 RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_allow_draw_overlap) {
+	_THREAD_SAFE_METHOD_
+
 	ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
 	ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
@@ -7743,6 +7818,8 @@ RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_
 }
 
 void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
+	// Must be called within a compute list, the class mutex is locked during that time
+
 	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
 	ERR_FAIL_COND(!compute_list);
@@ -7807,6 +7884,8 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l
 }
 
 void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
+	// Must be called within a compute list, the class mutex is locked during that time
+
 	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
 	ERR_FAIL_COND(!compute_list);
@@ -7981,6 +8060,8 @@ void RenderingDeviceVulkan::compute_list_set_push_constant(ComputeListID p_list,
 }
 
 void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
+	// Must be called within a compute list, the class mutex is locked during that time
+
 	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
 	ERR_FAIL_COND(!compute_list);
@@ -8124,6 +8205,8 @@ void RenderingDeviceVulkan::compute_list_dispatch_indirect(ComputeListID p_list,
 }
 
 void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
+	// Must be called within a compute list, the class mutex is locked during that time
+
 	uint32_t barrier_flags = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 	uint32_t access_flags = VK_ACCESS_SHADER_READ_BIT;
 	_compute_list_add_barrier(BARRIER_MASK_COMPUTE, barrier_flags, access_flags);
@@ -8197,10 +8280,14 @@ void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrie
 		barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
-		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+	if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+		barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
 		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
 	}
+	if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+		barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+		access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+	}
 	if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
 		barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
 		access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -8225,7 +8312,7 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
 		src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_from.has_flag(BARRIER_MASK_RASTER)) {
+	if (p_from.has_flag(BARRIER_MASK_FRAGMENT)) {
 		src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
 		src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
 	}
@@ -8245,10 +8332,14 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
 		dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
 		dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
 	}
-	if (p_to.has_flag(BARRIER_MASK_RASTER)) {
-		dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+	if (p_to.has_flag(BARRIER_MASK_VERTEX)) {
+		dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
 		dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
 	}
+	if (p_to.has_flag(BARRIER_MASK_FRAGMENT)) {
+		dst_barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+		dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+	}
 	if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
 		dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
 		dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -8604,6 +8695,8 @@ void RenderingDeviceVulkan::swap_buffers() {
 }
 
 void RenderingDeviceVulkan::submit() {
+	_THREAD_SAFE_METHOD_
+
 	ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
 	ERR_FAIL_COND_MSG(local_device_processing, "device already submitted, call sync to wait until done.");
@@ -8615,6 +8708,8 @@ void RenderingDeviceVulkan::sync() {
+	_THREAD_SAFE_METHOD_
+
 	ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
 	ERR_FAIL_COND_MSG(!local_device_processing, "sync can only be called after a submit");
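The hunks above split the old BARRIER_MASK_RASTER bit into separate BARRIER_MASK_VERTEX and BARRIER_MASK_FRAGMENT masks, add texture_get_format() and a placeholder-based two-step shader creation path (shader_create_placeholder() plus the new p_placeholder argument on shader_create_from_bytecode()), and guard more entry points with _THREAD_SAFE_METHOD_. The following is a minimal caller-side sketch of how these pieces could fit together; it is illustrative only, not code from this commit, and it assumes `rd` is a valid RenderingDeviceVulkan pointer, `shader_binary` holds bytecode produced by shader_compile_binary_from_spirv(), and `some_texture` is an existing texture RID.

    // Hypothetical usage sketch; `rd`, `shader_binary` and `some_texture`
    // are assumptions, not part of this commit.

    // Reserve a stable RID before the (potentially slow) compile step, so
    // other systems can already hold the handle while the shader is built.
    RID shader = rd->shader_create_placeholder();

    // Populate the placeholder in place; passing it as the second argument
    // reuses the reserved RID instead of allocating a new one.
    rd->shader_create_from_bytecode(shader_binary, shader);

    // texture_get_format() returns the creation-time description of a texture.
    RD::TextureFormat fmt = rd->texture_get_format(some_texture);

    // The split masks allow narrower synchronization: a compute pass whose
    // results are read only by fragment shaders no longer stalls vertex work.
    rd->compute_list_end(RD::BARRIER_MASK_FRAGMENT);

    // The old BARRIER_MASK_RASTER behavior stays expressible as the union
    // of the two new masks.
    rd->barrier(RD::BARRIER_MASK_COMPUTE, RD::BARRIER_MASK_VERTEX | RD::BARRIER_MASK_FRAGMENT);

Note that the failure path in shader_create_from_bytecode() now frees the reserved RID (shader_owner.free(id)) before returning, so a placeholder passed in does not leak when compilation fails.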