Diffstat (limited to 'drivers/vulkan/rendering_device_vulkan.cpp')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  256
1 file changed, 205 insertions, 51 deletions
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 23c6919854..59b1d176c6 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -54,9 +54,13 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
@@ -68,8 +72,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.get_or_null(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@@ -77,8 +84,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.get_or_null(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
@@ -89,8 +100,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
buffer = storage_buffer_owner.get_or_null(p_buffer);
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
@@ -2625,8 +2640,12 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -2874,6 +2893,29 @@ bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
return texture_owner.owns(p_texture);
}
+RD::TextureFormat RenderingDeviceVulkan::texture_get_format(RID p_texture) {
+ _THREAD_SAFE_METHOD_
+
+ Texture *tex = texture_owner.get_or_null(p_texture);
+ ERR_FAIL_COND_V(!tex, TextureFormat());
+
+ TextureFormat tf;
+
+ tf.format = tex->format;
+ tf.width = tex->width;
+ tf.height = tex->height;
+ tf.depth = tex->depth;
+ tf.array_layers = tex->layers;
+ tf.mipmaps = tex->mipmaps;
+ tf.texture_type = tex->type;
+ tf.samples = tex->samples;
+ tf.usage_bits = tex->usage_flags;
+ tf.shareable_formats = tex->allowed_shared_formats;
+ tf.is_resolve_buffer = tex->is_resolve_buffer;
+
+ return tf;
+}
+
Size2i RenderingDeviceVulkan::texture_size(RID p_texture) {
_THREAD_SAFE_METHOD_
@@ -3020,8 +3062,12 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3198,8 +3244,12 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3334,8 +3384,12 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -4633,7 +4687,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
"Number of uniform sets is larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
// Collect reflection data into binary data.
- RenderingDeviceVulkanShaderBinaryData binary_data;
+ RenderingDeviceVulkanShaderBinaryData binary_data{};
Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; // Set bindings.
Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants;
{
@@ -4758,11 +4812,14 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
offset += sizeof(uint32_t);
memcpy(binptr + offset, &binary_data, sizeof(RenderingDeviceVulkanShaderBinaryData));
offset += sizeof(RenderingDeviceVulkanShaderBinaryData);
- memcpy(binptr + offset, shader_name_utf.ptr(), binary_data.shader_name_len);
- offset += binary_data.shader_name_len;
- if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange.
- offset += 4 - (binary_data.shader_name_len % 4);
+ if (binary_data.shader_name_len > 0) {
+ memcpy(binptr + offset, shader_name_utf.ptr(), binary_data.shader_name_len);
+ offset += binary_data.shader_name_len;
+
+ if ((binary_data.shader_name_len % 4) != 0) { // Alignment rules are really strange.
+ offset += 4 - (binary_data.shader_name_len % 4);
+ }
}
for (int i = 0; i < uniform_info.size(); i++) {
@@ -4804,7 +4861,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
return ret;
}
-RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) {
+RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
const uint8_t *binptr = p_shader_binary.ptr();
uint32_t binsize = p_shader_binary.size();
@@ -4989,17 +5046,24 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
_THREAD_SAFE_METHOD_
- Shader shader;
+ RID id;
+ if (p_placeholder.is_null()) {
+ id = shader_owner.make_rid();
+ } else {
+ id = p_placeholder;
+ }
+
+ Shader *shader = shader_owner.get_or_null(id);
- shader.vertex_input_mask = vertex_input_mask;
- shader.fragment_output_mask = fragment_output_mask;
- shader.push_constant = push_constant;
- shader.is_compute = is_compute;
- shader.compute_local_size[0] = compute_local_size[0];
- shader.compute_local_size[1] = compute_local_size[1];
- shader.compute_local_size[2] = compute_local_size[2];
- shader.specialization_constants = specialization_constants;
- shader.name = name;
+ shader->vertex_input_mask = vertex_input_mask;
+ shader->fragment_output_mask = fragment_output_mask;
+ shader->push_constant = push_constant;
+ shader->is_compute = is_compute;
+ shader->compute_local_size[0] = compute_local_size[0];
+ shader->compute_local_size[1] = compute_local_size[1];
+ shader->compute_local_size[2] = compute_local_size[2];
+ shader->specialization_constants = specialization_constants;
+ shader->name = name;
String error_text;
@@ -5031,7 +5095,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
shader_stage.pName = "main";
shader_stage.pSpecializationInfo = nullptr;
- shader.pipeline_stages.push_back(shader_stage);
+ shader->pipeline_stages.push_back(shader_stage);
}
// Proceed to create descriptor sets.
@@ -5074,8 +5138,8 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
}
}
- shader.sets.push_back(set);
- shader.set_formats.push_back(format);
+ shader->sets.push_back(set);
+ shader->set_formats.push_back(format);
}
}
@@ -5085,13 +5149,13 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_create_info.pNext = nullptr;
pipeline_layout_create_info.flags = 0;
- pipeline_layout_create_info.setLayoutCount = shader.sets.size();
+ pipeline_layout_create_info.setLayoutCount = shader->sets.size();
Vector<VkDescriptorSetLayout> layouts;
- layouts.resize(shader.sets.size());
+ layouts.resize(shader->sets.size());
for (int i = 0; i < layouts.size(); i++) {
- layouts.write[i] = shader.sets[i].descriptor_set_layout;
+ layouts.write[i] = shader->sets[i].descriptor_set_layout;
}
pipeline_layout_create_info.pSetLayouts = layouts.ptr();
@@ -5110,7 +5174,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
pipeline_layout_create_info.pPushConstantRanges = nullptr;
}
- VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, nullptr, &shader.pipeline_layout);
+ VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, nullptr, &shader->pipeline_layout);
if (err) {
error_text = "Error (" + itos(err) + ") creating pipeline layout.";
@@ -5120,24 +5184,30 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
if (!success) {
// Clean up if failed.
- for (int i = 0; i < shader.pipeline_stages.size(); i++) {
- vkDestroyShaderModule(device, shader.pipeline_stages[i].module, nullptr);
+ for (int i = 0; i < shader->pipeline_stages.size(); i++) {
+ vkDestroyShaderModule(device, shader->pipeline_stages[i].module, nullptr);
}
- for (int i = 0; i < shader.sets.size(); i++) {
- vkDestroyDescriptorSetLayout(device, shader.sets[i].descriptor_set_layout, nullptr);
+ for (int i = 0; i < shader->sets.size(); i++) {
+ vkDestroyDescriptorSetLayout(device, shader->sets[i].descriptor_set_layout, nullptr);
}
+ shader_owner.free(id);
+
ERR_FAIL_V_MSG(RID(), error_text);
}
- RID id = shader_owner.make_rid(shader);
#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
return id;
}
+RID RenderingDeviceVulkan::shader_create_placeholder() {
+ Shader shader;
+ return shader_owner.make_rid(shader);
+}
+
uint32_t RenderingDeviceVulkan::shader_get_vertex_input_attribute_mask(RID p_shader) {
_THREAD_SAFE_METHOD_
@@ -5838,6 +5908,64 @@ void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_
us->invalidated_callback_userdata = p_userdata;
}
+Error RenderingDeviceVulkan::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
+ "Copying buffers is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
+ "Copying buffers is forbidden during creation of a compute list");
+
+ // This method assumes the barriers have been pushed prior to being called; therefore, no barriers are pushed
+ // for the source or destination buffers before performing the copy. These masks are effectively ignored.
+ VkPipelineStageFlags src_stage_mask = 0;
+ VkAccessFlags src_access_mask = 0;
+ Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_NO_BARRIER);
+ if (!src_buffer) {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Source buffer argument is not a valid buffer of any type.");
+ }
+
+ VkPipelineStageFlags dst_stage_mask = 0;
+ VkAccessFlags dst_access = 0;
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
+ // If the post barrier mask defines it, we indicate the destination buffer will require a barrier with these flags set
+ // after the copy command is queued.
+ dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
+ Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer, dst_stage_mask, dst_access, p_post_barrier);
+ if (!dst_buffer) {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Destination buffer argument is not a valid buffer of any type.");
+ }
+
+ // Validate the copy's dimensions for both buffers.
+ ERR_FAIL_COND_V_MSG((p_size + p_src_offset) > src_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the source buffer.");
+ ERR_FAIL_COND_V_MSG((p_size + p_dst_offset) > dst_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the destination buffer.");
+
+ // Perform the copy.
+ VkBufferCopy region;
+ region.srcOffset = p_src_offset;
+ region.dstOffset = p_dst_offset;
+ region.size = p_size;
+ vkCmdCopyBuffer(frames[frame].draw_command_buffer, src_buffer->buffer, dst_buffer->buffer, 1, &region);
+
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(true);
+#else
+ if (dst_stage_mask == 0) {
+ dst_stage_mask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ }
+
+ // As indicated by the post barrier mask, push a new barrier.
+ if (p_post_barrier != RD::BARRIER_MASK_NO_BARRIER) {
+ _buffer_memory_barrier(dst_buffer->buffer, p_dst_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, true);
+ }
+#endif
+
+ return OK;
+}
+
Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
@@ -7651,10 +7779,14 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
}
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
+ }
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -7731,6 +7863,8 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
/***********************/
RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_allow_draw_overlap) {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
@@ -7745,6 +7879,8 @@ RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_
}
void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -7809,6 +7945,8 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l
}
void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -7983,6 +8121,8 @@ void RenderingDeviceVulkan::compute_list_set_push_constant(ComputeListID p_list,
}
void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -8126,6 +8266,8 @@ void RenderingDeviceVulkan::compute_list_dispatch_indirect(ComputeListID p_list,
}
void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
uint32_t barrier_flags = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
uint32_t access_flags = VK_ACCESS_SHADER_READ_BIT;
_compute_list_add_barrier(BARRIER_MASK_COMPUTE, barrier_flags, access_flags);
@@ -8199,10 +8341,14 @@ void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrie
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -8227,7 +8373,7 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_from.has_flag(BARRIER_MASK_RASTER)) {
+ if (p_from.has_flag(BARRIER_MASK_FRAGMENT)) {
src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
@@ -8247,10 +8393,14 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_to.has_flag(BARRIER_MASK_RASTER)) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ if (p_to.has_flag(BARRIER_MASK_VERTEX)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
+ if (p_to.has_flag(BARRIER_MASK_FRAGMENT)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -8606,6 +8756,8 @@ void RenderingDeviceVulkan::swap_buffers() {
}
void RenderingDeviceVulkan::submit() {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
ERR_FAIL_COND_MSG(local_device_processing, "device already submitted, call sync to wait until done.");
@@ -8617,6 +8769,8 @@ void RenderingDeviceVulkan::submit() {
}
void RenderingDeviceVulkan::sync() {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
ERR_FAIL_COND_MSG(!local_device_processing, "sync can only be called after a submit");
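
For reference, with BARRIER_MASK_RASTER split into BARRIER_MASK_VERTEX and BARRIER_MASK_FRAGMENT, a caller that previously requested a raster post-barrier now sets both new flags, and the new buffer_copy() entry point takes the same mask. The sketch below is a minimal, hypothetical usage example, not part of the patch; it assumes the matching virtuals are exposed on the RenderingDevice base class in servers/rendering/rendering_device.h, and the function name and RIDs are illustrative only.

// Minimal usage sketch (assumptions: RenderingDevice singleton API and the
// BarrierMask/buffer_copy declarations from this patch series; copy_staging_to_storage
// and its RID parameters are hypothetical).
#include "servers/rendering/rendering_device.h"

void copy_staging_to_storage(RID p_src_buffer, RID p_dst_buffer, uint32_t p_size) {
	RenderingDevice *rd = RenderingDevice::get_singleton();

	// Equivalent of the old BARRIER_MASK_RASTER: make the copy visible to both
	// the vertex and fragment stages once it completes.
	BitField<RenderingDevice::BarrierMask> post_barrier;
	post_barrier.set_flag(RenderingDevice::BARRIER_MASK_VERTEX);
	post_barrier.set_flag(RenderingDevice::BARRIER_MASK_FRAGMENT);

	// New in this patch: GPU-side copy between two already-created buffers.
	Error err = rd->buffer_copy(p_src_buffer, p_dst_buffer, 0, 0, p_size, post_barrier);
	ERR_FAIL_COND(err != OK);
}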