Diffstat (limited to 'drivers/vulkan/rendering_device_vulkan.cpp')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  130
1 file changed, 106 insertions(+), 24 deletions(-)
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 0832a07f51..cd18d84837 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -54,9 +54,13 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
@@ -68,8 +72,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.get_or_null(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@@ -77,8 +84,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.get_or_null(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
@@ -89,8 +100,12 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
buffer = storage_buffer_owner.get_or_null(p_buffer);
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ r_stage_mask |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
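This hunk (and the matching ones below) splits the old BARRIER_MASK_RASTER flag into separate BARRIER_MASK_VERTEX and BARRIER_MASK_FRAGMENT flags, so callers can synchronize against only the pipeline stage that actually consumes their data. A minimal sketch of what the updated enum in rendering_device.h plausibly looks like; only the flag names visible in this diff are confirmed, the bit values and the RASTER compatibility alias are assumptions:

enum BarrierMask {
	BARRIER_MASK_VERTEX = 1,
	BARRIER_MASK_COMPUTE = 2,
	BARRIER_MASK_TRANSFER = 4,
	BARRIER_MASK_FRAGMENT = 8,
	// Assumed legacy alias so existing BARRIER_MASK_RASTER callers keep compiling.
	BARRIER_MASK_RASTER = BARRIER_MASK_VERTEX | BARRIER_MASK_FRAGMENT,
	BARRIER_MASK_ALL_BARRIERS = 0x7FFF,
};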
@@ -2625,8 +2640,12 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -2874,6 +2893,29 @@ bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
return texture_owner.owns(p_texture);
}
+RD::TextureFormat RenderingDeviceVulkan::texture_get_format(RID p_texture) {
+ _THREAD_SAFE_METHOD_
+
+ Texture *tex = texture_owner.get_or_null(p_texture);
+ ERR_FAIL_COND_V(!tex, TextureFormat());
+
+ TextureFormat tf;
+
+ tf.format = tex->format;
+ tf.width = tex->width;
+ tf.height = tex->height;
+ tf.depth = tex->depth;
+ tf.array_layers = tex->layers;
+ tf.mipmaps = tex->mipmaps;
+ tf.texture_type = tex->type;
+ tf.samples = tex->samples;
+ tf.usage_bits = tex->usage_flags;
+ tf.shareable_formats = tex->allowed_shared_formats;
+ tf.is_resolve_buffer = tex->is_resolve_buffer;
+
+ return tf;
+}
+
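The new texture_get_format() accessor copies the creation parameters back out of the cached Texture entry, so callers can recreate or mirror a texture without tracking its TextureFormat on the side. A minimal usage sketch, assuming a valid RenderingDevice pointer rd and an existing texture RID existing_tex (both hypothetical):

// Recover the format of an existing texture and allocate a second
// texture with identical creation parameters.
RD::TextureFormat fmt = rd->texture_get_format(existing_tex);
RID twin = rd->texture_create(fmt, RD::TextureView());
ERR_FAIL_COND(twin.is_null());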
Size2i RenderingDeviceVulkan::texture_size(RID p_texture) {
_THREAD_SAFE_METHOD_
@@ -3020,8 +3062,12 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3198,8 +3244,12 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -3334,8 +3384,12 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
@@ -4633,7 +4687,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
"Number of uniform sets is larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
// Collect reflection data into binary data.
- RenderingDeviceVulkanShaderBinaryData binary_data;
+ RenderingDeviceVulkanShaderBinaryData binary_data{};
Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; // Set bindings.
Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants;
{
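Changing the declaration to binary_data{} value-initializes the struct, so every member starts zeroed instead of holding indeterminate values; since this struct is serialized into the shader binary, that presumably keeps the emitted bytes deterministic across runs. The distinction in isolation (BinaryData is a stand-in for the real aggregate):

struct BinaryData { uint32_t vertex_input_mask; uint32_t push_constant_size; };

BinaryData a;   // Default-initialized: members hold indeterminate values.
BinaryData b{}; // Value-initialized: every member is zero.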
@@ -7470,6 +7524,7 @@ uint32_t RenderingDeviceVulkan::draw_list_get_current_pass() {
}
RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_switch_to_next_pass() {
+ _THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(draw_list == nullptr, INVALID_ID);
ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, INVALID_FORMAT_ID);
@@ -7485,6 +7540,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_switch_to_next_pass
return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
Error RenderingDeviceVulkan::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
+ _THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(draw_list == nullptr, ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, ERR_INVALID_PARAMETER);
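draw_list_switch_to_next_pass() and its split variant now take the class mutex on entry. _THREAD_SAFE_METHOD_ is the scoped-locking macro from core/os/thread_safe.h; roughly, the helper family looks like this (exact definitions may differ between versions):

// Approximate shape of Godot's thread-safety helpers.
#define _THREAD_SAFE_CLASS_  mutable Mutex _thread_safe_;
#define _THREAD_SAFE_METHOD_ MutexLock _thread_safe_method_(_thread_safe_); // RAII lock for the whole method.
#define _THREAD_SAFE_LOCK_   _thread_safe_.lock();
#define _THREAD_SAFE_UNLOCK_ _thread_safe_.unlock();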
@@ -7649,10 +7705,14 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
}
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
+ }
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
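Note the asymmetry in draw_list_end(): the vertex-side branch also covers VK_PIPELINE_STAGE_VERTEX_INPUT_BIT plus index and vertex-attribute reads, since buffers written during the pass may next be consumed by fixed-function vertex fetch, while the fragment-side branch only needs the fragment shader stage. A hedged caller sketch (rd is hypothetical) showing how the finer flags narrow the post-pass barrier; the two calls are alternatives:

// Results are only consumed by a later compute pass: no need to flush
// toward the raster stages at all.
rd->draw_list_end(RD::BARRIER_MASK_COMPUTE);

// Results also feed vertex fetch in a following raster pass.
rd->draw_list_end(RD::BARRIER_MASK_COMPUTE | RD::BARRIER_MASK_VERTEX);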
@@ -7729,6 +7789,8 @@ void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier)
/***********************/
RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_allow_draw_overlap) {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
@@ -7743,6 +7805,8 @@ RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin(bool p_
}
void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -7807,6 +7871,8 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l
}
void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -7981,6 +8047,8 @@ void RenderingDeviceVulkan::compute_list_set_push_constant(ComputeListID p_list,
}
void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_COND(!compute_list);
@@ -8124,6 +8192,8 @@ void RenderingDeviceVulkan::compute_list_dispatch_indirect(ComputeListID p_list,
}
void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
+ // Must be called within a compute list; the class mutex is locked during that time.
+
uint32_t barrier_flags = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
uint32_t access_flags = VK_ACCESS_SHADER_READ_BIT;
_compute_list_add_barrier(BARRIER_MASK_COMPUTE, barrier_flags, access_flags);
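The comment added to each compute_list_* method documents the locking contract: the class mutex is described as held for the lifetime of the compute list (presumably taken when the list begins and released when it ends), so the per-call methods rely on that bracket instead of re-acquiring the lock themselves; a second lock on a non-recursive mutex would deadlock. Intended call pattern from the caller's side, sketched with hypothetical names:

// The device stays locked between begin and end, so other threads'
// RenderingDevice calls block until compute_list_end() returns.
RD::ComputeListID cl = rd->compute_list_begin();
rd->compute_list_bind_compute_pipeline(cl, pipeline);
rd->compute_list_bind_uniform_set(cl, uniform_set, 0);
rd->compute_list_dispatch(cl, group_count_x, group_count_y, 1);
rd->compute_list_end();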
@@ -8197,10 +8267,14 @@ void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrie
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
- barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
+ if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
+ barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
@@ -8225,7 +8299,7 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_from.has_flag(BARRIER_MASK_RASTER)) {
+ if (p_from.has_flag(BARRIER_MASK_FRAGMENT)) {
src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
@@ -8245,10 +8319,14 @@ void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<Barri
dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_to.has_flag(BARRIER_MASK_RASTER)) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ if (p_to.has_flag(BARRIER_MASK_VERTEX)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
+ if (p_to.has_flag(BARRIER_MASK_FRAGMENT)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
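In barrier(), the new flags are interpreted directionally. On the source side (p_from), BARRIER_MASK_FRAGMENT waits at the attachment-output and late-fragment-test stages, where raster color/depth writes land; on the destination side (p_to), VERTEX and FRAGMENT block the shader and indirect-draw stages where that data is read. A hedged usage sketch (rd is hypothetical):

// Make compute-shader writes visible to a following raster pass that
// reads the results in both vertex and fragment shaders.
rd->barrier(RD::BARRIER_MASK_COMPUTE,
		RD::BARRIER_MASK_VERTEX | RD::BARRIER_MASK_FRAGMENT);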
@@ -8604,6 +8682,8 @@ void RenderingDeviceVulkan::swap_buffers() {
}
void RenderingDeviceVulkan::submit() {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
ERR_FAIL_COND_MSG(local_device_processing, "device already submitted, call sync to wait until done.");
@@ -8615,6 +8695,8 @@ void RenderingDeviceVulkan::submit() {
}
void RenderingDeviceVulkan::sync() {
+ _THREAD_SAFE_METHOD_
+
ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
ERR_FAIL_COND_MSG(!local_device_processing, "sync can only be called after a submit");
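submit() and sync() implement the blocking execution model for local (offscreen/compute-only) devices and are now serialized by the class mutex as well. Typical round trip, sketched with assumed setup:

// Hypothetical local-device usage: record work, submit, then block
// until the GPU finishes before reading anything back.
RenderingDevice *local_rd = RenderingServer::get_singleton()->create_local_rendering_device();
// ... create buffers/pipelines and record compute lists on local_rd ...
local_rd->submit();
local_rd->sync(); // Valid only after submit(); waits for completion.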