author     Dario <dariosamo@gmail.com>  2023-11-24 08:23:22 -0300
committer  Dario <dariosamo@gmail.com>  2024-01-08 14:54:56 -0300
commit     cc4d39b0c1f0d4645d63120832a49e54242e3f5f (patch)
tree       3f2b283ba5b5b2d5c7e383372e36ca245ec8c22a /servers/rendering/rendering_device.cpp
parent     84e205b5a17bfe7ace96b78c410ec10aa52241d2 (diff)
download   redot-engine-cc4d39b0c1f0d4645d63120832a49e54242e3f5f.tar.gz
Acyclic Command Graph for RenderingDevice.
Adds a new system to automatically reorder commands, perform layout transitions and insert synchronization barriers based on the commands issued to RenderingDevice.
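To illustrate the idea (an editorial sketch, not the engine's actual data structures): commands become graph nodes, an edge is added whenever a command depends on a resource a previous command wrote, and the resulting DAG is flushed in dependency levels, with commands inside a level free to be reordered and a synchronization barrier inserted between levels. A minimal, self-contained toy in C++:

#include <cstdio>
#include <utility>
#include <vector>

// Toy acyclic command graph: nodes are commands, edges are dependencies.
// Commands within the same level have no dependencies on each other and may
// be recorded in any order; one barrier separates consecutive levels.
struct ToyCommandGraph {
	std::vector<std::vector<int>> adj; // adj[i] = commands that depend on command i.
	std::vector<int> indegree;

	int add_command() {
		adj.emplace_back();
		indegree.push_back(0);
		return (int)adj.size() - 1;
	}

	void add_dependency(int p_from, int p_to) {
		adj[p_from].push_back(p_to);
		indegree[p_to]++;
	}

	// Kahn's algorithm, processed level by level.
	void flush() {
		std::vector<int> level;
		for (int i = 0; i < (int)adj.size(); i++) {
			if (indegree[i] == 0) {
				level.push_back(i);
			}
		}
		while (!level.empty()) {
			std::vector<int> next;
			for (int cmd : level) {
				printf("record command %d\n", cmd);
				for (int dep : adj[cmd]) {
					if (--indegree[dep] == 0) {
						next.push_back(dep);
					}
				}
			}
			printf("synchronization barrier\n");
			level = std::move(next);
		}
	}
};

int main() {
	ToyCommandGraph graph;
	int upload = graph.add_command(); // E.g. a buffer upload.
	int draw = graph.add_command(); // A draw that reads the uploaded buffer.
	int blit = graph.add_command(); // An independent copy that can overlap the upload.
	(void)blit;
	graph.add_dependency(upload, draw);
	graph.flush(); // upload and blit share a level; draw lands in the next one.
	return 0;
}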
Diffstat (limited to 'servers/rendering/rendering_device.cpp')
-rw-r--r-- servers/rendering/rendering_device.cpp | 2686
1 file changed, 896 insertions(+), 1790 deletions(-)
diff --git a/servers/rendering/rendering_device.cpp b/servers/rendering/rendering_device.cpp
index 9e7c32094b..03ffe967e2 100644
--- a/servers/rendering/rendering_device.cpp
+++ b/servers/rendering/rendering_device.cpp
@@ -37,7 +37,26 @@
#include "core/io/dir_access.h"
#include "servers/rendering/renderer_rd/api_context_rd.h"
-//#define FORCE_FULL_BARRIER
+// When true, the command graph will attempt to reorder the rendering commands submitted by the user based on the dependencies it
+// automatically detects from those commands. This should improve rendering performance in most scenarios at the cost of some extra CPU overhead.
+//
+// This behavior can be disabled if it's suspected that the graph is not detecting dependencies correctly and more control over the order of
+// the commands is desired (e.g. debugging).
+
+#define RENDER_GRAPH_REORDER 1
+
+// Synchronization barriers are issued between the graph's levels with only the amount of detail necessary to achieve the correct result.
+// If it's suspected that the graph is not doing this correctly, full barriers can be issued instead, blocking all types of operations
+// between the synchronization levels. Enabling this setting has a very negative impact on performance, so it's intended for debugging
+// purposes only.
+
+#define RENDER_GRAPH_FULL_BARRIERS 0
+
+// The command graph can automatically issue secondary command buffers and record them on background threads when they reach an arbitrary
+// size threshold. This can significantly reduce the time the main thread takes to record all the rendering commands. However, this
+// setting is not enabled by default, as it has been shown to cause issues with certain IHVs that are not yet understood.
+
+#define SECONDARY_COMMAND_BUFFERS_PER_FRAME 0
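Editorial note: this hunk only defines the toggles; the code that consumes them lies outside this excerpt. A hedged sketch of the kind of call site that could exist elsewhere in the file (the real function name and signature may differ):

// Hypothetical flush site: hand the toggles to the graph when the frame's
// recorded commands are baked into the command buffer.
draw_graph.end(RENDER_GRAPH_REORDER != 0, RENDER_GRAPH_FULL_BARRIERS != 0);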
RenderingDevice *RenderingDevice::singleton = nullptr;
@@ -131,127 +150,23 @@ RID RenderingDevice::shader_create_from_spirv(const Vector<ShaderStageSPIRVData>
return shader_create_from_bytecode(bytecode);
}
-/******************/
-/**** BARRIERS ****/
-/******************/
-
-void RenderingDevice::_full_barrier(bool p_sync_with_draw) {
- // Used for debug.
-
- RDD::MemoryBarrier mb;
- mb.src_access = (RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT |
- RDD::BARRIER_ACCESS_INDEX_READ_BIT |
- RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- RDD::BARRIER_ACCESS_UNIFORM_READ_BIT |
- RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_WRITE_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_READ_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT |
- RDD::BARRIER_ACCESS_HOST_READ_BIT |
- RDD::BARRIER_ACCESS_HOST_WRITE_BIT);
- mb.dst_access = (RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT |
- RDD::BARRIER_ACCESS_INDEX_READ_BIT |
- RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- RDD::BARRIER_ACCESS_UNIFORM_READ_BIT |
- RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_WRITE_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_READ_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT |
- RDD::BARRIER_ACCESS_HOST_READ_BIT |
- RDD::BARRIER_ACCESS_HOST_WRITE_BIT);
-
- RDD::CommandBufferID cmd_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
- driver->command_pipeline_barrier(cmd_buffer, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, mb, {}, {});
-}
-
/***************************/
/**** BUFFER MANAGEMENT ****/
/***************************/
-RenderingDevice::Buffer *RenderingDevice::_get_buffer_from_owner(RID p_buffer, BitField<RDD::PipelineStageBits> &r_stages, BitField<RDD::BarrierAccessBits> &r_access, BitField<BarrierMask> p_post_barrier) {
+RenderingDevice::Buffer *RenderingDevice::_get_buffer_from_owner(RID p_buffer) {
Buffer *buffer = nullptr;
- r_stages.clear();
- r_access.clear();
if (vertex_buffer_owner.owns(p_buffer)) {
buffer = vertex_buffer_owner.get_or_null(p_buffer);
-
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
- if (buffer->usage & RDD::BUFFER_USAGE_STORAGE_BIT) {
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- r_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- r_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- }
- }
} else if (index_buffer_owner.owns(p_buffer)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_INDEX_READ_BIT);
buffer = index_buffer_owner.get_or_null(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- }
- r_access.set_flag(RDD::BARRIER_ACCESS_UNIFORM_READ_BIT);
buffer = uniform_buffer_owner.get_or_null(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT);
- }
-
- // FIXME: Broken.
+ DEV_ASSERT(false && "FIXME: Broken.");
//buffer = texture_buffer_owner.get_or_null(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
buffer = storage_buffer_owner.get_or_null(p_buffer);
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
-
- if (buffer->usage.has_flag(RDD::BUFFER_USAGE_INDIRECT_BIT)) {
- r_stages.set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- r_access.set_flag(RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT);
- }
}
return buffer;
}
@@ -269,10 +184,11 @@ Error RenderingDevice::_insert_staging_block() {
return OK;
}
-Error RenderingDevice::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment) {
+Error RenderingDevice::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, StagingRequiredAction &r_required_action, bool p_can_segment) {
// Determine a block to use.
r_alloc_size = p_amount;
+ r_required_action = STAGING_REQUIRED_ACTION_NONE;
while (true) {
r_alloc_offset = 0;
@@ -324,23 +240,7 @@ Error RenderingDevice::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_re
// and this frame is not even done.
// If this is the main thread, it means the user is likely loading a lot of resources at once.
// Otherwise, the thread should just be blocked until the next frame (currently unimplemented).
-
- if (false) { // Separate thread from render.
-
- //block_until_next_frame()
- continue;
- } else {
- // Flush EVERYTHING including setup commands. IF not immediate, also need to flush the draw commands.
- _flush(true);
-
- // Clear the whole staging buffer.
- for (int i = 0; i < staging_buffer_blocks.size(); i++) {
- staging_buffer_blocks.write[i].frame_used = 0;
- staging_buffer_blocks.write[i].fill_amount = 0;
- }
- // Claim current.
- staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
- }
+ r_required_action = STAGING_REQUIRED_ACTION_FLUSH_CURRENT;
}
} else {
@@ -368,28 +268,7 @@ Error RenderingDevice::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_re
// Let's flush older frames.
// The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
// If loading from a separate thread, we can block that thread until next frame when more room is made (not currently implemented, though).
-
- if (false) {
- // Separate thread from render.
- //block_until_next_frame()
- continue; // And try again.
- } else {
- _flush(false);
-
- for (int i = 0; i < staging_buffer_blocks.size(); i++) {
- // Clear all blocks but the ones from this frame.
- int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size();
- if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) {
- break; // Ok, we reached something from this frame, abort.
- }
-
- staging_buffer_blocks.write[block_idx].frame_used = 0;
- staging_buffer_blocks.write[block_idx].fill_amount = 0;
- }
-
- // Claim for current frame.
- staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
- }
+ r_required_action = STAGING_REQUIRED_ACTION_FLUSH_OLDER;
}
}
@@ -402,20 +281,78 @@ Error RenderingDevice::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_re
return OK;
}
-Error RenderingDevice::_buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer, uint32_t p_required_align) {
+void RenderingDevice::_staging_buffer_execute_required_action(StagingRequiredAction p_required_action) {
+ switch (p_required_action) {
+ case STAGING_REQUIRED_ACTION_NONE: {
+ // Do nothing.
+ } break;
+ case STAGING_REQUIRED_ACTION_FLUSH_CURRENT: {
+			// Flush EVERYTHING, including setup commands. If not immediate, the draw commands also need to be flushed.
+ _flush(true);
+
+ // Clear the whole staging buffer.
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ staging_buffer_blocks.write[i].frame_used = 0;
+ staging_buffer_blocks.write[i].fill_amount = 0;
+ }
+
+ // Claim for current frame.
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } break;
+ case STAGING_REQUIRED_ACTION_FLUSH_OLDER: {
+ _flush(false);
+
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ // Clear all blocks but the ones from this frame.
+ int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size();
+ if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) {
+ break; // Ok, we reached something from this frame, abort.
+ }
+
+ staging_buffer_blocks.write[block_idx].frame_used = 0;
+ staging_buffer_blocks.write[block_idx].fill_amount = 0;
+ }
+
+ // Claim for current frame.
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } break;
+ default: {
+ DEV_ASSERT(false && "Unknown required action.");
+ } break;
+ }
+}
+
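The split between allocating and executing the required action exists so callers can submit work that references older staging blocks before the flush invalidates them. A condensed sketch of the call sequence, mirroring the `_buffer_update` loop that follows (`chunk_size`, `alignment` and `submit_pending_copies` are illustrative stand-ins):

uint32_t offset = 0;
uint32_t size = 0;
StagingRequiredAction action = STAGING_REQUIRED_ACTION_NONE;
Error err = _staging_buffer_allocate(chunk_size, alignment, offset, size, action);
if (err == OK) {
	if (action == STAGING_REQUIRED_ACTION_FLUSH_CURRENT) {
		// Commands recorded against older staging blocks must be submitted
		// before the flush invalidates those blocks.
		submit_pending_copies();
	}
	_staging_buffer_execute_required_action(action);
	// ...map the staging block, write the data, record the copy...
}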
+Error RenderingDevice::_buffer_update(Buffer *p_buffer, RID p_buffer_id, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_queue, uint32_t p_required_align) {
// Submitting may get chunked for various reasons, so convert this to a task.
size_t to_submit = p_data_size;
size_t submit_from = 0;
+ thread_local LocalVector<RDG::RecordedBufferCopy> command_buffer_copies_vector;
+ command_buffer_copies_vector.clear();
+
while (to_submit > 0) {
uint32_t block_write_offset;
uint32_t block_write_amount;
+ StagingRequiredAction required_action;
- Error err = _staging_buffer_allocate(MIN(to_submit, staging_buffer_block_size), p_required_align, block_write_offset, block_write_amount);
+ Error err = _staging_buffer_allocate(MIN(to_submit, staging_buffer_block_size), p_required_align, block_write_offset, block_write_amount, required_action);
if (err) {
return err;
}
+ if (p_use_draw_queue && !command_buffer_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_CURRENT) {
+ if (_buffer_make_mutable(p_buffer, p_buffer_id)) {
+ // The buffer must be mutable to be used as a copy destination.
+ draw_graph.add_synchronization();
+ }
+
+ // If we're using the draw queue and the staging buffer requires flushing everything, we submit the command early and clear the current vector.
+ draw_graph.add_buffer_update(p_buffer->driver_id, p_buffer->draw_tracker, command_buffer_copies_vector);
+ command_buffer_copies_vector.clear();
+ }
+
+ _staging_buffer_execute_required_action(required_action);
+
// Map staging buffer (It's CPU and coherent).
uint8_t *data_ptr = driver->buffer_map(staging_buffer_blocks[staging_buffer_current].driver_id);
ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);
@@ -427,12 +364,19 @@ Error RenderingDevice::_buffer_update(Buffer *p_buffer, size_t p_offset, const u
driver->buffer_unmap(staging_buffer_blocks[staging_buffer_current].driver_id);
// Insert a command to copy this.
-
RDD::BufferCopyRegion region;
region.src_offset = block_write_offset;
region.dst_offset = submit_from + p_offset;
region.size = block_write_amount;
- driver->command_copy_buffer(p_use_draw_command_buffer ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, staging_buffer_blocks[staging_buffer_current].driver_id, p_buffer->driver_id, region);
+
+ if (p_use_draw_queue) {
+ RDG::RecordedBufferCopy buffer_copy;
+ buffer_copy.source = staging_buffer_blocks[staging_buffer_current].driver_id;
+ buffer_copy.region = region;
+ command_buffer_copies_vector.push_back(buffer_copy);
+ } else {
+ driver->command_copy_buffer(frames[frame].setup_command_buffer, staging_buffer_blocks[staging_buffer_current].driver_id, p_buffer->driver_id, region);
+ }
staging_buffer_blocks.write[staging_buffer_current].fill_amount = block_write_offset + block_write_amount;
@@ -440,10 +384,19 @@ Error RenderingDevice::_buffer_update(Buffer *p_buffer, size_t p_offset, const u
submit_from += block_write_amount;
}
+ if (p_use_draw_queue && !command_buffer_copies_vector.is_empty()) {
+ if (_buffer_make_mutable(p_buffer, p_buffer_id)) {
+ // The buffer must be mutable to be used as a copy destination.
+ draw_graph.add_synchronization();
+ }
+
+ draw_graph.add_buffer_update(p_buffer->driver_id, p_buffer->draw_tracker, command_buffer_copies_vector);
+ }
+
return OK;
}
-Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
@@ -451,25 +404,12 @@ Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t
ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
"Copying buffers is forbidden during creation of a compute list");
- // This method assumes the barriers have been pushed prior to being called, therefore no barriers are pushed
- // for the source or destination buffers before performing the copy. These masks are effectively ignored.
- BitField<RDD::PipelineStageBits> src_stages;
- BitField<RDD::BarrierAccessBits> src_access;
- Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer, src_stages, src_access, BARRIER_MASK_NO_BARRIER);
+ Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer);
if (!src_buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Source buffer argument is not a valid buffer of any type.");
}
- BitField<RDD::PipelineStageBits> dst_stages;
- BitField<RDD::BarrierAccessBits> dst_access;
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- // If the post barrier mask defines it, we indicate the destination buffer will require a barrier with these flags set
- // after the copy command is queued.
- dst_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer, dst_stages, dst_access, p_post_barrier);
+ Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer);
if (!dst_buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Destination buffer argument is not a valid buffer of any type.");
}
@@ -483,31 +423,18 @@ Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t
region.src_offset = p_src_offset;
region.dst_offset = p_dst_offset;
region.size = p_size;
- driver->command_copy_buffer(frames[frame].draw_command_buffer, src_buffer->driver_id, dst_buffer->driver_id, region);
-#ifdef FORCE_FULL_BARRIER
- _full_barrier(true);
-#else
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS) && p_post_barrier != RD::BARRIER_MASK_NO_BARRIER) {
- if (dst_stages.is_empty()) {
- dst_stages = RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
- }
-
- // As indicated by the post barrier mask, push a new barrier.
- RDD::BufferBarrier bb;
- bb.buffer = dst_buffer->driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = dst_access;
- bb.offset = p_dst_offset;
- bb.size = p_size;
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, dst_stages, {}, bb, {});
+ if (_buffer_make_mutable(dst_buffer, p_dst_buffer)) {
+ // The destination buffer must be mutable to be used as a copy destination.
+ draw_graph.add_synchronization();
}
-#endif
+
+ draw_graph.add_buffer_copy(src_buffer->driver_id, src_buffer->draw_tracker, dst_buffer->driver_id, dst_buffer->draw_tracker, region);
return OK;
}
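`_buffer_make_mutable` itself is not shown in this excerpt. A hedged reading, based on its use here and on the tracker creation seen in `storage_buffer_create` further down: it lazily attaches an RDG resource tracker to a buffer that is about to be written, returning true when that state changed so the caller can add a synchronization point. A sketch under that assumption (the real implementation may differ):

bool RenderingDevice::_buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id) {
	// p_buffer_id would presumably be used for diagnostics; omitted here.
	if (p_buffer->draw_tracker != nullptr) {
		return false; // Already tracked as mutable.
	}
	p_buffer->draw_tracker = RDG::resource_tracker_create();
	p_buffer->draw_tracker->buffer_driver_id = p_buffer->driver_id;
	return true; // Caller should add a synchronization point.
}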
-Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
@@ -515,14 +442,7 @@ Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p
ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
"Updating buffers is forbidden during creation of a compute list");
- BitField<RDD::PipelineStageBits> dst_stages;
- BitField<RDD::BarrierAccessBits> dst_access;
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- // Protect subsequent updates.
- dst_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
- Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stages, dst_access, p_post_barrier);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer);
if (!buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
}
@@ -530,33 +450,10 @@ Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p
ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
- Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, true);
- if (err) {
- return err;
- }
-
-#ifdef FORCE_FULL_BARRIER
- _full_barrier(true);
-#else
- if (dst_stages.is_empty()) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS) && p_post_barrier != RD::BARRIER_MASK_NO_BARRIER) {
- RDD::BufferBarrier bb;
- bb.buffer = buffer->driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = dst_access;
- bb.offset = p_offset;
- bb.size = p_size;
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, dst_stages, {}, bb, {});
- }
-
-#endif
- return err;
+ return _buffer_update(buffer, p_buffer, p_offset, (uint8_t *)p_data, p_size, true);
}
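The user-facing effect of this hunk: `p_post_barrier` disappears from the public signature because the graph now derives barriers on its own. Caller-side, the change looks roughly like this (illustrative):

// Before this commit: the caller had to describe downstream usage.
rd->buffer_update(buffer, 0, data.size(), data.ptr(), RD::BARRIER_MASK_ALL_BARRIERS);

// After this commit: the command graph infers the required barriers.
rd->buffer_update(buffer, 0, data.size(), data.ptr());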
-Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
@@ -566,15 +463,7 @@ Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_
ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
"Updating buffers is forbidden during creation of a compute list");
- BitField<RDD::PipelineStageBits> dst_stages;
- BitField<RDD::BarrierAccessBits> dst_access;
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- // Protect subsequent updates.
- dst_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stages, dst_access, p_post_barrier);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer);
if (!buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
}
@@ -582,51 +471,24 @@ Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_
ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
- driver->command_clear_buffer(frames[frame].draw_command_buffer, buffer->driver_id, p_offset, p_size);
-
-#ifdef FORCE_FULL_BARRIER
- _full_barrier(true);
-#else
- if (dst_stages.is_empty()) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
+ if (_buffer_make_mutable(buffer, p_buffer)) {
+ // The destination buffer must be mutable to be used as a clear destination.
+ draw_graph.add_synchronization();
}
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = buffer->driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = dst_access;
- bb.offset = p_offset;
- bb.size = p_size;
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, dst_stages, {}, bb, {});
- }
+ draw_graph.add_buffer_clear(buffer->driver_id, buffer->draw_tracker, p_offset, p_size);
-#endif
return OK;
}
Vector<uint8_t> RenderingDevice::buffer_get_data(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
_THREAD_SAFE_METHOD_
- // It could be this buffer was just created.
- BitField<RDD::PipelineStageBits> src_stages = RDD::PIPELINE_STAGE_TRANSFER_BIT;
- BitField<RDD::BarrierAccessBits> src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- // Get the vulkan buffer and the potential stage/access possible.
- Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stages, src_access, BARRIER_MASK_ALL_BARRIERS);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer);
if (!buffer) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
}
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- // Make sure no one is using the buffer -- the "true" gets us to the same command buffer as below.
- RDD::BufferBarrier bb;
- bb.buffer = buffer->driver_id;
- bb.src_access = src_access;
- bb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- bb.size = buffer->size;
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, src_stages, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, bb, {});
- }
-
// Size of buffer to retrieve.
if (!p_size) {
p_size = buffer->size;
@@ -641,7 +503,9 @@ Vector<uint8_t> RenderingDevice::buffer_get_data(RID p_buffer, uint32_t p_offset
RDD::BufferCopyRegion region;
region.src_offset = p_offset;
region.size = p_size;
- driver->command_copy_buffer(frames[frame].draw_command_buffer, buffer->driver_id, tmp_buffer, region);
+
+ draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, tmp_buffer, region);
+
// Flush everything so memory can be safely mapped.
_flush(true);
@@ -676,23 +540,21 @@ RID RenderingDevice::storage_buffer_create(uint32_t p_size_bytes, const Vector<u
buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
ERR_FAIL_COND_V(!buffer.driver_id, RID());
+ // Storage buffers are assumed to be mutable.
+ buffer.draw_tracker = RDG::resource_tracker_create();
+ buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
+
if (p_data.size()) {
- uint64_t data_size = p_data.size();
- const uint8_t *r = p_data.ptr();
- _buffer_update(&buffer, 0, r, data_size);
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = buffer.driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- bb.size = data_size;
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, {}, bb, {});
- }
+ _buffer_update(&buffer, RID(), 0, p_data.ptr(), p_data.size());
}
buffer_memory += buffer.size;
- return storage_buffer_owner.make_rid(buffer);
+ RID id = storage_buffer_owner.make_rid(buffer);
+#ifdef DEV_ENABLED
+ set_resource_name(id, "RID:" + itos(id.get_id()));
+#endif
+ return id;
}
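For context, a typical call into this creation path (illustrative; assumes `rd` points to a RenderingDevice):

// Create a 256-byte storage buffer with caller-provided contents.
Vector<uint8_t> initial_data;
initial_data.resize(256);
RID ssbo = rd->storage_buffer_create(initial_data.size(), initial_data);
// The buffer receives a draw tracker on creation, so the graph always
// treats it as mutable.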
RID RenderingDevice::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
@@ -710,6 +572,12 @@ RID RenderingDevice::texture_buffer_create(uint32_t p_size_elements, DataFormat
texture_buffer.driver_id = driver->buffer_create(size_bytes, usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
ERR_FAIL_COND_V(!texture_buffer.driver_id, RID());
+	// Texture buffers are assumed to be immutable when created with initial data; without it, they're treated as mutable.
+ if (p_data.is_empty()) {
+ texture_buffer.draw_tracker = RDG::resource_tracker_create();
+ texture_buffer.draw_tracker->buffer_driver_id = texture_buffer.driver_id;
+ }
+
bool ok = driver->buffer_set_texel_format(texture_buffer.driver_id, p_format);
if (!ok) {
driver->buffer_free(texture_buffer.driver_id);
@@ -717,15 +585,7 @@ RID RenderingDevice::texture_buffer_create(uint32_t p_size_elements, DataFormat
}
if (p_data.size()) {
- _buffer_update(&texture_buffer, 0, p_data.ptr(), p_data.size());
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = texture_buffer.driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT;
- bb.size = size_bytes;
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, (RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT), {}, bb, {});
- }
+ _buffer_update(&texture_buffer, RID(), 0, p_data.ptr(), p_data.size());
}
buffer_memory += size_bytes;
@@ -787,10 +647,8 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
ERR_FAIL_COND_V_MSG(required_mipmaps < format.mipmaps, RID(),
"Too many mipmaps requested for texture format and dimensions (" + itos(format.mipmaps) + "), maximum allowed: (" + itos(required_mipmaps) + ").");
+ uint32_t forced_usage_bits = 0;
if (p_data.size()) {
- ERR_FAIL_COND_V_MSG(!(format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT), RID(),
- "Texture needs the TEXTURE_USAGE_CAN_UPDATE_BIT usage flag in order to be updated at initialization or later");
-
ERR_FAIL_COND_V_MSG(p_data.size() != (int)format.array_layers, RID(),
"Default supplied data for image format is of invalid length (" + itos(p_data.size()) + "), should be (" + itos(format.array_layers) + ").");
@@ -799,6 +657,10 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
ERR_FAIL_COND_V_MSG((uint32_t)p_data[i].size() != required_size, RID(),
"Data for slice index " + itos(i) + " (mapped to layer " + itos(i) + ") differs in size (supplied: " + itos(p_data[i].size()) + ") than what is required by the format (" + itos(required_size) + ").");
}
+
+ if (!(format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT)) {
+ forced_usage_bits = TEXTURE_USAGE_CAN_UPDATE_BIT;
+ }
}
{
@@ -849,7 +711,7 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
// Create.
Texture texture;
-
+ format.usage_bits |= forced_usage_bits;
texture.driver_id = driver->texture_create(format, tv);
ERR_FAIL_COND_V(!texture.driver_id, RID());
texture.type = format.texture_type;
@@ -862,26 +724,10 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
texture.base_mipmap = 0;
texture.base_layer = 0;
texture.is_resolve_buffer = format.is_resolve_buffer;
- texture.usage_flags = format.usage_bits;
+ texture.usage_flags = format.usage_bits & ~forced_usage_bits;
texture.samples = format.samples;
texture.allowed_shared_formats = format.shareable_formats;
-
- // Set base layout based on usage priority.
-
- if ((format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT)) {
- // First priority, readable.
- texture.layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- } else if ((format.usage_bits & TEXTURE_USAGE_STORAGE_BIT)) {
- // Second priority, storage.
- texture.layout = RDD::TEXTURE_LAYOUT_GENERAL;
- } else if ((format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- // Third priority, color or depth.
- texture.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- } else if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- texture.layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- } else {
- texture.layout = RDD::TEXTURE_LAYOUT_GENERAL;
- }
+ texture.has_initial_data = !p_data.is_empty();
if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
@@ -896,18 +742,10 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
texture.bound = false;
- // Barrier to set layout.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier tb;
- tb.texture = texture.driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
- tb.next_layout = texture.layout;
- tb.subresources.aspect = texture.barrier_aspect_flags;
- tb.subresources.mipmap_count = format.mipmaps;
- tb.subresources.layer_count = format.array_layers;
-
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, {}, {}, tb);
+ // Textures are only assumed to be immutable if they have initial data and none of the other bits that indicate write usage are enabled.
+ bool texture_mutable_by_default = texture.usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_STORAGE_BIT | TEXTURE_USAGE_STORAGE_ATOMIC_BIT | TEXTURE_USAGE_VRS_ATTACHMENT_BIT);
+ if (p_data.is_empty() || texture_mutable_by_default) {
+ _texture_make_mutable(&texture, RID());
}
texture_memory += driver->texture_get_allocation_size(texture.driver_id);
@@ -919,9 +757,15 @@ RID RenderingDevice::texture_create(const TextureFormat &p_format, const Texture
if (p_data.size()) {
for (uint32_t i = 0; i < p_format.array_layers; i++) {
- _texture_update(id, i, p_data[i], BARRIER_MASK_ALL_BARRIERS, true);
+ _texture_update(id, i, p_data[i], true, false);
+ }
+
+ if (texture.draw_tracker != nullptr) {
+			// The draw tracker can assume the texture will be in the transfer destination layout.
+ texture.draw_tracker->usage = RDG::RESOURCE_USAGE_TRANSFER_TO;
}
}
+
return id;
}
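Editorial note on the `forced_usage_bits` handling in this function: when initial data is supplied without TEXTURE_USAGE_CAN_UPDATE_BIT, the bit is forced on for the driver-side texture so the initial upload can proceed, then masked back out of the user-visible `usage_flags` so later public `texture_update` calls still fail validation. Condensed from the hunks above (`has_initial_data` stands in for `p_data.size()`):

uint32_t forced_usage_bits = 0;
if (has_initial_data && !(format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT)) {
	forced_usage_bits = TEXTURE_USAGE_CAN_UPDATE_BIT; // Allow the initial upload.
}
format.usage_bits |= forced_usage_bits; // The driver sees the bit...
texture.usage_flags = format.usage_bits & ~forced_usage_bits; // ...the user does not.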
@@ -959,6 +803,12 @@ RID RenderingDevice::texture_create_shared(const TextureView &p_view, RID p_with
texture.driver_id = driver->texture_create_shared(texture.driver_id, tv);
ERR_FAIL_COND_V(!texture.driver_id, RID());
+ texture.slice_trackers.clear();
+
+ if (texture.draw_tracker != nullptr) {
+ texture.draw_tracker->reference_count++;
+ }
+
texture.owner = p_with_texture;
RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
@@ -988,23 +838,6 @@ RID RenderingDevice::texture_create_from_extension(TextureType p_type, DataForma
texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_UNORM);
texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_SRGB);
- // Set base layout based on usage priority.
-
- if (p_usage.has_flag(TEXTURE_USAGE_SAMPLING_BIT)) {
- // First priority, readable.
- texture.layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- } else if (p_usage.has_flag(TEXTURE_USAGE_STORAGE_BIT)) {
- // Second priority, storage.
- texture.layout = RDD::TEXTURE_LAYOUT_GENERAL;
- } else if (p_usage.has_flag(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- // Third priority, color or depth.
- texture.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- } else if (p_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- texture.layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- } else {
- texture.layout = RDD::TEXTURE_LAYOUT_GENERAL;
- }
-
if (p_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
@@ -1019,19 +852,7 @@ RID RenderingDevice::texture_create_from_extension(TextureType p_type, DataForma
texture.driver_id = driver->texture_create_from_extension(p_image, p_type, p_format, p_layers, (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT));
ERR_FAIL_COND_V(!texture.driver_id, RID());
- // Barrier to set layout.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier tb;
- tb.texture = texture.driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
- tb.next_layout = texture.layout;
- tb.subresources.aspect = texture.barrier_aspect_flags;
- tb.subresources.mipmap_count = texture.mipmaps;
- tb.subresources.layer_count = texture.layers;
-
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT, {}, {}, tb);
- }
+ _texture_make_mutable(&texture, RID());
RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
@@ -1081,6 +902,7 @@ RID RenderingDevice::texture_create_shared_from_slice(const TextureView &p_view,
}
Texture texture = *src_texture;
+
get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
texture.mipmaps = p_mipmaps;
texture.layers = slice_layers;
@@ -1118,7 +940,17 @@ RID RenderingDevice::texture_create_shared_from_slice(const TextureView &p_view,
texture.driver_id = driver->texture_create_shared_from_slice(src_texture->driver_id, tv, p_slice_type, p_layer, slice_layers, p_mipmap, p_mipmaps);
ERR_FAIL_COND_V(!texture.driver_id, RID());
+ const Rect2i slice_rect(p_mipmap, p_layer, p_mipmaps, slice_layers);
texture.owner = p_with_texture;
+ texture.slice_type = p_slice_type;
+ texture.slice_rect = slice_rect;
+
+ // If parent is mutable, make slice mutable by default.
+ if (src_texture->draw_tracker != nullptr) {
+ texture.draw_tracker = nullptr;
+ _texture_make_mutable(&texture, RID());
+ }
+
RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
@@ -1128,8 +960,8 @@ RID RenderingDevice::texture_create_shared_from_slice(const TextureView &p_view,
return id;
}
-Error RenderingDevice::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
- return _texture_update(p_texture, p_layer, p_data, p_post_barrier, false);
+Error RenderingDevice::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data) {
+ return _texture_update(p_texture, p_layer, p_data, false, true);
}
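The two booleans map to `p_use_setup_queue` and `p_validate_can_update` in the internal signature below: public updates go through the draw queue and keep the usage-flag validation, while the internal call from `texture_create` (seen earlier as `(true, false)`) uses the setup queue and skips validation because the flag may have been forced on. An illustrative public call:

// Update layer 0 of a texture through the public API. The barrier mask is
// gone; validation of TEXTURE_USAGE_CAN_UPDATE_BIT still applies.
Error err = rd->texture_update(texture, 0, image_data);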
static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_t *__restrict p_dst, uint32_t p_src_x, uint32_t p_src_y, uint32_t p_src_w, uint32_t p_src_h, uint32_t p_src_full_w, uint32_t p_dst_pitch, uint32_t p_unit_size) {
@@ -1148,7 +980,7 @@ static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_
}
}
-Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue) {
+Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_use_setup_queue, bool p_validate_can_update) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((draw_list || compute_list) && !p_use_setup_queue, ERR_INVALID_PARAMETER,
@@ -1166,7 +998,7 @@ Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Ve
ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
"Texture can't be updated while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to update this texture.");
- ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(p_validate_can_update && !(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER,
"Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_UPDATE_BIT` to be set to be updatable.");
uint32_t layer_count = texture->layers;
@@ -1191,21 +1023,22 @@ Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Ve
const uint8_t *r = p_data.ptr();
- RDD::CommandBufferID command_buffer = p_use_setup_queue ? frames[frame].setup_command_buffer : frames[frame].draw_command_buffer;
+ thread_local LocalVector<RDG::RecordedBufferToTextureCopy> command_buffer_to_texture_copies_vector;
+ command_buffer_to_texture_copies_vector.clear();
- // Barrier to transfer.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
+ if (p_use_setup_queue && driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
+ // When using the setup queue directly, we transition the texture to the optimal layout.
RDD::TextureBarrier tb;
tb.texture = texture->driver_id;
tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.prev_layout = texture->layout;
+ tb.prev_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
tb.subresources.aspect = texture->barrier_aspect_flags;
tb.subresources.mipmap_count = texture->mipmaps;
tb.subresources.base_layer = p_layer;
tb.subresources.layer_count = 1;
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
+ driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
}
uint32_t mipmap_offset = 0;
@@ -1240,12 +1073,26 @@ Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Ve
uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
region_pitch = STEPIFY(region_pitch, pitch_step);
uint32_t to_allocate = region_pitch * region_h;
-
uint32_t alloc_offset = 0, alloc_size = 0;
- Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false);
+ StagingRequiredAction required_action;
+ Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, required_action, false);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- uint8_t *write_ptr = nullptr;
+ if (!p_use_setup_queue && !command_buffer_to_texture_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_CURRENT) {
+ if (_texture_make_mutable(texture, p_texture)) {
+ // The texture must be mutable to be used as a copy destination.
+ draw_graph.add_synchronization();
+ }
+
+ // If we're using the draw queue and the staging buffer requires flushing everything, we submit the command early and clear the current vector.
+ draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);
+ command_buffer_to_texture_copies_vector.clear();
+ }
+
+ _staging_buffer_execute_required_action(required_action);
+
+ uint8_t *write_ptr;
+
{ // Map.
uint8_t *data_ptr = driver->buffer_map(staging_buffer_blocks[staging_buffer_current].driver_id);
ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);
@@ -1288,7 +1135,14 @@ Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Ve
copy_region.texture_offset = Vector3i(x, y, z);
copy_region.texture_region_size = Vector3i(region_logic_w, region_logic_h, 1);
- driver->command_copy_buffer_to_texture(command_buffer, staging_buffer_blocks[staging_buffer_current].driver_id, texture->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL, copy_region);
+ if (p_use_setup_queue) {
+ driver->command_copy_buffer_to_texture(frames[frame].setup_command_buffer, staging_buffer_blocks[staging_buffer_current].driver_id, texture->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL, copy_region);
+ } else {
+ RDG::RecordedBufferToTextureCopy buffer_to_texture_copy;
+ buffer_to_texture_copy.from_buffer = staging_buffer_blocks[staging_buffer_current].driver_id;
+ buffer_to_texture_copy.region = copy_region;
+ command_buffer_to_texture_copies_vector.push_back(buffer_to_texture_copy);
+ }
staging_buffer_blocks.write[staging_buffer_current].fill_amount = alloc_offset + alloc_size;
}
@@ -1300,50 +1154,25 @@ Error RenderingDevice::_texture_update(RID p_texture, uint32_t p_layer, const Ve
logic_height = MAX(1u, logic_height >> 1);
}
- // Barrier to restore layout.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- BitField<RDD::PipelineStageBits> stages;
- BitField<RDD::BarrierAccessBits> access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- if (stages.is_empty()) {
- stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
+ if (p_use_setup_queue && (texture->draw_tracker == nullptr) && driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
+ // If the texture does not have a tracker, it means it must be transitioned to the sampling state.
RDD::TextureBarrier tb;
tb.texture = texture->driver_id;
tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.dst_access = access;
tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
- tb.next_layout = texture->layout;
+ tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
tb.subresources.aspect = texture->barrier_aspect_flags;
tb.subresources.mipmap_count = texture->mipmaps;
tb.subresources.base_layer = p_layer;
tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, stages, {}, {}, tb);
-
- if (texture->used_in_frame != frames_drawn) {
- texture->used_in_raster = false;
- texture->used_in_compute = false;
- texture->used_in_frame = frames_drawn;
+ driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, {}, {}, tb);
+ } else if (!p_use_setup_queue && !command_buffer_to_texture_copies_vector.is_empty()) {
+ if (_texture_make_mutable(texture, p_texture)) {
+ // The texture must be mutable to be used as a copy destination.
+ draw_graph.add_synchronization();
}
- texture->used_in_transfer = true;
+
+ draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);
}
return OK;
@@ -1455,103 +1284,71 @@ Vector<uint8_t> RenderingDevice::texture_get_data(RID p_texture, uint32_t p_laye
RDD::BufferID tmp_buffer = driver->buffer_create(work_buffer_size, RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
ERR_FAIL_COND_V(!tmp_buffer, Vector<uint8_t>());
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer; // Makes more sense to retrieve.
-
- // Pre-copy barrier.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier tb;
- tb.texture = tex->driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.prev_layout = tex->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.subresources.aspect = tex->barrier_aspect_flags;
- tb.subresources.mipmap_count = tex->mipmaps;
- tb.subresources.base_layer = p_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
+ thread_local LocalVector<RDD::BufferTextureCopyRegion> command_buffer_texture_copy_regions_vector;
+ command_buffer_texture_copy_regions_vector.clear();
- {
- uint32_t w = tex->width;
- uint32_t h = tex->height;
- uint32_t d = tex->depth;
- for (uint32_t i = 0; i < tex->mipmaps; i++) {
- RDD::BufferTextureCopyRegion copy_region;
- copy_region.buffer_offset = mip_layouts[i].offset;
- copy_region.texture_subresources.aspect = tex->read_aspect_flags;
- copy_region.texture_subresources.mipmap = i;
- copy_region.texture_subresources.base_layer = p_layer;
- copy_region.texture_subresources.layer_count = 1;
- copy_region.texture_region_size.x = w;
- copy_region.texture_region_size.y = h;
- copy_region.texture_region_size.z = d;
- driver->command_copy_texture_to_buffer(command_buffer, tex->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL, tmp_buffer, copy_region);
-
- w = MAX(1u, w >> 1);
- h = MAX(1u, h >> 1);
- d = MAX(1u, d >> 1);
- }
+ uint32_t w = tex->width;
+ uint32_t h = tex->height;
+ uint32_t d = tex->depth;
+ for (uint32_t i = 0; i < tex->mipmaps; i++) {
+ RDD::BufferTextureCopyRegion copy_region;
+ copy_region.buffer_offset = mip_layouts[i].offset;
+ copy_region.texture_subresources.aspect = tex->read_aspect_flags;
+ copy_region.texture_subresources.mipmap = i;
+ copy_region.texture_subresources.base_layer = p_layer;
+ copy_region.texture_subresources.layer_count = 1;
+ copy_region.texture_region_size.x = w;
+ copy_region.texture_region_size.y = h;
+ copy_region.texture_region_size.z = d;
+ command_buffer_texture_copy_regions_vector.push_back(copy_region);
+
+ w = MAX(1u, w >> 1);
+ h = MAX(1u, h >> 1);
+ d = MAX(1u, d >> 1);
}
- // Post-copy barrier.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier tb;
- tb.texture = tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.dst_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT;
- if ((tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT)) {
- tb.dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.next_layout = tex->layout;
- tb.subresources.aspect = tex->barrier_aspect_flags;
- tb.subresources.mipmap_count = tex->mipmaps;
- tb.subresources.base_layer = p_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, {}, {}, tb);
+ if (_texture_make_mutable(tex, p_texture)) {
+ // The texture must be mutable to be used as a copy source due to layout transitions.
+ draw_graph.add_synchronization();
}
+ draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, tmp_buffer, command_buffer_texture_copy_regions_vector);
+
_flush(true);
const uint8_t *read_ptr = driver->buffer_map(tmp_buffer);
ERR_FAIL_NULL_V(read_ptr, Vector<uint8_t>());
Vector<uint8_t> buffer_data;
- {
- uint32_t tight_buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps);
- buffer_data.resize(tight_buffer_size);
-
- uint8_t *write_ptr = buffer_data.ptrw();
-
- uint32_t w = tex->width;
- uint32_t h = tex->height;
- uint32_t d = tex->depth;
- for (uint32_t i = 0; i < tex->mipmaps; i++) {
- uint32_t width = 0, height = 0, depth = 0;
- uint32_t tight_mip_size = get_image_format_required_size(tex->format, w, h, d, 1, &width, &height, &depth);
- uint32_t block_w = 0, block_h = 0;
- get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);
- uint32_t tight_row_pitch = tight_mip_size / ((height / block_h) * depth);
-
- {
- // Copy row-by-row to erase padding due to alignments.
- const uint8_t *rp = read_ptr;
- uint8_t *wp = write_ptr;
- for (uint32_t row = h * d / block_h; row != 0; row--) {
- memcpy(wp, rp, tight_row_pitch);
- rp += mip_layouts[i].row_pitch;
- wp += tight_row_pitch;
- }
- }
+ uint32_t tight_buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps);
+ buffer_data.resize(tight_buffer_size);
+
+ uint8_t *write_ptr = buffer_data.ptrw();
- w = MAX(1u, w >> 1);
- h = MAX(1u, h >> 1);
- d = MAX(1u, d >> 1);
- read_ptr += mip_layouts[i].size;
- write_ptr += tight_mip_size;
+ w = tex->width;
+ h = tex->height;
+ d = tex->depth;
+ for (uint32_t i = 0; i < tex->mipmaps; i++) {
+ uint32_t width = 0, height = 0, depth = 0;
+ uint32_t tight_mip_size = get_image_format_required_size(tex->format, w, h, d, 1, &width, &height, &depth);
+ uint32_t block_w = 0, block_h = 0;
+ get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);
+ uint32_t tight_row_pitch = tight_mip_size / ((height / block_h) * depth);
+
+ // Copy row-by-row to erase padding due to alignments.
+ const uint8_t *rp = read_ptr;
+ uint8_t *wp = write_ptr;
+ for (uint32_t row = h * d / block_h; row != 0; row--) {
+ memcpy(wp, rp, tight_row_pitch);
+ rp += mip_layouts[i].row_pitch;
+ wp += tight_row_pitch;
}
+
+ w = MAX(1u, w >> 1);
+ h = MAX(1u, h >> 1);
+ d = MAX(1u, d >> 1);
+ read_ptr += mip_layouts[i].size;
+ write_ptr += tight_mip_size;
}
driver->buffer_unmap(tmp_buffer);
@@ -1610,7 +1407,7 @@ uint64_t RenderingDevice::texture_get_native_handle(RID p_texture) {
}
#endif
-Error RenderingDevice::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -1658,133 +1455,34 @@ Error RenderingDevice::texture_copy(RID p_from_texture, RID p_to_texture, const
ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
"Source and destination texture must be of the same type (color or depth).");
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer;
+ RDD::TextureCopyRegion copy_region;
+ copy_region.src_subresources.aspect = src_tex->read_aspect_flags;
+ copy_region.src_subresources.mipmap = p_src_mipmap;
+ copy_region.src_subresources.base_layer = p_src_layer;
+ copy_region.src_subresources.layer_count = 1;
+ copy_region.src_offset = p_from;
- // PRE Copy the image.
+ copy_region.dst_subresources.aspect = dst_tex->read_aspect_flags;
+ copy_region.dst_subresources.mipmap = p_dst_mipmap;
+ copy_region.dst_subresources.base_layer = p_dst_layer;
+ copy_region.dst_subresources.layer_count = 1;
+ copy_region.dst_offset = p_to;
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- { // Source.
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.prev_layout = src_tex->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.subresources.aspect = src_tex->barrier_aspect_flags;
- tb.subresources.base_mipmap = p_src_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = p_src_layer;
- tb.subresources.layer_count = 1;
+ copy_region.size = p_size;
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
- { // Dest.
- RDD::TextureBarrier tb;
- tb.texture = dst_tex->driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.prev_layout = dst_tex->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
- tb.subresources.aspect = dst_tex->read_aspect_flags;
- tb.subresources.base_mipmap = p_dst_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = p_dst_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
+ // The textures must be mutable to be used in the copy operation.
+ bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
+ bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
+ if (src_made_mutable || dst_made_mutable) {
+ draw_graph.add_synchronization();
}
- // COPY.
-
- {
- RDD::TextureCopyRegion copy_region;
- copy_region.src_subresources.aspect = src_tex->read_aspect_flags;
- copy_region.src_subresources.mipmap = p_src_mipmap;
- copy_region.src_subresources.base_layer = p_src_layer;
- copy_region.src_subresources.layer_count = 1;
- copy_region.src_offset = p_from;
-
- copy_region.dst_subresources.aspect = dst_tex->read_aspect_flags;
- copy_region.dst_subresources.mipmap = p_dst_mipmap;
- copy_region.dst_subresources.base_layer = p_dst_layer;
- copy_region.dst_subresources.layer_count = 1;
- copy_region.dst_offset = p_to;
-
- copy_region.size = p_size;
-
- driver->command_copy_texture(command_buffer, src_tex->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_tex->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL, copy_region);
- }
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- // RESTORE LAYOUT for SRC and DST.
-
- BitField<RDD::PipelineStageBits> stages;
- BitField<RDD::BarrierAccessBits> access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- if (stages.is_empty()) {
- stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
- { // Restore src.
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.dst_access = access;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.next_layout = src_tex->layout;
- tb.subresources.aspect = src_tex->barrier_aspect_flags;
- tb.subresources.base_mipmap = p_src_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = p_src_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, stages, {}, {}, tb);
- }
-
- { // Make dst readable.
-
- RDD::TextureBarrier tb;
- tb.texture = dst_tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.dst_access = access;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
- tb.next_layout = dst_tex->layout;
- tb.subresources.aspect = dst_tex->read_aspect_flags;
- tb.subresources.base_mipmap = p_dst_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = p_dst_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, stages, {}, {}, tb);
- }
-
- if (dst_tex->used_in_frame != frames_drawn) {
- dst_tex->used_in_raster = false;
- dst_tex->used_in_compute = false;
- dst_tex->used_in_frame = frames_drawn;
- }
- dst_tex->used_in_transfer = true;
- }
+ draw_graph.add_texture_copy(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, copy_region);
return OK;
}
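For reference, caller-side usage after this change — a hypothetical sketch only; the rd pointer, RIDs, and extents are placeholders, and the argument order is assumed from the public RenderingDevice API rather than stated in this diff:

	// The command graph performs the layout transitions and inserts any barrier
	// required between this copy and earlier or later uses of either texture,
	// so no post-barrier mask is passed anymore.
	RenderingDevice *rd = RenderingDevice::get_singleton();
	rd->texture_copy(src_rid, dst_rid,
			Vector3(0, 0, 0), Vector3(0, 0, 0), // p_from, p_to
			Vector3(256, 256, 1), // p_size
			0, 0, // src/dst mipmap
			0, 0); // src/dst layer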
-Error RenderingDevice::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::texture_resolve_multisample(RID p_from_texture, RID p_to_texture) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -1815,108 +1513,19 @@ Error RenderingDevice::texture_resolve_multisample(RID p_from_texture, RID p_to_
ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
"Source and destination texture must be of the same type (color or depth).");
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer;
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- // PRE Copy the image.
-
- { // Source.
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.prev_layout = src_tex->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.subresources.aspect = src_tex->barrier_aspect_flags;
- tb.subresources.base_mipmap = src_tex->base_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = src_tex->base_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
- { // Dest.
- RDD::TextureBarrier tb;
- tb.texture = dst_tex->driver_id;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.prev_layout = dst_tex->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
- tb.subresources.aspect = dst_tex->barrier_aspect_flags;
- tb.subresources.base_mipmap = dst_tex->base_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = dst_tex->base_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
+ // The textures must be mutable to be used in the resolve operation.
+ bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
+ bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
+ if (src_made_mutable || dst_made_mutable) {
+ draw_graph.add_synchronization();
}
- // RESOLVE.
- driver->command_resolve_texture(command_buffer, src_tex->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL, src_tex->base_layer, src_tex->base_mipmap, dst_tex->driver_id, RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_tex->base_layer, dst_tex->base_mipmap);
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- // RESTORE LAYOUT for SRC and DST.
-
- BitField<RDD::PipelineStageBits> stages;
- BitField<RDD::BarrierAccessBits> access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- if (stages.is_empty()) {
- stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
- { // Restore src.
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_READ_BIT;
- tb.dst_access = access;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_SRC_OPTIMAL;
- tb.next_layout = src_tex->layout;
- tb.subresources.aspect = src_tex->barrier_aspect_flags;
- tb.subresources.base_mipmap = src_tex->base_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = src_tex->base_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT, stages, {}, {}, tb);
- }
-
- { // Make dst readable.
-
- RDD::TextureBarrier tb;
- tb.texture = dst_tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.dst_access = access;
- tb.prev_layout = RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
- tb.next_layout = dst_tex->layout;
- tb.subresources.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
- tb.subresources.base_mipmap = dst_tex->base_mipmap;
- tb.subresources.mipmap_count = 1;
- tb.subresources.base_layer = dst_tex->base_layer;
- tb.subresources.layer_count = 1;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, stages, {}, {}, tb);
- }
- }
+ draw_graph.add_texture_resolve(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, src_tex->base_layer, src_tex->base_mipmap, dst_tex->base_layer, dst_tex->base_mipmap);
return OK;
}
-Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier) {
+Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_texture);
@@ -1939,33 +1548,6 @@ Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32
ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(p_base_layer + p_layers > src_layer_count, ERR_INVALID_PARAMETER);
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer;
-
- RDD::TextureLayout clear_layout = (src_tex->layout == RDD::TEXTURE_LAYOUT_GENERAL) ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_TRANSFER_DST_OPTIMAL;
-
- // NOTE: Perhaps the valid stages/accesses for a given owner should be a property of the owner. (Here and places like _get_buffer_from_owner.)
- const BitField<RDD::PipelineStageBits> valid_texture_stages = RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- constexpr BitField<RDD::BarrierAccessBits> read_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT;
- constexpr BitField<RDD::BarrierAccessBits> read_write_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT;
- const BitField<RDD::BarrierAccessBits> valid_texture_access = (src_tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) ? read_write_access : read_access;
-
- // Barrier from previous access with optional layout change (see clear_layout logic above).
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.src_access = valid_texture_access;
- tb.dst_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.prev_layout = src_tex->layout;
- tb.next_layout = clear_layout;
- tb.subresources.aspect = src_tex->read_aspect_flags;
- tb.subresources.base_mipmap = src_tex->base_mipmap + p_base_mipmap;
- tb.subresources.mipmap_count = p_mipmaps;
- tb.subresources.base_layer = src_tex->base_layer + p_base_layer;
- tb.subresources.layer_count = p_layers;
-
- driver->command_pipeline_barrier(command_buffer, valid_texture_stages, RDD::PIPELINE_STAGE_TRANSFER_BIT, {}, {}, tb);
- }
-
RDD::TextureSubresourceRange range;
range.aspect = src_tex->read_aspect_flags;
range.base_mipmap = src_tex->base_mipmap + p_base_mipmap;
@@ -1973,55 +1555,13 @@ Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32
range.base_layer = src_tex->base_layer + p_base_layer;
range.layer_count = p_layers;
- driver->command_clear_color_texture(command_buffer, src_tex->driver_id, clear_layout, p_color, range);
-
- // Barrier to post clear accesses (changing back the layout if needed).
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- BitField<RDD::PipelineStageBits> stages;
- BitField<RDD::BarrierAccessBits> access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
- }
-
- if (stages.is_empty()) {
- stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
- RDD::TextureBarrier tb;
- tb.texture = src_tex->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- tb.dst_access = access;
- tb.prev_layout = clear_layout;
- tb.next_layout = src_tex->layout;
- tb.subresources.aspect = src_tex->read_aspect_flags;
- tb.subresources.base_mipmap = src_tex->base_mipmap + p_base_mipmap;
- tb.subresources.mipmap_count = p_mipmaps;
- tb.subresources.base_layer = src_tex->base_layer + p_base_layer;
- tb.subresources.layer_count = p_layers;
-
- driver->command_pipeline_barrier(command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, stages, {}, {}, tb);
-
- if (src_tex->used_in_frame != frames_drawn) {
- src_tex->used_in_raster = false;
- src_tex->used_in_compute = false;
- src_tex->used_in_frame = frames_drawn;
- }
- src_tex->used_in_transfer = true;
+ if (_texture_make_mutable(src_tex, p_texture)) {
+ // The texture must be mutable to be used as a clear destination.
+ draw_graph.add_synchronization();
}
+ draw_graph.add_texture_clear(src_tex->driver_id, src_tex->draw_tracker, p_color, range);
+
return OK;
}
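Likewise for clears — a hypothetical caller-side sketch (the rd pointer and texture_rid are placeholders; the parameter order follows the new signature above):

	// Clear mip 0, layer 0 to opaque black. The graph schedules the layout
	// transition, the clear itself, and any barrier needed by later reads.
	RenderingDevice *rd = RenderingDevice::get_singleton();
	rd->texture_clear(texture_rid, Color(0, 0, 0, 1), 0, 1, 0, 1);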
@@ -2040,6 +1580,30 @@ bool RenderingDevice::texture_is_format_supported_for_usage(DataFormat p_format,
/**** FRAMEBUFFER ****/
/*********************/
+static RDD::AttachmentLoadOp initial_action_to_load_op(RenderingDevice::InitialAction p_action) {
+ switch (p_action) {
+ case RenderingDevice::INITIAL_ACTION_LOAD:
+ return RDD::ATTACHMENT_LOAD_OP_LOAD;
+ case RenderingDevice::INITIAL_ACTION_CLEAR:
+ return RDD::ATTACHMENT_LOAD_OP_CLEAR;
+ case RenderingDevice::INITIAL_ACTION_DISCARD:
+ return RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
+ default:
+ ERR_FAIL_V_MSG(RDD::ATTACHMENT_LOAD_OP_DONT_CARE, "Invalid initial action value (" + itos(p_action) + ")");
+ }
+}
+
+static RDD::AttachmentStoreOp final_action_to_store_op(RenderingDevice::FinalAction p_action) {
+ switch (p_action) {
+ case RenderingDevice::FINAL_ACTION_STORE:
+ return RDD::ATTACHMENT_STORE_OP_STORE;
+ case RenderingDevice::FINAL_ACTION_DISCARD:
+ return RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ default:
+ ERR_FAIL_V_MSG(RDD::ATTACHMENT_STORE_OP_DONT_CARE, "Invalid final action value (" + itos(p_action) + ")");
+ }
+}
+
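A quick sanity check of the two helpers above (a minimal sketch; the results follow directly from the switch bodies):

	// CLEAR loads by clearing the attachment; DISCARD stores by dropping its contents.
	RDD::AttachmentLoadOp load = initial_action_to_load_op(RenderingDevice::INITIAL_ACTION_CLEAR); // ATTACHMENT_LOAD_OP_CLEAR
	RDD::AttachmentStoreOp store = final_action_to_store_op(RenderingDevice::FINAL_ACTION_DISCARD); // ATTACHMENT_STORE_OP_DONT_CARE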
RDD::RenderPassID RenderingDevice::_render_pass_create(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, InitialAction p_initial_action, FinalAction p_final_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, uint32_t p_view_count, Vector<TextureSamples> *r_samples) {
// NOTE:
// Before the refactor to RenderingDevice-RenderingDeviceDriver, there was commented out code to
@@ -2077,209 +1641,40 @@ RDD::RenderPassID RenderingDevice::_render_pass_create(const Vector<AttachmentFo
description.format = p_attachments[i].format;
description.samples = p_attachments[i].samples;
- bool is_sampled = (p_attachments[i].usage_flags & TEXTURE_USAGE_SAMPLING_BIT);
- bool is_storage = (p_attachments[i].usage_flags & TEXTURE_USAGE_STORAGE_BIT);
- bool is_depth = (p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
-
// We can set up a framebuffer where we write to our VRS texture in order to initialize it.
// We make the assumption here that if our texture is actually used as our VRS attachment,
// it is used as such for each subpass. This is fairly certain given the restrictions on subpasses.
bool is_vrs = (p_attachments[i].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && i == p_passes[0].vrs_attachment;
if (is_vrs) {
- // For VRS we only read, there is no writing to this texture.
description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- description.initial_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- } else {
- // For each UNDEFINED, assume the prior use was a *read*, as we'd be discarding the output of a write.
- // Also, each UNDEFINED will do an immediate layout transition (write), s.t. we must ensure execution synchronization vs
- // the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that
- // stage.
-
- switch (is_depth ? p_initial_depth_action : p_initial_action) {
- case INITIAL_ACTION_CLEAR_REGION:
- case INITIAL_ACTION_CLEAR: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_CLEAR;
- description.initial_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_CLEAR;
- description.initial_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_CLEAR;
- } else {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
- } break;
- case INITIAL_ACTION_KEEP: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- description.initial_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- description.initial_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- } else {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
- } break;
- case INITIAL_ACTION_DROP: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- } else {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
- } break;
- case INITIAL_ACTION_CLEAR_REGION_CONTINUE:
- case INITIAL_ACTION_CONTINUE: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- description.initial_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- description.initial_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
- } else {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
- } break;
- default: {
- ERR_FAIL_V(RDD::RenderPassID()); // Should never reach here.
- }
- }
- }
-
- bool used_last = false;
-
- {
- int last_pass = p_passes.size() - 1;
-
- if (is_depth) {
- // Likely missing depth resolve?
- if (p_passes[last_pass].depth_attachment == i) {
- used_last = true;
- }
- } else if (is_vrs) {
- if (p_passes[last_pass].vrs_attachment == i) {
- used_last = true;
- }
- } else {
- if (p_passes[last_pass].resolve_attachments.size()) {
- // If using resolve attachments, check resolve attachments.
- for (int j = 0; j < p_passes[last_pass].resolve_attachments.size(); j++) {
- if (p_passes[last_pass].resolve_attachments[j] == i) {
- used_last = true;
- break;
- }
- }
- }
- if (!used_last) {
- for (int j = 0; j < p_passes[last_pass].color_attachments.size(); j++) {
- if (p_passes[last_pass].color_attachments[j] == i) {
- used_last = true;
- break;
- }
- }
- }
- }
-
- if (!used_last) {
- for (int j = 0; j < p_passes[last_pass].preserve_attachments.size(); j++) {
- if (p_passes[last_pass].preserve_attachments[j] == i) {
- used_last = true;
- break;
- }
- }
- }
- }
-
- FinalAction final_action = p_final_action;
- FinalAction final_depth_action = p_final_depth_action;
-
- if (!used_last) {
- if (is_depth) {
- final_depth_action = FINAL_ACTION_DISCARD;
-
- } else {
- final_action = FINAL_ACTION_DISCARD;
- }
- }
-
- if (is_vrs) {
- // We don't change our VRS texture during this process.
-
description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ description.initial_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
description.final_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
} else {
- switch (is_depth ? final_depth_action : final_action) {
- case FINAL_ACTION_READ: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.final_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- } else {
- description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
- description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- // TODO: What does this mean about the next usage (and thus appropriate dependency masks.
- }
- } break;
- case FINAL_ACTION_DISCARD: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = is_sampled ? RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- } else {
- description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
- } break;
- case FINAL_ACTION_CONTINUE: {
- if ((p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- } else if ((p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- description.store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_STORE;
- description.final_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- } else {
- description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
- description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED; // Don't care what is there.
- }
-
- } break;
- default: {
- ERR_FAIL_V(RDD::RenderPassID()); // Should never reach here.
- }
+ if (p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.load_op = initial_action_to_load_op(p_initial_action);
+ description.store_op = final_action_to_store_op(p_final_action);
+ description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ description.initial_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ description.final_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ } else if (p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.load_op = initial_action_to_load_op(p_initial_depth_action);
+ description.store_op = final_action_to_store_op(p_final_depth_action);
+ description.stencil_load_op = initial_action_to_load_op(p_initial_depth_action);
+ description.stencil_store_op = final_action_to_store_op(p_final_depth_action);
+ description.initial_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ description.final_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
+ description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
+ description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
}
}
@@ -2465,7 +1860,7 @@ RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create_
}
Vector<TextureSamples> samples;
- RDD::RenderPassID render_pass = _render_pass_create(p_attachments, p_passes, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, p_view_count, &samples); // Actions don't matter for this use case.
+ RDD::RenderPassID render_pass = _render_pass_create(p_attachments, p_passes, INITIAL_ACTION_CLEAR, FINAL_ACTION_STORE, INITIAL_ACTION_CLEAR, FINAL_ACTION_STORE, p_view_count, &samples); // Actions don't matter for this use case.
if (!render_pass) { // Was likely invalid.
return INVALID_ID;
@@ -2701,18 +2096,14 @@ RID RenderingDevice::vertex_buffer_create(uint32_t p_size_bytes, const Vector<ui
buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
ERR_FAIL_COND_V(!buffer.driver_id, RID());
+ // Vertex buffers are assumed to be immutable unless they are created without initial data or they've been explicitly marked for use as storage.
+ if (p_data.is_empty() || p_use_as_storage) {
+ buffer.draw_tracker = RDG::resource_tracker_create();
+ buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
+ }
+
if (p_data.size()) {
- uint64_t data_size = p_data.size();
- const uint8_t *r = p_data.ptr();
- _buffer_update(&buffer, 0, r, data_size);
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = buffer.driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
- bb.size = data_size;
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT, {}, bb, {});
- }
+ _buffer_update(&buffer, RID(), 0, p_data.ptr(), p_data.size());
}
buffer_memory += buffer.size;
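The same immutable-by-default heuristic repeats for index and uniform buffers below. A condensed sketch of the consequence, using only names from this diff: a buffer that keeps its initial data and is never used as storage gets no tracker, so later consumers record it as an untracked read instead of a graph dependency.

	// Mutable case: storage use or no initial data. The tracker lets the graph
	// order reads and writes against this buffer across command levels.
	if (p_data.is_empty() || p_use_as_storage) {
		buffer.draw_tracker = RDG::resource_tracker_create();
		buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
	}
	// Immutable case: draw_tracker stays null, and uniform_set_create (below)
	// files the buffer under untracked_usage rather than draw_trackers.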
@@ -2809,6 +2200,12 @@ RID RenderingDevice::vertex_array_create(uint32_t p_vertex_count, VertexFormatID
}
vertex_array.buffers.push_back(buffer->driver_id);
+
+ if (buffer->draw_tracker != nullptr) {
+ vertex_array.draw_trackers.push_back(buffer->draw_tracker);
+ } else {
+ vertex_array.untracked_buffers.insert(p_src_buffers[i]);
+ }
}
RID id = vertex_array_owner.make_rid(vertex_array);
@@ -2863,18 +2260,14 @@ RID RenderingDevice::index_buffer_create(uint32_t p_index_count, IndexBufferForm
index_buffer.driver_id = driver->buffer_create(index_buffer.size, index_buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
ERR_FAIL_COND_V(!index_buffer.driver_id, RID());
+ // Index buffers are assumed to be immutable unless they are created without initial data.
+ if (p_data.is_empty()) {
+ index_buffer.draw_tracker = RDG::resource_tracker_create();
+ index_buffer.draw_tracker->buffer_driver_id = index_buffer.driver_id;
+ }
+
if (p_data.size()) {
- uint64_t data_size = p_data.size();
- const uint8_t *r = p_data.ptr();
- _buffer_update(&index_buffer, 0, r, data_size);
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = index_buffer.driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = RDD::BARRIER_ACCESS_INDEX_READ_BIT;
- bb.size = data_size;
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT, {}, bb, {});
- }
+ _buffer_update(&index_buffer, RID(), 0, p_data.ptr(), p_data.size());
}
buffer_memory += index_buffer.size;
@@ -2899,6 +2292,7 @@ RID RenderingDevice::index_array_create(RID p_index_buffer, uint32_t p_index_off
IndexArray index_array;
index_array.max_index = index_buffer->max_index;
index_array.driver_id = index_buffer->driver_id;
+ index_array.draw_tracker = index_buffer->draw_tracker;
index_array.offset = p_index_offset;
index_array.indices = p_index_count;
index_array.format = index_buffer->format;
@@ -2991,6 +2385,29 @@ RID RenderingDevice::shader_create_from_bytecode(const Vector<uint8_t> &p_shader
shader->set_formats.push_back(format);
}
+ for (ShaderStage stage : shader_desc.stages) {
+ switch (stage) {
+ case SHADER_STAGE_VERTEX:
+ shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
+ break;
+ case SHADER_STAGE_FRAGMENT:
+ shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
+ break;
+ case SHADER_STAGE_TESSELATION_CONTROL:
+ shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
+ break;
+ case SHADER_STAGE_TESSELATION_EVALUATION:
+ shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT);
+ break;
+ case SHADER_STAGE_COMPUTE:
+ shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
+ break;
+ default:
+ DEV_ASSERT(false && "Unknown shader stage.");
+ break;
+ }
+ }
+
#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
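Note that `pipeline.stage_bits = shader->stage_bits` further down copies these accumulated bits into each pipeline, which presumably lets the graph attribute a draw or dispatch to the stages that actually consume its resources. A minimal sketch of the BitField behavior assumed by the accumulation above (the flag choice is illustrative):

	BitField<RDD::PipelineStageBits> bits;
	bits.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
	bits.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
	// A vertex+fragment shader reports both raster stages and no compute stage.
	bool raster = bits.has_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT); // true
	bool compute = bits.has_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT); // false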
@@ -3025,18 +2442,14 @@ RID RenderingDevice::uniform_buffer_create(uint32_t p_size_bytes, const Vector<u
buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
ERR_FAIL_COND_V(!buffer.driver_id, RID());
+ // Uniform buffers are assumed to be immutable unless they are created without initial data.
+ if (p_data.is_empty()) {
+ buffer.draw_tracker = RDG::resource_tracker_create();
+ buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
+ }
+
if (p_data.size()) {
- uint64_t data_size = p_data.size();
- const uint8_t *r = p_data.ptr();
- _buffer_update(&buffer, 0, r, data_size);
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::BufferBarrier bb;
- bb.buffer = buffer.driver_id;
- bb.src_access = RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT;
- bb.dst_access = RDD::BARRIER_ACCESS_UNIFORM_READ_BIT;
- bb.size = data_size;
- driver->command_pipeline_barrier(frames[frame].setup_command_buffer, RDD::PIPELINE_STAGE_TRANSFER_BIT, RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT | RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, {}, bb, {});
- }
+ _buffer_update(&buffer, RID(), 0, p_data.ptr(), p_data.size());
}
buffer_memory += buffer.size;
@@ -3073,8 +2486,9 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
// Used for verification to make sure a uniform set does not use a framebuffer bound texture.
LocalVector<UniformSet::AttachableTexture> attachable_textures;
- Vector<Texture *> mutable_sampled_textures;
- Vector<Texture *> mutable_storage_textures;
+ Vector<RDG::ResourceTracker *> draw_trackers;
+ Vector<RDG::ResourceUsage> draw_trackers_usage;
+ HashMap<RID, RDG::ResourceUsage> untracked_usage;
for (uint32_t i = 0; i < set_uniform_count; i++) {
const ShaderUniform &set_uniform = set_uniforms[i];
@@ -3126,7 +2540,8 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
ERR_FAIL_COND_V_MSG(!sampler_driver_id, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
- Texture *texture = texture_owner.get_or_null(uniform.get_id(j + 1));
+ RID texture_id = uniform.get_id(j + 1);
+ Texture *texture = texture_owner.get_or_null(texture_id);
ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
@@ -3139,8 +2554,11 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
attachable_textures.push_back(attachable_texture);
}
- if ((texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT)) {
- mutable_sampled_textures.push_back(texture);
+ if (texture->draw_tracker != nullptr) {
+ draw_trackers.push_back(texture->draw_tracker);
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
+ } else {
+ untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
}
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
@@ -3159,7 +2577,8 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
}
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
- Texture *texture = texture_owner.get_or_null(uniform.get_id(j));
+ RID texture_id = uniform.get_id(j);
+ Texture *texture = texture_owner.get_or_null(texture_id);
ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
@@ -3172,8 +2591,11 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
attachable_textures.push_back(attachable_texture);
}
- if ((texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT)) {
- mutable_sampled_textures.push_back(texture);
+ if (texture->draw_tracker != nullptr) {
+ draw_trackers.push_back(texture->draw_tracker);
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
+ } else {
+ untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
}
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
@@ -3191,7 +2613,8 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
}
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
- Texture *texture = texture_owner.get_or_null(uniform.get_id(j));
+ RID texture_id = uniform.get_id(j);
+ Texture *texture = texture_owner.get_or_null(texture_id);
ERR_FAIL_NULL_V_MSG(texture, RID(),
"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
@@ -3199,8 +2622,19 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as uniform.");
- if ((texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT)) {
- mutable_storage_textures.push_back(texture);
+ if (_texture_make_mutable(texture, texture_id)) {
+ // The texture must be mutable as a layout transition will be required.
+ draw_graph.add_synchronization();
+ }
+
+ if (texture->draw_tracker != nullptr) {
+ draw_trackers.push_back(texture->draw_tracker);
+
+ if (set_uniform.writable) {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE);
+ } else {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ);
+ }
}
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
@@ -3218,9 +2652,27 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
}
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
- Buffer *buffer = texture_buffer_owner.get_or_null(uniform.get_id(j));
+ RID buffer_id = uniform.get_id(j);
+ Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
ERR_FAIL_NULL_V_MSG(buffer, RID(), "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");
+ if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
+ // The buffer must be mutable if it's used for writing.
+ draw_graph.add_synchronization();
+ }
+
+ if (buffer->draw_tracker != nullptr) {
+ draw_trackers.push_back(buffer->draw_tracker);
+
+ if (set_uniform.writable) {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE);
+ } else {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
+ }
+ } else {
+ untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
+ }
+
driver_uniform.ids.push_back(buffer->driver_id);
}
} break;
@@ -3237,9 +2689,17 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
ERR_FAIL_COND_V_MSG(!sampler_driver_id, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
- Buffer *buffer = texture_buffer_owner.get_or_null(uniform.get_id(j + 1));
+ RID buffer_id = uniform.get_id(j + 1);
+ Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
ERR_FAIL_NULL_V_MSG(buffer, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");
+ if (buffer->draw_tracker != nullptr) {
+ draw_trackers.push_back(buffer->draw_tracker);
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
+ } else {
+ untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
+ }
+
driver_uniform.ids.push_back(*sampler_driver_id);
driver_uniform.ids.push_back(buffer->driver_id);
}
@@ -3251,12 +2711,20 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");
- Buffer *buffer = uniform_buffer_owner.get_or_null(uniform.get_id(0));
+ RID buffer_id = uniform.get_id(0);
+ Buffer *buffer = uniform_buffer_owner.get_or_null(buffer_id);
ERR_FAIL_NULL_V_MSG(buffer, RID(), "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
ERR_FAIL_COND_V_MSG(buffer->size < (uint32_t)set_uniform.length, RID(),
"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " is smaller than size of shader uniform: (" + itos(set_uniform.length) + ").");
+ if (buffer->draw_tracker != nullptr) {
+ draw_trackers.push_back(buffer->draw_tracker);
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ);
+ } else {
+ untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ;
+ }
+
driver_uniform.ids.push_back(buffer->driver_id);
} break;
case UNIFORM_TYPE_STORAGE_BUFFER: {
@@ -3265,10 +2733,11 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
Buffer *buffer = nullptr;
- if (storage_buffer_owner.owns(uniform.get_id(0))) {
- buffer = storage_buffer_owner.get_or_null(uniform.get_id(0));
- } else if (vertex_buffer_owner.owns(uniform.get_id(0))) {
- buffer = vertex_buffer_owner.get_or_null(uniform.get_id(0));
+ RID buffer_id = uniform.get_id(0);
+ if (storage_buffer_owner.owns(buffer_id)) {
+ buffer = storage_buffer_owner.get_or_null(buffer_id);
+ } else if (vertex_buffer_owner.owns(buffer_id)) {
+ buffer = vertex_buffer_owner.get_or_null(buffer_id);
ERR_FAIL_COND_V_MSG(!(buffer->usage.has_flag(RDD::BUFFER_USAGE_STORAGE_BIT)), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with storage flag.");
}
@@ -3278,6 +2747,23 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
ERR_FAIL_COND_V_MSG(set_uniform.length > 0 && buffer->size != (uint32_t)set_uniform.length, RID(),
"Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " does not match size of shader uniform: (" + itos(set_uniform.length) + ").");
+ if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
+ // The buffer must be mutable if it's used for writing.
+ draw_graph.add_synchronization();
+ }
+
+ if (buffer->draw_tracker != nullptr) {
+ draw_trackers.push_back(buffer->draw_tracker);
+
+ if (set_uniform.writable) {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE);
+ } else {
+ draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ);
+ }
+ } else {
+ untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ;
+ }
+
driver_uniform.ids.push_back(buffer->driver_id);
} break;
case UNIFORM_TYPE_INPUT_ATTACHMENT: {
@@ -3292,7 +2778,8 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
}
for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
- Texture *texture = texture_owner.get_or_null(uniform.get_id(j));
+ RID texture_id = uniform.get_id(j);
+ Texture *texture = texture_owner.get_or_null(texture_id);
ERR_FAIL_NULL_V_MSG(texture, RID(),
"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
@@ -3302,6 +2789,17 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
+ if (_texture_make_mutable(texture, texture_id)) {
+ // The texture must be mutable as a layout transition will be required.
+ draw_graph.add_synchronization();
+ }
+
+ if (texture->draw_tracker != nullptr) {
+ bool depth_stencil_read = (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
+ draw_trackers.push_back(texture->draw_tracker);
+ draw_trackers_usage.push_back(depth_stencil_read ? RDG::RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ : RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ);
+ }
+
driver_uniform.ids.push_back(texture->driver_id);
}
} break;
@@ -3317,8 +2815,9 @@ RID RenderingDevice::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p
uniform_set.driver_id = driver_uniform_set;
uniform_set.format = shader->set_formats[p_shader_set];
uniform_set.attachable_textures = attachable_textures;
- uniform_set.mutable_sampled_textures = mutable_sampled_textures;
- uniform_set.mutable_storage_textures = mutable_storage_textures;
+ uniform_set.draw_trackers = draw_trackers;
+ uniform_set.draw_trackers_usage = draw_trackers_usage;
+ uniform_set.untracked_usage = untracked_usage;
uniform_set.shader_set = p_shader_set;
uniform_set.shader_id = p_shader;
@@ -3486,6 +2985,7 @@ RID RenderingDevice::render_pipeline_create(RID p_shader, FramebufferFormatID p_
pipeline.shader_layout_hash = shader->layout_hash;
pipeline.set_formats = shader->set_formats;
pipeline.push_constant_size = shader->push_constant_size;
+ pipeline.stage_bits = shader->stage_bits;
#ifdef DEBUG_ENABLED
pipeline.validation.dynamic_state = p_dynamic_state_flags;
@@ -3623,15 +3123,13 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_begin_for_screen(DisplayS
ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer;
-
if (!context->window_is_valid_swapchain(p_screen)) {
return INVALID_ID;
}
- Size2i size = Size2i(context->window_get_width(p_screen), context->window_get_height(p_screen));
+ Rect2i viewport = Rect2i(0, 0, context->window_get_width(p_screen), context->window_get_height(p_screen));
- _draw_list_allocate(Rect2i(Vector2i(), size), 0, 0);
+ _draw_list_allocate(viewport, 0);
#ifdef DEBUG_ENABLED
draw_list_framebuffer_format = screen_get_framebuffer_format();
#endif
@@ -3639,16 +3137,11 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_begin_for_screen(DisplayS
RDD::RenderPassClearValue clear_value;
clear_value.color = p_clear_color;
- driver->command_begin_render_pass(
- command_buffer,
- context->window_get_render_pass(p_screen),
- context->window_get_framebuffer(p_screen),
- RDD::COMMAND_BUFFER_TYPE_PRIMARY,
- Rect2i(0, 0, size.width, size.height),
- VectorView(&clear_value, 1));
- driver->command_render_set_viewport(command_buffer, Rect2i(Point2i(), size));
- driver->command_render_set_scissor(command_buffer, Rect2i(Point2i(), size));
+ draw_graph.add_draw_list_begin(context->window_get_render_pass(p_screen), context->window_get_framebuffer(p_screen), viewport, clear_value, true, false);
+
+ _draw_list_set_viewport(viewport);
+ _draw_list_set_scissor(viewport);
return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
@@ -3694,8 +3187,12 @@ Error RenderingDevice::_draw_list_setup_framebuffer(Framebuffer *p_framebuffer,
return OK;
}
-Error RenderingDevice::_draw_list_render_pass_begin(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i p_viewport_offset, Point2i p_viewport_size, RDD::FramebufferID p_framebuffer_driver_id, RDD::RenderPassID p_render_pass, RDD::CommandBufferID p_command_buffer, RDD::CommandBufferType p_cmd_buffer_mode, const Vector<RID> &p_storage_textures, bool p_constrained_to_region) {
+Error RenderingDevice::_draw_list_render_pass_begin(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i p_viewport_offset, Point2i p_viewport_size, RDD::FramebufferID p_framebuffer_driver_id, RDD::RenderPassID p_render_pass) {
LocalVector<RDD::RenderPassClearValue> clear_values;
+ LocalVector<RDG::ResourceTracker *> resource_trackers;
+ LocalVector<RDG::ResourceUsage> resource_usages;
+ bool uses_color = false;
+ bool uses_depth = false;
clear_values.resize(p_framebuffer->texture_ids.size());
int clear_values_count = 0;
{
@@ -3709,69 +3206,33 @@ Error RenderingDevice::_draw_list_render_pass_begin(Framebuffer *p_framebuffer,
continue;
}
- if (color_index < p_clear_colors.size() && texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
- ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); // A bug.
- clear_value.color = p_clear_colors[color_index];
- color_index++;
+ if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ if (color_index < p_clear_colors.size()) {
+ ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); // A bug.
+ clear_value.color = p_clear_colors[color_index];
+ color_index++;
+ }
+
+ resource_trackers.push_back(texture->draw_tracker);
+ resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE);
+ uses_color = true;
} else if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
clear_value.depth = p_clear_depth;
clear_value.stencil = p_clear_stencil;
+ resource_trackers.push_back(texture->draw_tracker);
+ resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE);
+ uses_depth = true;
}
clear_values[clear_values_count++] = clear_value;
}
}
- for (int i = 0; i < p_storage_textures.size(); i++) {
- Texture *texture = texture_owner.get_or_null(p_storage_textures[i]);
- if (!texture) {
- continue;
- }
- ERR_CONTINUE_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), "Supplied storage texture " + itos(i) + " for draw list is not set to be used for storage.");
-
- if (texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- // Must change layout to general.
- RDD::TextureBarrier tb;
- tb.texture = texture->driver_id;
- tb.src_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- tb.dst_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- tb.prev_layout = texture->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_GENERAL;
- tb.subresources.aspect = texture->read_aspect_flags;
- tb.subresources.base_mipmap = texture->base_mipmap;
- tb.subresources.mipmap_count = texture->mipmaps;
- tb.subresources.base_layer = texture->base_layer;
- tb.subresources.layer_count = texture->layers;
-
- driver->command_pipeline_barrier(p_command_buffer, RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT, RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT, {}, {}, tb);
-
- texture->layout = RDD::TEXTURE_LAYOUT_GENERAL;
- }
-
- draw_list_storage_textures.push_back(p_storage_textures[i]);
- }
- }
-
- Rect2i region;
- if (p_constrained_to_region) {
- region = Rect2i(p_viewport_offset, p_viewport_size);
- } else {
- region = Rect2i(Point2i(), p_framebuffer->size);
- }
-
- driver->command_begin_render_pass(
- p_command_buffer,
- p_render_pass,
- p_framebuffer_driver_id,
- p_cmd_buffer_mode,
- region,
- clear_values);
+ draw_graph.add_draw_list_begin(p_render_pass, p_framebuffer_driver_id, Rect2i(p_viewport_offset, p_viewport_size), clear_values, uses_color, uses_depth);
+ draw_graph.add_draw_list_usages(resource_trackers, resource_usages);
// Mark textures as bound.
draw_list_bound_textures.clear();
- draw_list_unbind_color_textures = p_final_color_action != FINAL_ACTION_CONTINUE;
- draw_list_unbind_depth_textures = p_final_depth_action != FINAL_ACTION_CONTINUE;
for (int i = 0; i < p_framebuffer->texture_ids.size(); i++) {
Texture *texture = texture_owner.get_or_null(p_framebuffer->texture_ids[i]);
@@ -3785,6 +3246,14 @@ Error RenderingDevice::_draw_list_render_pass_begin(Framebuffer *p_framebuffer,
return OK;
}
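The pass-begin path now records attachment usage explicitly: each attachment contributes its tracker plus a read-write usage, and the uses_color/uses_depth flags are forwarded to the graph. A condensed sketch of the pairing (names are from this diff; the single color attachment is a placeholder):

	LocalVector<RDG::ResourceTracker *> resource_trackers;
	LocalVector<RDG::ResourceUsage> resource_usages;
	resource_trackers.push_back(color_texture->draw_tracker);
	resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE);
	draw_graph.add_draw_list_begin(render_pass, framebuffer_driver_id, viewport_rect, clear_values, /*uses_color*/ true, /*uses_depth*/ false);
	draw_graph.add_draw_list_usages(resource_trackers, resource_usages);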
+void RenderingDevice::_draw_list_set_viewport(Rect2i p_rect) {
+ draw_graph.add_draw_list_set_viewport(p_rect);
+}
+
+void RenderingDevice::_draw_list_set_scissor(Rect2i p_rect) {
+ draw_graph.add_draw_list_set_scissor(p_rect);
+}
+
void RenderingDevice::_draw_list_insert_clear_region(DrawList *p_draw_list, Framebuffer *p_framebuffer, Point2i p_viewport_offset, Point2i p_viewport_size, bool p_clear_color, const Vector<Color> &p_clear_colors, bool p_clear_depth, float p_depth, uint32_t p_stencil) {
LocalVector<RDD::AttachmentClear> clear_attachments;
int color_index = 0;
@@ -3818,24 +3287,19 @@ void RenderingDevice::_draw_list_insert_clear_region(DrawList *p_draw_list, Fram
}
Rect2i rect = Rect2i(p_viewport_offset, p_viewport_size);
-
- driver->command_render_clear_attachments(p_draw_list->command_buffer, clear_attachments, rect);
+ draw_graph.add_draw_list_clear_attachments(clear_attachments, rect);
}
-RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
+RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
- ERR_FAIL_COND_V_MSG(compute_list != nullptr && !compute_list->state.allow_draw_overlap, INVALID_ID, "Only one draw/compute list can be active at the same time.");
Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
ERR_FAIL_NULL_V(framebuffer, INVALID_ID);
Point2i viewport_offset;
Point2i viewport_size = framebuffer->size;
- bool constrained_to_region = false;
- bool needs_clear_color = false;
- bool needs_clear_depth = false;
if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region.
Rect2i viewport(viewport_offset, viewport_size);
@@ -3848,34 +3312,9 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer,
viewport_offset = regioni.position;
viewport_size = regioni.size;
-
- // If clearing regions both in color and depth, we can switch to a fast path where we let Vulkan to the clears
- // and we constrain the render area to the region.
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION && p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
- constrained_to_region = true;
- p_initial_color_action = INITIAL_ACTION_CLEAR;
- p_initial_depth_action = INITIAL_ACTION_CLEAR;
- } else {
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION_CONTINUE) {
- needs_clear_color = true;
- p_initial_color_action = INITIAL_ACTION_CONTINUE;
- }
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION_CONTINUE) {
- needs_clear_depth = true;
- p_initial_depth_action = INITIAL_ACTION_CONTINUE;
- }
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) {
- needs_clear_color = true;
- p_initial_color_action = INITIAL_ACTION_KEEP;
- }
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
- needs_clear_depth = true;
- p_initial_depth_action = INITIAL_ACTION_KEEP;
- }
- }
}
- if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { // Check clear values.
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR) { // Check clear values.
int color_count = 0;
for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]);
@@ -3897,8 +3336,7 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer,
Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, &fb_driver_id, &render_pass, &draw_list_subpass_count);
ERR_FAIL_COND_V(err != OK, INVALID_ID);
- RDD::CommandBufferID command_buffer = frames[frame].draw_command_buffer;
- err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, viewport_offset, viewport_size, fb_driver_id, render_pass, command_buffer, RDD::COMMAND_BUFFER_TYPE_PRIMARY, p_storage_textures, constrained_to_region);
+ err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, viewport_offset, viewport_size, fb_driver_id, render_pass);
if (err != OK) {
return INVALID_ID;
@@ -3907,135 +3345,23 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer,
draw_list_render_pass = render_pass;
draw_list_vkframebuffer = fb_driver_id;
- _draw_list_allocate(Rect2i(viewport_offset, viewport_size), 0, 0);
+ _draw_list_allocate(Rect2i(viewport_offset, viewport_size), 0);
#ifdef DEBUG_ENABLED
draw_list_framebuffer_format = framebuffer->format_id;
#endif
draw_list_current_subpass = 0;
- if (needs_clear_color || needs_clear_depth) {
- DEV_ASSERT(!constrained_to_region);
- _draw_list_insert_clear_region(draw_list, framebuffer, viewport_offset, viewport_size, needs_clear_color, p_clear_color_values, needs_clear_depth, p_clear_depth, p_clear_stencil);
- }
-
- driver->command_render_set_viewport(command_buffer, Rect2i(viewport_offset, viewport_size));
- driver->command_render_set_scissor(command_buffer, Rect2i(viewport_offset, viewport_size));
+ _draw_list_set_viewport(Rect2i(viewport_offset, viewport_size));
+ _draw_list_set_scissor(Rect2i(viewport_offset, viewport_size));
return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
+#ifndef DISABLE_DEPRECATED
Error RenderingDevice::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
- _THREAD_SAFE_METHOD_
-
- ERR_FAIL_COND_V_MSG(draw_list != nullptr, ERR_BUSY, "Only one draw list can be active at the same time.");
- ERR_FAIL_COND_V_MSG(compute_list != nullptr && !compute_list->state.allow_draw_overlap, ERR_BUSY, "Only one draw/compute list can be active at the same time.");
-
- ERR_FAIL_COND_V(p_splits < 1, ERR_INVALID_DECLARATION);
-
- Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
- ERR_FAIL_NULL_V(framebuffer, ERR_INVALID_DECLARATION);
-
- Point2i viewport_offset;
- Point2i viewport_size = framebuffer->size;
- bool constrained_to_region = false;
- bool needs_clear_color = false;
- bool needs_clear_depth = false;
-
- if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region.
- Rect2i viewport(viewport_offset, viewport_size);
- Rect2i regioni = p_region;
- if (!(regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
- ((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
- ((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y))) {
- ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "When supplying a custom region, it must be contained within the framebuffer rectangle");
- }
-
- viewport_offset = regioni.position;
- viewport_size = regioni.size;
-
- // If clearing regions both in color and depth, we can switch to a fast path where we let Vulkan do the clears
- // and we constrain the render area to the region.
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION && p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
- constrained_to_region = true;
- p_initial_color_action = INITIAL_ACTION_CLEAR;
- p_initial_depth_action = INITIAL_ACTION_CLEAR;
- } else {
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION_CONTINUE) {
- needs_clear_color = true;
- p_initial_color_action = INITIAL_ACTION_CONTINUE;
- }
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION_CONTINUE) {
- needs_clear_depth = true;
- p_initial_depth_action = INITIAL_ACTION_CONTINUE;
- }
- if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) {
- needs_clear_color = true;
- p_initial_color_action = INITIAL_ACTION_KEEP;
- }
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
- needs_clear_depth = true;
- p_initial_depth_action = INITIAL_ACTION_KEEP;
- }
- }
- }
-
- if (p_initial_color_action == INITIAL_ACTION_CLEAR || needs_clear_color) { // Check clear values.
-
- int color_count = 0;
- for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
- Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]);
-
- if (!texture || !(texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
- color_count++;
- }
- }
-
- ERR_FAIL_COND_V_MSG(p_clear_color_values.size() != color_count, ERR_INVALID_PARAMETER,
- "Clear color values supplied (" + itos(p_clear_color_values.size()) + ") differ from the amount required for framebuffer (" + itos(color_count) + ").");
- }
-
- RDD::FramebufferID fb_driver_id;
- RDD::RenderPassID render_pass;
-
- Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, &fb_driver_id, &render_pass, &draw_list_subpass_count);
- ERR_FAIL_COND_V(err != OK, ERR_CANT_CREATE);
-
- RDD::CommandBufferID frame_command_buffer = frames[frame].draw_command_buffer;
- err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, viewport_offset, viewport_size, fb_driver_id, render_pass, frame_command_buffer, RDD::COMMAND_BUFFER_TYPE_SECONDARY, p_storage_textures, constrained_to_region);
-
- if (err != OK) {
- return ERR_CANT_CREATE;
- }
-
- draw_list_current_subpass = 0;
-
-#ifdef DEBUG_ENABLED
- draw_list_framebuffer_format = framebuffer->format_id;
-#endif
- draw_list_render_pass = render_pass;
- draw_list_vkframebuffer = fb_driver_id;
-
- err = _draw_list_allocate(Rect2i(viewport_offset, viewport_size), p_splits, 0);
- if (err != OK) {
- return err;
- }
-
- if (needs_clear_color || needs_clear_depth) {
- DEV_ASSERT(!constrained_to_region);
- _draw_list_insert_clear_region(&draw_list[0], framebuffer, viewport_offset, viewport_size, needs_clear_color, p_clear_color_values, needs_clear_depth, p_clear_depth, p_clear_stencil);
- }
-
- bool secondary_viewport_scissor = driver->api_trait_get(RDD::API_TRAIT_SECONDARY_VIEWPORT_SCISSOR);
- for (uint32_t i = 0; i < p_splits; i++) {
- if (secondary_viewport_scissor) {
- driver->command_render_set_viewport(draw_list[i].command_buffer, Rect2i(viewport_offset, viewport_size));
- driver->command_render_set_scissor(draw_list[i].command_buffer, Rect2i(viewport_offset, viewport_size));
- }
- r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i;
- }
-
- return OK;
+ ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
+#endif
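+
+// Illustrative sketch of the single draw list path that replaces splits. Identifiers
+// such as `rd`, `framebuffer`, `pipeline`, `uniform_set`, `vertex_array` and
+// `clear_colors` are assumed to be created elsewhere:
+//
+//     DrawListID dl = rd->draw_list_begin(framebuffer,
+//             INITIAL_ACTION_CLEAR, FINAL_ACTION_READ,
+//             INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, clear_colors);
+//     rd->draw_list_bind_render_pipeline(dl, pipeline);
+//     rd->draw_list_bind_uniform_set(dl, uniform_set, 0);
+//     rd->draw_list_bind_vertex_array(dl, vertex_array);
+//     rd->draw_list_draw(dl, false);
+//     rd->draw_list_end();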
RenderingDevice::DrawList *RenderingDevice::_get_draw_list_ptr(DrawListID p_id) {
if (p_id < 0) {
@@ -4045,22 +3371,7 @@ RenderingDevice::DrawList *RenderingDevice::_get_draw_list_ptr(DrawListID p_id)
if (!draw_list) {
return nullptr;
} else if (p_id == (int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT)) {
- if (draw_list_split) {
- return nullptr;
- }
return draw_list;
- } else if (p_id >> DrawListID(ID_BASE_SHIFT) == ID_TYPE_SPLIT_DRAW_LIST) {
- if (!draw_list_split) {
- return nullptr;
- }
-
- uint64_t index = p_id & ((DrawListID(1) << DrawListID(ID_BASE_SHIFT)) - 1); // Mask.
-
- if (index >= draw_list_count) {
- return nullptr;
- }
-
- return &draw_list[index];
} else {
return nullptr;
}
@@ -4073,7 +3384,7 @@ void RenderingDevice::draw_list_set_blend_constants(DrawListID p_list, const Col
ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
- driver->command_render_set_blend_constants(dl->command_buffer, p_color);
+ draw_graph.add_draw_list_set_blend_constants(p_color);
}
void RenderingDevice::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) {
@@ -4095,7 +3406,7 @@ void RenderingDevice::draw_list_bind_render_pipeline(DrawListID p_list, RID p_re
dl->state.pipeline = p_render_pipeline;
- driver->command_bind_render_pipeline(dl->command_buffer, pipeline->driver_id);
+ draw_graph.add_draw_list_bind_pipeline(pipeline->driver_id, pipeline->stage_bits);
if (dl->state.pipeline_shader != pipeline->shader) {
// Shader changed, so descriptor sets may become incompatible.
@@ -4183,21 +3494,6 @@ void RenderingDevice::draw_list_bind_uniform_set(DrawListID p_list, RID p_unifor
dl->state.sets[p_index].uniform_set_format = uniform_set->format;
dl->state.sets[p_index].uniform_set = p_uniform_set;
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- uint32_t mst_count = uniform_set->mutable_storage_textures.size();
- if (mst_count) {
- Texture **mst_textures = const_cast<UniformSet *>(uniform_set)->mutable_storage_textures.ptrw();
- for (uint32_t i = 0; i < mst_count; i++) {
- if (mst_textures[i]->used_in_frame != frames_drawn) {
- mst_textures[i]->used_in_frame = frames_drawn;
- mst_textures[i]->used_in_transfer = false;
- mst_textures[i]->used_in_compute = false;
- }
- mst_textures[i]->used_in_raster = true;
- }
- }
- }
-
#ifdef DEBUG_ENABLED
{ // Validate that textures bound are not attached as framebuffer bindings.
uint32_t attachable_count = uniform_set->attachable_textures.size();
@@ -4235,7 +3531,12 @@ void RenderingDevice::draw_list_bind_vertex_array(DrawListID p_list, RID p_verte
dl->validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
#endif
dl->validation.vertex_array_size = vertex_array->vertex_count;
- driver->command_render_bind_vertex_buffers(dl->command_buffer, vertex_array->buffers.size(), vertex_array->buffers.ptr(), vertex_array->offsets.ptr());
+
+ draw_graph.add_draw_list_bind_vertex_buffers(vertex_array->buffers, vertex_array->offsets);
+
+ for (int i = 0; i < vertex_array->draw_trackers.size(); i++) {
+ draw_graph.add_draw_list_usage(vertex_array->draw_trackers[i], RDG::RESOURCE_USAGE_VERTEX_BUFFER_READ);
+ }
}
void RenderingDevice::draw_list_bind_index_array(DrawListID p_list, RID p_index_array) {
@@ -4259,7 +3560,11 @@ void RenderingDevice::draw_list_bind_index_array(DrawListID p_list, RID p_index_
dl->validation.index_array_size = index_array->indices;
dl->validation.index_array_offset = index_array->offset;
- driver->command_render_bind_index_buffer(dl->command_buffer, index_array->driver_id, index_array->format, index_array->offset);
+ draw_graph.add_draw_list_bind_index_buffer(index_array->driver_id, index_array->format, index_array->offset);
+
+ if (index_array->draw_tracker != nullptr) {
+ draw_graph.add_draw_list_usage(index_array->draw_tracker, RDG::RESOURCE_USAGE_INDEX_BUFFER_READ);
+ }
}
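+
+// Note that binding vertex and index buffers now also records a read usage
+// (RESOURCE_USAGE_VERTEX_BUFFER_READ / RESOURCE_USAGE_INDEX_BUFFER_READ) for every
+// tracked buffer, which is how the command graph orders draws against earlier writes
+// to the same buffers. A minimal sketch, assuming `rd` and the RIDs exist:
+//
+//     rd->buffer_update(vertex_buffer, 0, data.size(), data.ptr()); // Write...
+//     DrawListID dl = rd->draw_list_begin(framebuffer,
+//             INITIAL_ACTION_KEEP, FINAL_ACTION_READ, INITIAL_ACTION_KEEP, FINAL_ACTION_READ);
+//     rd->draw_list_bind_vertex_array(dl, vertex_array); // ...read; no manual barrier needed.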
void RenderingDevice::draw_list_set_line_width(DrawListID p_list, float p_width) {
@@ -4269,7 +3574,7 @@ void RenderingDevice::draw_list_set_line_width(DrawListID p_list, float p_width)
ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
- driver->command_render_set_line_width(dl->command_buffer, p_width);
+ draw_graph.add_draw_list_set_line_width(p_width);
}
void RenderingDevice::draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size) {
@@ -4284,7 +3589,9 @@ void RenderingDevice::draw_list_set_push_constant(DrawListID p_list, const void
ERR_FAIL_COND_MSG(p_data_size != dl->validation.pipeline_push_constant_size,
"This render pipeline requires (" + itos(dl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
#endif
- driver->command_bind_push_constants(dl->command_buffer, dl->state.pipeline_shader_driver_id, 0, VectorView((const uint32_t *)p_data, p_data_size / sizeof(uint32_t)));
+
+ draw_graph.add_draw_list_set_push_constant(dl->state.pipeline_shader_driver_id, p_data, p_data_size);
+
#ifdef DEBUG_ENABLED
dl->validation.pipeline_push_constant_supplied = true;
#endif
@@ -4338,14 +3645,19 @@ void RenderingDevice::draw_list_draw(DrawListID p_list, bool p_use_indices, uint
}
}
#endif
- driver->command_uniform_set_prepare_for_use(dl->command_buffer, dl->state.sets[i].uniform_set_driver_id, dl->state.pipeline_shader_driver_id, i);
+ draw_graph.add_draw_list_uniform_set_prepare_for_use(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);
}
for (uint32_t i = 0; i < dl->state.set_count; i++) {
if (dl->state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}
if (!dl->state.sets[i].bound) {
- driver->command_bind_render_uniform_set(dl->command_buffer, dl->state.sets[i].uniform_set_driver_id, dl->state.pipeline_shader_driver_id, i);
+ // Not bound yet: bind the uniform set and record its resource usages for the graph.
+ draw_graph.add_draw_list_bind_uniform_set(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);
+
+ UniformSet *uniform_set = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
+ draw_graph.add_draw_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
+
dl->state.sets[i].bound = true;
}
}
@@ -4370,7 +3682,8 @@ void RenderingDevice::draw_list_draw(DrawListID p_list, bool p_use_indices, uint
ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
"Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
#endif
- driver->command_render_draw_indexed(dl->command_buffer, to_draw, p_instances, dl->validation.index_array_offset, 0, 0);
+
+ draw_graph.add_draw_list_draw_indexed(to_draw, p_instances, dl->validation.index_array_offset);
} else {
uint32_t to_draw;
@@ -4396,7 +3709,7 @@ void RenderingDevice::draw_list_draw(DrawListID p_list, bool p_use_indices, uint
"Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
#endif
- driver->command_render_draw(dl->command_buffer, to_draw, p_instances, 0, 0);
+ draw_graph.add_draw_list_draw(to_draw, p_instances);
}
}
@@ -4416,7 +3729,7 @@ void RenderingDevice::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p
return;
}
- driver->command_render_set_scissor(dl->command_buffer, rect);
+ _draw_list_set_scissor(rect);
}
void RenderingDevice::draw_list_disable_scissor(DrawListID p_list) {
@@ -4426,7 +3739,7 @@ void RenderingDevice::draw_list_disable_scissor(DrawListID p_list) {
ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif
- driver->command_render_set_scissor(dl->command_buffer, dl->viewport);
+ _draw_list_set_scissor(dl->viewport);
}
uint32_t RenderingDevice::draw_list_get_current_pass() {
@@ -4443,230 +3756,80 @@ RenderingDevice::DrawListID RenderingDevice::draw_list_switch_to_next_pass() {
Rect2i viewport;
_draw_list_free(&viewport);
- driver->command_next_render_subpass(frames[frame].draw_command_buffer, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
+ draw_graph.add_draw_list_next_subpass(RDD::COMMAND_BUFFER_TYPE_PRIMARY);
- _draw_list_allocate(viewport, 0, draw_list_current_subpass);
+ _draw_list_allocate(viewport, draw_list_current_subpass);
return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
-Error RenderingDevice::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
- _THREAD_SAFE_METHOD_
- ERR_FAIL_COND_V(draw_list == nullptr, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, ERR_INVALID_PARAMETER);
- draw_list_current_subpass++;
-
- Rect2i viewport;
- _draw_list_free(&viewport);
-
- driver->command_next_render_subpass(frames[frame].draw_command_buffer, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
-
- _draw_list_allocate(viewport, p_splits, draw_list_current_subpass);
-
- for (uint32_t i = 0; i < p_splits; i++) {
- r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i;
- }
-
- return OK;
+#ifndef DISABLE_DEPRECATED
+Error RenderingDevice::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
+ ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
+#endif
-Error RenderingDevice::_draw_list_allocate(const Rect2i &p_viewport, uint32_t p_splits, uint32_t p_subpass) {
+Error RenderingDevice::_draw_list_allocate(const Rect2i &p_viewport, uint32_t p_subpass) {
// Lock while draw_list is active.
_THREAD_SAFE_LOCK_
- if (p_splits == 0) {
- draw_list = memnew(DrawList);
- draw_list->command_buffer = frames[frame].draw_command_buffer;
- draw_list->viewport = p_viewport;
- draw_list_count = 0;
- draw_list_split = false;
- } else {
- if (p_splits > (uint32_t)split_draw_list_allocators.size()) {
- uint32_t from = split_draw_list_allocators.size();
- split_draw_list_allocators.resize(p_splits);
- for (uint32_t i = from; i < p_splits; i++) {
- RDD::CommandPoolID cmd_pool = driver->command_pool_create(RDD::COMMAND_BUFFER_TYPE_SECONDARY);
- ERR_FAIL_COND_V(!cmd_pool, ERR_CANT_CREATE);
- split_draw_list_allocators.write[i].command_pool = cmd_pool;
-
- for (int j = 0; j < frame_count; j++) {
- RDD::CommandBufferID cmd_buffer = driver->command_buffer_create(RDD::COMMAND_BUFFER_TYPE_SECONDARY, cmd_pool);
- ERR_FAIL_COND_V(!cmd_buffer, ERR_CANT_CREATE);
- split_draw_list_allocators.write[i].command_buffers.push_back(cmd_buffer);
- }
- }
- }
- draw_list = memnew_arr(DrawList, p_splits);
- draw_list_count = p_splits;
- draw_list_split = true;
-
- for (uint32_t i = 0; i < p_splits; i++) {
- // Take a command buffer and initialize it.
- RDD::CommandBufferID cmd_buffer = split_draw_list_allocators[i].command_buffers[frame];
-
- bool ok = driver->command_buffer_begin_secondary(cmd_buffer, draw_list_render_pass, p_subpass, draw_list_vkframebuffer);
- if (!ok) {
- memdelete_arr(draw_list);
- draw_list = nullptr;
- ERR_FAIL_V(ERR_CANT_CREATE);
- }
-
- draw_list[i].command_buffer = cmd_buffer;
- draw_list[i].viewport = p_viewport;
- }
- }
+ draw_list = memnew(DrawList);
+ draw_list->viewport = p_viewport;
+ draw_list_count = 0;
return OK;
}
void RenderingDevice::_draw_list_free(Rect2i *r_last_viewport) {
- if (draw_list_split) {
- // Send all command buffers.
- RDD::CommandBufferID *command_buffers = (RDD::CommandBufferID *)alloca(sizeof(RDD::CommandBufferID) * draw_list_count);
- for (uint32_t i = 0; i < draw_list_count; i++) {
- driver->command_buffer_end(draw_list[i].command_buffer);
- command_buffers[i] = draw_list[i].command_buffer;
- if (r_last_viewport) {
- if (i == 0 || draw_list[i].viewport_set) {
- *r_last_viewport = draw_list[i].viewport;
- }
- }
- }
-
- driver->command_buffer_execute_secondary(frames[frame].draw_command_buffer, VectorView(command_buffers, draw_list_count));
- memdelete_arr(draw_list);
- draw_list = nullptr;
-
- } else {
- if (r_last_viewport) {
- *r_last_viewport = draw_list->viewport;
- }
- // Just end the list.
- memdelete(draw_list);
- draw_list = nullptr;
+ if (r_last_viewport) {
+ *r_last_viewport = draw_list->viewport;
}
+ // Just end the list.
+ memdelete(draw_list);
+ draw_list = nullptr;
// Draw_list is no longer active.
_THREAD_SAFE_UNLOCK_
}
-void RenderingDevice::draw_list_end(BitField<BarrierMask> p_post_barrier) {
+void RenderingDevice::draw_list_end() {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
- _draw_list_free();
+ draw_graph.add_draw_list_end();
- driver->command_end_render_pass(frames[frame].draw_command_buffer);
+ _draw_list_free();
for (int i = 0; i < draw_list_bound_textures.size(); i++) {
Texture *texture = texture_owner.get_or_null(draw_list_bound_textures[i]);
ERR_CONTINUE(!texture); // Wtf.
- if (draw_list_unbind_color_textures && (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
+ if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
texture->bound = false;
}
- if (draw_list_unbind_depth_textures && (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
texture->bound = false;
}
}
- draw_list_bound_textures.clear();
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- BitField<RDD::PipelineStageBits> dst_stages;
- BitField<RDD::BarrierAccessBits> dst_access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT).set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT); // RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_INDEX_READ_BIT).set_flag(RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT); // RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT); // RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT); // RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_TRANSFER_READ_BIT);
- }
-
- if (dst_stages.is_empty()) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
- }
-
- RDD::TextureBarrier *texture_barriers = nullptr;
-
- uint32_t texture_barrier_count = draw_list_storage_textures.size();
-
- if (texture_barrier_count) {
- texture_barriers = (RDD::TextureBarrier *)alloca(sizeof(RDD::TextureBarrier) * draw_list_storage_textures.size());
- }
-
- BitField<RDD::PipelineStageBits> src_stage(RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
- RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
- BitField<RDD::BarrierAccessBits> src_access(
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
-
- if (texture_barrier_count) {
- src_stage.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
-
- for (uint32_t i = 0; i < texture_barrier_count; i++) {
- Texture *texture = texture_owner.get_or_null(draw_list_storage_textures[i]);
-
- RDD::TextureBarrier &tb = texture_barriers[i];
- tb.texture = texture->driver_id;
- tb.src_access = src_access;
- tb.dst_access = dst_access;
- tb.prev_layout = texture->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- tb.subresources.aspect = texture->read_aspect_flags;
- tb.subresources.base_mipmap = texture->base_mipmap;
- tb.subresources.mipmap_count = texture->mipmaps;
- tb.subresources.base_layer = texture->base_layer;
- tb.subresources.layer_count = texture->layers;
-
- texture->layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- }
-
- // To ensure proper synchronization, we must make sure rendering is done before:
- // * Some buffer is copied.
- // * Another render pass happens (since we may be done).
-
- RDD::MemoryBarrier mb;
- mb.src_access = src_access;
- mb.dst_access = dst_access;
-
- if (texture_barrier_count > 0 || p_post_barrier != BARRIER_MASK_NO_BARRIER) {
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, src_stage, dst_stages, mb, {}, VectorView(texture_barriers, texture_barrier_count));
- }
- }
- draw_list_storage_textures.clear();
-
-#ifdef FORCE_FULL_BARRIER
- _full_barrier(true);
-#endif
+ draw_list_bound_textures.clear();
}
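+
+// The explicit storage-texture layout transitions and the post-draw memory barrier
+// that used to be issued here are gone: the bind calls above record each resource
+// usage into the graph, and the required transitions and barriers are derived and
+// emitted later, when draw_graph.end() finalizes the frame's command buffer.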
/***********************/
/**** COMPUTE LISTS ****/
/***********************/
-RenderingDevice::ComputeListID RenderingDevice::compute_list_begin(bool p_allow_draw_overlap) {
+RenderingDevice::ComputeListID RenderingDevice::compute_list_begin() {
_THREAD_SAFE_METHOD_
- ERR_FAIL_COND_V_MSG(!p_allow_draw_overlap && draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");
// Lock while compute_list is active.
_THREAD_SAFE_LOCK_
compute_list = memnew(ComputeList);
- compute_list->command_buffer = frames[frame].draw_command_buffer;
- compute_list->state.allow_draw_overlap = p_allow_draw_overlap;
+
+ draw_graph.add_compute_list_begin();
return ID_TYPE_COMPUTE_LIST;
}
@@ -4688,7 +3851,7 @@ void RenderingDevice::compute_list_bind_compute_pipeline(ComputeListID p_list, R
cl->state.pipeline = p_compute_pipeline;
- driver->command_bind_compute_pipeline(cl->command_buffer, pipeline->driver_id);
+ draw_graph.add_compute_list_bind_pipeline(pipeline->driver_id);
if (cl->state.pipeline_shader != pipeline->shader) {
// Shader changed, so descriptor sets may become incompatible.
@@ -4779,109 +3942,6 @@ void RenderingDevice::compute_list_bind_uniform_set(ComputeListID p_list, RID p_
cl->state.sets[p_index].uniform_set_format = uniform_set->format;
cl->state.sets[p_index].uniform_set = p_uniform_set;
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- uint32_t textures_to_sampled_count = uniform_set->mutable_sampled_textures.size();
- uint32_t textures_to_storage_count = uniform_set->mutable_storage_textures.size();
-
- Texture **textures_to_sampled = uniform_set->mutable_sampled_textures.ptrw();
-
- RDD::TextureBarrier *texture_barriers = nullptr;
-
- if (textures_to_sampled_count + textures_to_storage_count) {
- texture_barriers = (RDD::TextureBarrier *)alloca(sizeof(RDD::TextureBarrier) * (textures_to_sampled_count + textures_to_storage_count));
- }
- uint32_t texture_barrier_count = 0;
-
- BitField<RDD::PipelineStageBits> src_stages;
-
- for (uint32_t i = 0; i < textures_to_sampled_count; i++) {
- if (textures_to_sampled[i]->layout != RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
-
- RDD::TextureBarrier &tb = texture_barriers[texture_barrier_count++];
- tb.texture = textures_to_sampled[i]->driver_id;
- tb.src_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- tb.dst_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- tb.prev_layout = textures_to_sampled[i]->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- tb.subresources.aspect = textures_to_sampled[i]->read_aspect_flags;
- tb.subresources.base_mipmap = textures_to_sampled[i]->base_mipmap;
- tb.subresources.mipmap_count = textures_to_sampled[i]->mipmaps;
- tb.subresources.base_layer = textures_to_sampled[i]->base_layer;
- tb.subresources.layer_count = textures_to_sampled[i]->layers;
-
- textures_to_sampled[i]->layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-
- cl->state.textures_to_sampled_layout.erase(textures_to_sampled[i]);
- }
-
- if (textures_to_sampled[i]->used_in_frame != frames_drawn) {
- textures_to_sampled[i]->used_in_frame = frames_drawn;
- textures_to_sampled[i]->used_in_transfer = false;
- textures_to_sampled[i]->used_in_raster = false;
- }
- textures_to_sampled[i]->used_in_compute = true;
- }
-
- Texture **textures_to_storage = uniform_set->mutable_storage_textures.ptrw();
-
- for (uint32_t i = 0; i < textures_to_storage_count; i++) {
- if (textures_to_storage[i]->layout != RDD::TEXTURE_LAYOUT_GENERAL) {
- BitField<RDD::BarrierAccessBits> src_access;
-
- if (textures_to_storage[i]->used_in_frame == frames_drawn) {
- if (textures_to_storage[i]->used_in_compute) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (textures_to_storage[i]->used_in_raster) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (textures_to_storage[i]->used_in_transfer) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_TRANSFER_READ_BIT);
- }
-
- textures_to_storage[i]->used_in_compute = false;
- textures_to_storage[i]->used_in_raster = false;
- textures_to_storage[i]->used_in_transfer = false;
-
- } else {
- src_access.clear();
- textures_to_storage[i]->used_in_compute = false;
- textures_to_storage[i]->used_in_raster = false;
- textures_to_storage[i]->used_in_transfer = false;
- textures_to_storage[i]->used_in_frame = frames_drawn;
- }
-
- RDD::TextureBarrier &tb = texture_barriers[texture_barrier_count++];
- tb.texture = textures_to_storage[i]->driver_id;
- tb.src_access = src_access;
- tb.dst_access = (RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- tb.prev_layout = textures_to_storage[i]->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_GENERAL;
- tb.subresources.aspect = textures_to_storage[i]->read_aspect_flags;
- tb.subresources.base_mipmap = textures_to_storage[i]->base_mipmap;
- tb.subresources.mipmap_count = textures_to_storage[i]->mipmaps;
- tb.subresources.base_layer = textures_to_storage[i]->base_layer;
- tb.subresources.layer_count = textures_to_storage[i]->layers;
-
- textures_to_storage[i]->layout = RDD::TEXTURE_LAYOUT_GENERAL;
-
- cl->state.textures_to_sampled_layout.insert(textures_to_storage[i]); // Needs to go back to sampled layout afterwards.
- }
- }
-
- if (texture_barrier_count) {
- if (src_stages.is_empty()) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT);
- }
-
- driver->command_pipeline_barrier(cl->command_buffer, src_stages, RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, {}, {}, VectorView(texture_barriers, texture_barrier_count));
- }
- }
-
#if 0
{ // Validate that textures bound are not attached as framebuffer bindings.
uint32_t attachable_count = uniform_set->attachable_textures.size();
@@ -4901,6 +3961,7 @@ void RenderingDevice::compute_list_bind_uniform_set(ComputeListID p_list, RID p_
void RenderingDevice::compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size) {
ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
ERR_FAIL_NULL(compute_list);
+ ERR_FAIL_COND_MSG(p_data_size > MAX_PUSH_CONSTANT_SIZE, "Push constants can't be bigger than 128 bytes to maintain compatibility.");
ComputeList *cl = compute_list;
@@ -4912,7 +3973,13 @@ void RenderingDevice::compute_list_set_push_constant(ComputeListID p_list, const
ERR_FAIL_COND_MSG(p_data_size != cl->validation.pipeline_push_constant_size,
"This compute pipeline requires (" + itos(cl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
#endif
- driver->command_bind_push_constants(cl->command_buffer, cl->state.pipeline_shader_driver_id, 0, VectorView((const uint32_t *)p_data, p_data_size / sizeof(uint32_t)));
+
+ draw_graph.add_compute_list_set_push_constant(cl->state.pipeline_shader_driver_id, p_data, p_data_size);
+
+ // Store it in the state in case we need to restart the compute list.
+ memcpy(cl->state.push_constant_data, p_data, p_data_size);
+ cl->state.push_constant_size = p_data_size;
+
#ifdef DEBUG_ENABLED
cl->validation.pipeline_push_constant_supplied = true;
#endif
@@ -4970,19 +4037,24 @@ void RenderingDevice::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_g
}
}
#endif
- driver->command_uniform_set_prepare_for_use(cl->command_buffer, cl->state.sets[i].uniform_set_driver_id, cl->state.pipeline_shader_driver_id, i);
+ draw_graph.add_compute_list_uniform_set_prepare_for_use(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
}
for (uint32_t i = 0; i < cl->state.set_count; i++) {
if (cl->state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}
if (!cl->state.sets[i].bound) {
- driver->command_bind_compute_uniform_set(cl->command_buffer, cl->state.sets[i].uniform_set_driver_id, cl->state.pipeline_shader_driver_id, i);
+ // Not bound yet: bind the uniform set and record its resource usages for the graph.
+ draw_graph.add_compute_list_bind_uniform_set(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
+
+ UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
+ draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
+
cl->state.sets[i].bound = true;
}
}
- driver->command_compute_dispatch(cl->command_buffer, p_x_groups, p_y_groups, p_z_groups);
+ draw_graph.add_compute_list_dispatch(p_x_groups, p_y_groups, p_z_groups);
}
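+
+// compute_list_dispatch() takes workgroup counts directly; compute_list_dispatch_threads()
+// below takes thread counts instead and derives the group counts from the pipeline's local
+// group size. Sketch for a pipeline with a 64-wide local group (identifiers assumed):
+//
+//     rd->compute_list_dispatch_threads(cl, element_count, 1, 1); // ceil(element_count / 64) groups.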
void RenderingDevice::compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads) {
@@ -5058,106 +4130,56 @@ void RenderingDevice::compute_list_dispatch_indirect(ComputeListID p_list, RID p
}
}
#endif
- driver->command_uniform_set_prepare_for_use(cl->command_buffer, cl->state.sets[i].uniform_set_driver_id, cl->state.pipeline_shader_driver_id, i);
+ draw_graph.add_compute_list_uniform_set_prepare_for_use(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
}
for (uint32_t i = 0; i < cl->state.set_count; i++) {
if (cl->state.sets[i].pipeline_expected_format == 0) {
continue; // Nothing expected by this pipeline.
}
if (!cl->state.sets[i].bound) {
- driver->command_bind_compute_uniform_set(cl->command_buffer, cl->state.sets[i].uniform_set_driver_id, cl->state.pipeline_shader_driver_id, i);
+ // Not bound yet: bind the uniform set and record its resource usages for the graph.
+ draw_graph.add_compute_list_bind_uniform_set(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
+
+ UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
+ draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
+
cl->state.sets[i].bound = true;
}
}
- driver->command_compute_dispatch_indirect(cl->command_buffer, buffer->driver_id, p_offset);
+ draw_graph.add_compute_list_dispatch_indirect(buffer->driver_id, p_offset);
+
+ if (buffer->draw_tracker != nullptr) {
+ draw_graph.add_compute_list_usage(buffer->draw_tracker, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ);
+ }
}
void RenderingDevice::compute_list_add_barrier(ComputeListID p_list) {
// Must be called within a compute list; the class mutex is locked during that time.
- BitField<RDD::PipelineStageBits> stages(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- BitField<RDD::BarrierAccessBits> access(RDD::BARRIER_ACCESS_SHADER_READ_BIT);
- _compute_list_add_barrier(BARRIER_MASK_COMPUTE, stages, access);
-}
-
-void RenderingDevice::_compute_list_add_barrier(BitField<BarrierMask> p_post_barrier, BitField<RDD::PipelineStageBits> p_stages, BitField<RDD::BarrierAccessBits> p_access) {
- ERR_FAIL_NULL(compute_list);
-
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::TextureBarrier *texture_barriers = nullptr;
-
- uint32_t texture_barrier_count = compute_list->state.textures_to_sampled_layout.size();
-
- if (texture_barrier_count) {
- texture_barriers = (RDD::TextureBarrier *)alloca(sizeof(RDD::TextureBarrier) * texture_barrier_count);
- }
-
- texture_barrier_count = 0; // We'll count how many we end up issuing.
-
- for (Texture *E : compute_list->state.textures_to_sampled_layout) {
- if (E->layout != RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
- RDD::TextureBarrier &tb = texture_barriers[texture_barrier_count++];
- tb.texture = E->driver_id;
- tb.src_access = RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT;
- tb.dst_access = p_access;
- tb.prev_layout = E->layout;
- tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- tb.subresources.aspect = E->read_aspect_flags;
- tb.subresources.base_mipmap = E->base_mipmap;
- tb.subresources.mipmap_count = E->mipmaps;
- tb.subresources.base_layer = E->base_layer;
- tb.subresources.layer_count = E->layers;
-
- E->layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- }
-
- if (E->used_in_frame != frames_drawn) {
- E->used_in_transfer = false;
- E->used_in_raster = false;
- E->used_in_compute = false;
- E->used_in_frame = frames_drawn;
- }
- }
+ compute_list_barrier_state = compute_list->state;
+ compute_list_end();
+ compute_list_begin();
- if (p_stages) {
- RDD::MemoryBarrier mb;
- mb.src_access = RDD::BARRIER_ACCESS_SHADER_WRITE_BIT;
- mb.dst_access = p_access;
- driver->command_pipeline_barrier(compute_list->command_buffer, RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, p_stages, mb, {}, VectorView(texture_barriers, texture_barrier_count));
+ if (compute_list_barrier_state.pipeline.is_valid()) {
+ compute_list_bind_compute_pipeline(p_list, compute_list_barrier_state.pipeline);
+ }
- } else if (texture_barrier_count) {
- driver->command_pipeline_barrier(compute_list->command_buffer, RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, {}, {}, VectorView(texture_barriers, texture_barrier_count));
+ for (uint32_t i = 0; i < compute_list_barrier_state.set_count; i++) {
+ if (compute_list_barrier_state.sets[i].uniform_set.is_valid()) {
+ compute_list_bind_uniform_set(p_list, compute_list_barrier_state.sets[i].uniform_set, i);
}
}
-#ifdef FORCE_FULL_BARRIER
- _full_barrier(true);
-#endif
+ if (compute_list_barrier_state.push_constant_size > 0) {
+ compute_list_set_push_constant(p_list, compute_list_barrier_state.push_constant_data, compute_list_barrier_state.push_constant_size);
+ }
}
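+
+// compute_list_add_barrier() is now implemented by closing the current compute list,
+// opening a new one and replaying the bound pipeline, uniform sets and push constant,
+// letting the graph serialize the two lists. Illustrative usage between dependent
+// dispatches (identifiers assumed):
+//
+//     ComputeListID cl = rd->compute_list_begin();
+//     rd->compute_list_bind_compute_pipeline(cl, pipeline);
+//     rd->compute_list_bind_uniform_set(cl, set, 0);
+//     rd->compute_list_dispatch(cl, groups, 1, 1);
+//     rd->compute_list_add_barrier(cl); // Second dispatch reads the first one's output.
+//     rd->compute_list_dispatch(cl, 1, 1, 1);
+//     rd->compute_list_end();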
-void RenderingDevice::compute_list_end(BitField<BarrierMask> p_post_barrier) {
+void RenderingDevice::compute_list_end() {
ERR_FAIL_NULL(compute_list);
- BitField<RDD::PipelineStageBits> stages;
- BitField<RDD::BarrierAccessBits> access;
- if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
- stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_VERTEX)) {
- stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT).set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_INDEX_READ_BIT).set_flag(RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT).set_flag(RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_FRAGMENT)) {
- stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT);
- }
- if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
- stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_TRANSFER_READ_BIT);
- }
- _compute_list_add_barrier(p_post_barrier, stages, access);
+ draw_graph.add_compute_list_end();
memdelete(compute_list);
compute_list = nullptr;
@@ -5166,66 +4188,168 @@ void RenderingDevice::compute_list_end(BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_UNLOCK_
}
+#ifndef DISABLE_DEPRECATED
void RenderingDevice::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
- if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- return;
- }
+ WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
+}
- BitField<RDD::PipelineStageBits> src_stages;
- BitField<RDD::BarrierAccessBits> src_access;
+void RenderingDevice::full_barrier() {
+ WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
+}
+#endif
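+
+// With the graph in place, the old manual synchronization calls such as
+// `rd->barrier(BARRIER_MASK_COMPUTE, BARRIER_MASK_VERTEX)` or `rd->full_barrier()`
+// are warning no-ops; barriers are inferred from the recorded resource usages, so
+// callers can simply remove them.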
- if (p_from == 0) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+/***********************/
+/**** COMMAND GRAPH ****/
+/***********************/
+
+bool RenderingDevice::_texture_make_mutable(Texture *p_texture, RID p_texture_id) {
+ if (p_texture->draw_tracker != nullptr) {
+ // Texture already has a tracker.
+ return false;
} else {
- if (p_from.has_flag(BARRIER_MASK_COMPUTE)) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_from.has_flag(BARRIER_MASK_FRAGMENT)) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT).set_flag(RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT).set_flag(RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
+ if (p_texture->owner.is_valid()) {
+ // Texture has an owner.
+ Texture *owner_texture = texture_owner.get_or_null(p_texture->owner);
+ ERR_FAIL_COND_V(!owner_texture, false);
+
+ if (owner_texture->draw_tracker != nullptr) {
+ // Create a tracker for this dependency in particular.
+ if (p_texture->slice_type == TEXTURE_SLICE_MAX) {
+ // Shared texture.
+ p_texture->draw_tracker = owner_texture->draw_tracker;
+ p_texture->draw_tracker->reference_count++;
+ } else {
+ // Slice texture.
+ HashMap<Rect2i, RDG::ResourceTracker *>::ConstIterator draw_tracker_iterator = owner_texture->slice_trackers.find(p_texture->slice_rect);
+ RDG::ResourceTracker *draw_tracker = nullptr;
+ if (draw_tracker_iterator != owner_texture->slice_trackers.end()) {
+ // Reuse the tracker at the matching rectangle.
+ draw_tracker = draw_tracker_iterator->value;
+ } else {
+ // Create a new tracker and store it on the map.
+ draw_tracker = RDG::resource_tracker_create();
+ draw_tracker->parent = owner_texture->draw_tracker;
+ draw_tracker->texture_driver_id = p_texture->driver_id;
+ draw_tracker->texture_subresources = p_texture->barrier_range();
+ draw_tracker->texture_slice_or_dirty_rect = p_texture->slice_rect;
+ owner_texture->slice_trackers[p_texture->slice_rect] = draw_tracker;
+ }
+
+ p_texture->slice_trackers.clear();
+ p_texture->draw_tracker = draw_tracker;
+ p_texture->draw_tracker->reference_count++;
+ }
+
+ if (p_texture_id.is_valid()) {
+ _dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
+ }
+ } else {
+ // Delegate this to the owner instead, as it'll make all its dependencies mutable.
+ _texture_make_mutable(owner_texture, p_texture->owner);
+ }
+ } else {
+ // Regular texture.
+ p_texture->draw_tracker = RDG::resource_tracker_create();
+ p_texture->draw_tracker->texture_driver_id = p_texture->driver_id;
+ p_texture->draw_tracker->texture_subresources = p_texture->barrier_range();
+ p_texture->draw_tracker->reference_count = 1;
+
+ if (p_texture_id.is_valid()) {
+ if (p_texture->has_initial_data) {
+ // If the texture was initialized with initial data but wasn't made mutable from the start, assume the texture sampling usage.
+ p_texture->draw_tracker->usage = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
+ }
+
+ _dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
+ }
}
- if (p_from.has_flag(BARRIER_MASK_TRANSFER)) {
- src_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- src_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT);
+
+ return true;
+ }
+}
+
+bool RenderingDevice::_buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id) {
+ if (p_buffer->draw_tracker != nullptr) {
+ // Buffer already has a tracker.
+ return false;
+ } else {
+ // Create a tracker for the buffer and make all its dependencies mutable.
+ p_buffer->draw_tracker = RDG::resource_tracker_create();
+ p_buffer->draw_tracker->buffer_driver_id = p_buffer->driver_id;
+ if (p_buffer_id.is_valid()) {
+ _dependencies_make_mutable(p_buffer_id, p_buffer->draw_tracker);
}
+
+ return true;
}
+}
- BitField<RDD::PipelineStageBits> dst_stages;
- BitField<RDD::BarrierAccessBits> dst_access;
+bool RenderingDevice::_vertex_array_make_mutable(VertexArray *p_vertex_array, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
+ if (!p_vertex_array->untracked_buffers.has(p_resource_id)) {
+ // Vertex array thinks the buffer is already tracked or does not use it.
+ return false;
+ } else {
+ // Vertex array is aware of the buffer but it isn't being tracked.
+ p_vertex_array->draw_trackers.push_back(p_resource_tracker);
+ p_vertex_array->untracked_buffers.erase(p_resource_id);
+ return true;
+ }
+}
- if (p_to == 0) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
+bool RenderingDevice::_index_array_make_mutable(IndexArray *p_index_array, RDG::ResourceTracker *p_resource_tracker) {
+ if (p_index_array->draw_tracker != nullptr) {
+ // Index array already has a tracker.
+ return false;
} else {
- if (p_to.has_flag(BARRIER_MASK_COMPUTE)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
- }
- if (p_to.has_flag(BARRIER_MASK_VERTEX)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT).set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_INDEX_READ_BIT).set_flag(RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT).set_flag(RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT);
- }
- if (p_to.has_flag(BARRIER_MASK_FRAGMENT)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT).set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_SHADER_READ_BIT).set_flag(RDD::BARRIER_ACCESS_SHADER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT);
- }
- if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
- dst_stages.set_flag(RDD::PIPELINE_STAGE_TRANSFER_BIT);
- dst_access.set_flag(RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT).set_flag(RDD::BARRIER_ACCESS_TRANSFER_READ_BIT);
- }
+ // Index array adopts the tracker from its buffer.
+ p_index_array->draw_tracker = p_resource_tracker;
+ return true;
}
+}
- RDD::MemoryBarrier mb;
- mb.src_access = src_access;
- mb.dst_access = dst_access;
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, src_stages, dst_stages, mb, {}, {});
+bool RenderingDevice::_uniform_set_make_mutable(UniformSet *p_uniform_set, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
+ HashMap<RID, RDG::ResourceUsage>::Iterator E = p_uniform_set->untracked_usage.find(p_resource_id);
+ if (!E) {
+ // Uniform set thinks the resource is already tracked or does not use it.
+ return false;
+ } else {
+ // Uniform set has seen the resource but hasn't added its tracker yet.
+ p_uniform_set->draw_trackers.push_back(p_resource_tracker);
+ p_uniform_set->draw_trackers_usage.push_back(E->value);
+ p_uniform_set->untracked_usage.remove(E);
+ return true;
+ }
}
-void RenderingDevice::full_barrier() {
-#ifndef DEBUG_ENABLED
- ERR_PRINT("Full barrier is debug-only, should not be used in production");
-#endif
- _full_barrier(true);
+bool RenderingDevice::_dependency_make_mutable(RID p_id, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
+ if (texture_owner.owns(p_id)) {
+ Texture *texture = texture_owner.get_or_null(p_id);
+ return _texture_make_mutable(texture, p_id);
+ } else if (vertex_array_owner.owns(p_id)) {
+ VertexArray *vertex_array = vertex_array_owner.get_or_null(p_id);
+ return _vertex_array_make_mutable(vertex_array, p_resource_id, p_resource_tracker);
+ } else if (index_array_owner.owns(p_id)) {
+ IndexArray *index_array = index_array_owner.get_or_null(p_id);
+ return _index_array_make_mutable(index_array, p_resource_tracker);
+ } else if (uniform_set_owner.owns(p_id)) {
+ UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
+ return _uniform_set_make_mutable(uniform_set, p_resource_id, p_resource_tracker);
+ } else {
+ DEV_ASSERT(false && "Unknown resource type to make mutable.");
+ return false;
+ }
+}
+
+bool RenderingDevice::_dependencies_make_mutable(RID p_id, RDG::ResourceTracker *p_resource_tracker) {
+ bool made_mutable = false;
+ HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
+ if (E) {
+ for (RID rid : E->value) {
+ made_mutable = _dependency_make_mutable(rid, p_id, p_resource_tracker) || made_mutable;
+ }
+ }
+
+ return made_mutable;
}
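+
+// Mutability cascades through the dependency map: once a resource gains a tracker,
+// every dependent container (uniform set, vertex or index array, shared or sliced
+// texture) that still listed it as untracked is promoted too, so the graph observes
+// the usage regardless of how the resource ends up bound. For example, making a
+// storage texture mutable also registers its tracker with any uniform set that
+// samples it.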
/**************************/
@@ -5251,6 +4375,22 @@ void RenderingDevice::_free_internal(RID p_id) {
// Push everything so it's disposed of next time this frame index is processed (means, it's safe to do it).
if (texture_owner.owns(p_id)) {
Texture *texture = texture_owner.get_or_null(p_id);
+ RDG::ResourceTracker *draw_tracker = texture->draw_tracker;
+ if (draw_tracker != nullptr) {
+ draw_tracker->reference_count--;
+ if (draw_tracker->reference_count == 0) {
+ RDG::resource_tracker_free(draw_tracker);
+
+ if (texture->owner.is_valid() && (texture->slice_type != TEXTURE_SLICE_MAX)) {
+ // If this was a texture slice, erase the tracker from the map.
+ Texture *owner_texture = texture_owner.get_or_null(texture->owner);
+ if (owner_texture != nullptr) {
+ owner_texture->slice_trackers.erase(texture->slice_rect);
+ }
+ }
+ }
+ }
+
frames[frame].textures_to_dispose_of.push_back(*texture);
texture_owner.free(p_id);
} else if (framebuffer_owner.owns(p_id)) {
@@ -5268,12 +4408,14 @@ void RenderingDevice::_free_internal(RID p_id) {
sampler_owner.free(p_id);
} else if (vertex_buffer_owner.owns(p_id)) {
Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
+ RDG::resource_tracker_free(vertex_buffer->draw_tracker);
frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
vertex_buffer_owner.free(p_id);
} else if (vertex_array_owner.owns(p_id)) {
vertex_array_owner.free(p_id);
} else if (index_buffer_owner.owns(p_id)) {
IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
+ RDG::resource_tracker_free(index_buffer->draw_tracker);
frames[frame].buffers_to_dispose_of.push_back(*index_buffer);
index_buffer_owner.free(p_id);
} else if (index_array_owner.owns(p_id)) {
@@ -5286,14 +4428,17 @@ void RenderingDevice::_free_internal(RID p_id) {
shader_owner.free(p_id);
} else if (uniform_buffer_owner.owns(p_id)) {
Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
+ RDG::resource_tracker_free(uniform_buffer->draw_tracker);
frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
uniform_buffer_owner.free(p_id);
} else if (texture_buffer_owner.owns(p_id)) {
Buffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
+ RDG::resource_tracker_free(texture_buffer->draw_tracker);
frames[frame].buffers_to_dispose_of.push_back(*texture_buffer);
texture_buffer_owner.free(p_id);
} else if (storage_buffer_owner.owns(p_id)) {
Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
+ RDG::resource_tracker_free(storage_buffer->draw_tracker);
frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
storage_buffer_owner.free(p_id);
} else if (uniform_set_owner.owns(p_id)) {
@@ -5370,18 +4515,21 @@ void RenderingDevice::set_resource_name(RID p_id, const String &p_name) {
}
void RenderingDevice::draw_command_begin_label(String p_label_name, const Color &p_color) {
- _THREAD_SAFE_METHOD_
- context->command_begin_label(frames[frame].draw_command_buffer, p_label_name, p_color);
+ if (!context->is_debug_utils_enabled()) {
+ return;
+ }
+
+ draw_graph.begin_label(p_label_name, p_color);
}
+#ifndef DISABLE_DEPRECATED
void RenderingDevice::draw_command_insert_label(String p_label_name, const Color &p_color) {
- _THREAD_SAFE_METHOD_
- context->command_insert_label(frames[frame].draw_command_buffer, p_label_name, p_color);
+ WARN_PRINT("Deprecated. Inserting labels no longer applies due to command reordering.");
}
+#endif
void RenderingDevice::draw_command_end_label() {
- _THREAD_SAFE_METHOD_
- context->command_end_label(frames[frame].draw_command_buffer);
+ draw_graph.end_label();
}
String RenderingDevice::get_device_vendor_name() const {
@@ -5404,7 +4552,7 @@ String RenderingDevice::get_device_pipeline_cache_uuid() const {
return context->get_device_pipeline_cache_uuid();
}
-void RenderingDevice::_finalize_command_bufers() {
+void RenderingDevice::_finalize_command_buffers(bool p_postpare) {
if (draw_list) {
ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
}
@@ -5413,7 +4561,13 @@ void RenderingDevice::_finalize_command_bufers() {
ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work).");
}
- {
+ { // Complete the command buffers (the setup buffer needs to be processed before anything else).
+ draw_graph.end(frames[frame].draw_command_buffer, RENDER_GRAPH_REORDER, RENDER_GRAPH_FULL_BARRIERS);
+
+ if (p_postpare) {
+ context->postpare_buffers(frames[frame].draw_command_buffer);
+ }
+
driver->end_segment();
driver->command_buffer_end(frames[frame].setup_command_buffer);
driver->command_buffer_end(frames[frame].draw_command_buffer);
@@ -5421,6 +4575,8 @@ void RenderingDevice::_finalize_command_bufers() {
}
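+
+// Per-frame lifecycle of the graph, as wired in _finalize_command_buffers() above and
+// _begin_frame() below: commands are only recorded during the frame and are turned
+// into actual driver commands when the frame is finalized. Sketch:
+//
+//     draw_graph.begin();                                        // Frame start.
+//     /* draw/compute/transfer commands are recorded */
+//     draw_graph.end(frames[frame].draw_command_buffer,
+//             RENDER_GRAPH_REORDER, RENDER_GRAPH_FULL_BARRIERS); // Frame end.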
void RenderingDevice::_begin_frame() {
+ draw_graph.begin();
+
// Erase pending resources.
_free_pending_resources(frame);
@@ -5464,8 +4620,7 @@ void RenderingDevice::swap_buffers() {
ERR_FAIL_COND_MSG(local_device.is_valid(), "Local devices can't swap buffers.");
_THREAD_SAFE_METHOD_
- context->postpare_buffers(frames[frame].draw_command_buffer);
- _finalize_command_bufers();
+ _finalize_command_buffers(true);
// Swap buffers.
if (!screen_prepared) {
@@ -5486,7 +4641,7 @@ void RenderingDevice::submit() {
ERR_FAIL_COND_MSG(local_device.is_null(), "Only local devices can submit and sync.");
ERR_FAIL_COND_MSG(local_device_processing, "device already submitted, call sync to wait until done.");
- _finalize_command_bufers();
+ _finalize_command_buffers(false);
RDD::CommandBufferID command_buffers[2] = { frames[frame].setup_command_buffer, frames[frame].draw_command_buffer };
context->local_device_push_command_buffers(local_device, command_buffers, 2);
@@ -5618,11 +4773,14 @@ void RenderingDevice::_flush(bool p_current_frame) {
if (local_device.is_valid() && !p_current_frame) {
return; // Flushing previous frames has no effect with local device.
}
+
// Not doing this crashes RADV (undefined behavior).
if (p_current_frame) {
+ draw_graph.end(frames[frame].draw_command_buffer, RENDER_GRAPH_REORDER, RENDER_GRAPH_FULL_BARRIERS);
driver->end_segment();
driver->command_buffer_end(frames[frame].setup_command_buffer);
driver->command_buffer_end(frames[frame].draw_command_buffer);
+ draw_graph.begin();
}
if (local_device.is_valid()) {
@@ -5743,7 +4901,6 @@ void RenderingDevice::initialize(ApiContextRD *p_context, bool p_local_device) {
draw_list = nullptr;
draw_list_count = 0;
- draw_list_split = false;
compute_list = nullptr;
@@ -5760,6 +4917,8 @@ void RenderingDevice::initialize(ApiContextRD *p_context, bool p_local_device) {
pipelines_cache_size = driver->pipeline_cache_query_size();
print_verbose(vformat("Startup PSO cache (%.1f MiB)", pipelines_cache_size / (1024.0f * 1024.0f)));
}
+
+ draw_graph.initialize(driver, frame_count, SECONDARY_COMMAND_BUFFERS_PER_FRAME);
}
Vector<uint8_t> RenderingDevice::_load_pipeline_cache() {
@@ -5857,46 +5016,11 @@ void RenderingDevice::_free_rids(T &p_owner, const char *p_type) {
void RenderingDevice::capture_timestamp(const String &p_name) {
ERR_FAIL_COND_MSG(draw_list != nullptr, "Capturing timestamps during draw list creation is not allowed. Offending timestamp was: " + p_name);
+ ERR_FAIL_COND_MSG(compute_list != nullptr, "Capturing timestamps during compute list creation is not allowed. Offending timestamp was: " + p_name);
ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements);
- // This should be optional for profiling, else it will slow things down.
- if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
- RDD::MemoryBarrier mb;
- mb.src_access = (RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT |
- RDD::BARRIER_ACCESS_INDEX_READ_BIT |
- RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- RDD::BARRIER_ACCESS_UNIFORM_READ_BIT |
- RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_WRITE_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_READ_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT |
- RDD::BARRIER_ACCESS_HOST_READ_BIT |
- RDD::BARRIER_ACCESS_HOST_WRITE_BIT);
- mb.dst_access = (RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT |
- RDD::BARRIER_ACCESS_INDEX_READ_BIT |
- RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- RDD::BARRIER_ACCESS_UNIFORM_READ_BIT |
- RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_READ_BIT |
- RDD::BARRIER_ACCESS_SHADER_WRITE_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_READ_BIT |
- RDD::BARRIER_ACCESS_TRANSFER_WRITE_BIT |
- RDD::BARRIER_ACCESS_HOST_READ_BIT |
- RDD::BARRIER_ACCESS_HOST_WRITE_BIT);
-
- driver->command_pipeline_barrier(frames[frame].draw_command_buffer, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, mb, {}, {});
- }
-
- driver->command_timestamp_write(frames[frame].draw_command_buffer, frames[frame].timestamp_pool, frames[frame].timestamp_count);
+ draw_graph.add_capture_timestamp(frames[frame].timestamp_pool, frames[frame].timestamp_count);
+
frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
frames[frame].timestamp_count++;
@@ -6064,10 +5188,6 @@ void RenderingDevice::finalize() {
driver->pipeline_cache_free();
}
- for (int i = 0; i < split_draw_list_allocators.size(); i++) {
- driver->command_pool_free(split_draw_list_allocators[i].command_pool);
- }
-
frames.clear();
for (int i = 0; i < staging_buffer_blocks.size(); i++) {
@@ -6106,7 +5226,7 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("texture_create_shared_from_slice", "view", "with_texture", "layer", "mipmap", "mipmaps", "slice_type"), &RenderingDevice::_texture_create_shared_from_slice, DEFVAL(1), DEFVAL(TEXTURE_SLICE_2D));
ClassDB::bind_method(D_METHOD("texture_create_from_extension", "type", "format", "samples", "usage_flags", "image", "width", "height", "depth", "layers"), &RenderingDevice::texture_create_from_extension);
- ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data", "post_barrier"), &RenderingDevice::texture_update, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+ ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data"), &RenderingDevice::texture_update);
ClassDB::bind_method(D_METHOD("texture_get_data", "texture", "layer"), &RenderingDevice::texture_get_data);
ClassDB::bind_method(D_METHOD("texture_is_format_supported_for_usage", "format", "usage_flags"), &RenderingDevice::texture_is_format_supported_for_usage);
@@ -6114,9 +5234,9 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("texture_is_shared", "texture"), &RenderingDevice::texture_is_shared);
ClassDB::bind_method(D_METHOD("texture_is_valid", "texture"), &RenderingDevice::texture_is_valid);
- ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer", "post_barrier"), &RenderingDevice::texture_copy, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
- ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count", "post_barrier"), &RenderingDevice::texture_clear, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
- ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture", "post_barrier"), &RenderingDevice::texture_resolve_multisample, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+ ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer"), &RenderingDevice::texture_copy);
+ ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count"), &RenderingDevice::texture_clear);
+ ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture"), &RenderingDevice::texture_resolve_multisample);
ClassDB::bind_method(D_METHOD("texture_get_format", "texture"), &RenderingDevice::_texture_get_format);
#ifndef DISABLE_DEPRECATED
@@ -6158,8 +5278,9 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("uniform_set_create", "uniforms", "shader", "shader_set"), &RenderingDevice::_uniform_set_create);
ClassDB::bind_method(D_METHOD("uniform_set_is_valid", "uniform_set"), &RenderingDevice::uniform_set_is_valid);
- ClassDB::bind_method(D_METHOD("buffer_update", "buffer", "offset", "size_bytes", "data", "post_barrier"), &RenderingDevice::_buffer_update_bind, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
- ClassDB::bind_method(D_METHOD("buffer_clear", "buffer", "offset", "size_bytes", "post_barrier"), &RenderingDevice::buffer_clear, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+ ClassDB::bind_method(D_METHOD("buffer_copy", "src_buffer", "dst_buffer", "src_offset", "dst_offset", "size"), &RenderingDevice::buffer_copy);
+ ClassDB::bind_method(D_METHOD("buffer_update", "buffer", "offset", "size_bytes", "data"), &RenderingDevice::_buffer_update_bind);
+ ClassDB::bind_method(D_METHOD("buffer_clear", "buffer", "offset", "size_bytes"), &RenderingDevice::buffer_clear);
ClassDB::bind_method(D_METHOD("buffer_get_data", "buffer", "offset_bytes", "size_bytes"), &RenderingDevice::buffer_get_data, DEFVAL(0), DEFVAL(0));
ClassDB::bind_method(D_METHOD("render_pipeline_create", "shader", "framebuffer_format", "vertex_format", "primitive", "rasterization_state", "multisample_state", "stencil_state", "color_blend_state", "dynamic_state_flags", "for_render_pass", "specialization_constants"), &RenderingDevice::_render_pipeline_create, DEFVAL(0), DEFVAL(0), DEFVAL(TypedArray<RDPipelineSpecializationConstant>()));
@@ -6174,8 +5295,10 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("draw_list_begin_for_screen", "screen", "clear_color"), &RenderingDevice::draw_list_begin_for_screen, DEFVAL(DisplayServer::MAIN_WINDOW_ID), DEFVAL(Color()));
- ClassDB::bind_method(D_METHOD("draw_list_begin", "framebuffer", "initial_color_action", "final_color_action", "initial_depth_action", "final_depth_action", "clear_color_values", "clear_depth", "clear_stencil", "region", "storage_textures"), &RenderingDevice::_draw_list_begin, DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(TypedArray<RID>()));
+ ClassDB::bind_method(D_METHOD("draw_list_begin", "framebuffer", "initial_color_action", "final_color_action", "initial_depth_action", "final_depth_action", "clear_color_values", "clear_depth", "clear_stencil", "region"), &RenderingDevice::draw_list_begin, DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()));
+#ifndef DISABLE_DEPRECATED
ClassDB::bind_method(D_METHOD("draw_list_begin_split", "framebuffer", "splits", "initial_color_action", "final_color_action", "initial_depth_action", "final_depth_action", "clear_color_values", "clear_depth", "clear_stencil", "region", "storage_textures"), &RenderingDevice::_draw_list_begin_split, DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(TypedArray<RID>()));
+#endif
ClassDB::bind_method(D_METHOD("draw_list_set_blend_constants", "draw_list", "color"), &RenderingDevice::draw_list_set_blend_constants);
ClassDB::bind_method(D_METHOD("draw_list_bind_render_pipeline", "draw_list", "render_pipeline"), &RenderingDevice::draw_list_bind_render_pipeline);
@@ -6190,17 +5313,19 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("draw_list_disable_scissor", "draw_list"), &RenderingDevice::draw_list_disable_scissor);
ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass"), &RenderingDevice::draw_list_switch_to_next_pass);
+#ifndef DISABLE_DEPRECATED
ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass_split", "splits"), &RenderingDevice::_draw_list_switch_to_next_pass_split);
+#endif
- ClassDB::bind_method(D_METHOD("draw_list_end", "post_barrier"), &RenderingDevice::draw_list_end, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+ ClassDB::bind_method(D_METHOD("draw_list_end"), &RenderingDevice::draw_list_end);
- ClassDB::bind_method(D_METHOD("compute_list_begin", "allow_draw_overlap"), &RenderingDevice::compute_list_begin, DEFVAL(false));
+ ClassDB::bind_method(D_METHOD("compute_list_begin"), &RenderingDevice::compute_list_begin);
ClassDB::bind_method(D_METHOD("compute_list_bind_compute_pipeline", "compute_list", "compute_pipeline"), &RenderingDevice::compute_list_bind_compute_pipeline);
ClassDB::bind_method(D_METHOD("compute_list_set_push_constant", "compute_list", "buffer", "size_bytes"), &RenderingDevice::_compute_list_set_push_constant);
ClassDB::bind_method(D_METHOD("compute_list_bind_uniform_set", "compute_list", "uniform_set", "set_index"), &RenderingDevice::compute_list_bind_uniform_set);
ClassDB::bind_method(D_METHOD("compute_list_dispatch", "compute_list", "x_groups", "y_groups", "z_groups"), &RenderingDevice::compute_list_dispatch);
ClassDB::bind_method(D_METHOD("compute_list_add_barrier", "compute_list"), &RenderingDevice::compute_list_add_barrier);
- ClassDB::bind_method(D_METHOD("compute_list_end", "post_barrier"), &RenderingDevice::compute_list_end, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+ ClassDB::bind_method(D_METHOD("compute_list_end"), &RenderingDevice::compute_list_end);
ClassDB::bind_method(D_METHOD("free_rid", "rid"), &RenderingDevice::free);
@@ -6216,15 +5341,19 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("submit"), &RenderingDevice::submit);
ClassDB::bind_method(D_METHOD("sync"), &RenderingDevice::sync);
+#ifndef DISABLE_DEPRECATED
ClassDB::bind_method(D_METHOD("barrier", "from", "to"), &RenderingDevice::barrier, DEFVAL(BARRIER_MASK_ALL_BARRIERS), DEFVAL(BARRIER_MASK_ALL_BARRIERS));
ClassDB::bind_method(D_METHOD("full_barrier"), &RenderingDevice::full_barrier);
+#endif
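The net effect on calling code is purely subtractive: the trailing `post_barrier` masks and the explicit `barrier()`/`full_barrier()` calls simply go away. A hedged migration sketch, where `rd`, `my_buffer` and `data` are illustrative:

	// Hedged migration sketch; "rd", "my_buffer" and "data" are illustrative.
	// Before: explicit masks and manual barriers.
	//   rd->buffer_update(my_buffer, 0, data.size(), data.ptr(), RD::BARRIER_MASK_COMPUTE);
	//   rd->barrier(RD::BARRIER_MASK_TRANSFER, RD::BARRIER_MASK_COMPUTE);
	// After: drop the mask and the barrier call entirely; the graph derives
	// the same synchronization from how the buffer is used afterwards.
	rd->buffer_update(my_buffer, 0, data.size(), data.ptr());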
ClassDB::bind_method(D_METHOD("create_local_device"), &RenderingDevice::create_local_device);
ClassDB::bind_method(D_METHOD("set_resource_name", "id", "name"), &RenderingDevice::set_resource_name);
ClassDB::bind_method(D_METHOD("draw_command_begin_label", "name", "color"), &RenderingDevice::draw_command_begin_label);
+#ifndef DISABLE_DEPRECATED
ClassDB::bind_method(D_METHOD("draw_command_insert_label", "name", "color"), &RenderingDevice::draw_command_insert_label);
+#endif
ClassDB::bind_method(D_METHOD("draw_command_end_label"), &RenderingDevice::draw_command_end_label);
ClassDB::bind_method(D_METHOD("get_device_vendor_name"), &RenderingDevice::get_device_vendor_name);
@@ -6491,6 +5620,7 @@ void RenderingDevice::_bind_methods() {
BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM);
BIND_ENUM_CONSTANT(DATA_FORMAT_MAX);
+#ifndef DISABLE_DEPRECATED
BIND_BITFIELD_FLAG(BARRIER_MASK_VERTEX);
BIND_BITFIELD_FLAG(BARRIER_MASK_FRAGMENT);
BIND_BITFIELD_FLAG(BARRIER_MASK_COMPUTE);
@@ -6498,6 +5628,7 @@ void RenderingDevice::_bind_methods() {
BIND_BITFIELD_FLAG(BARRIER_MASK_RASTER);
BIND_BITFIELD_FLAG(BARRIER_MASK_ALL_BARRIERS);
BIND_BITFIELD_FLAG(BARRIER_MASK_NO_BARRIER);
+#endif
BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D);
BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D);
@@ -6672,18 +5803,25 @@ void RenderingDevice::_bind_methods() {
BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_WRITE_MASK);
BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_REFERENCE);
- BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR); //start rendering and clear the framebuffer (supply params)
- BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION); //start rendering and clear the framebuffer (supply params)
- BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION_CONTINUE); //continue rendering and clear the framebuffer (supply params)
- BIND_ENUM_CONSTANT(INITIAL_ACTION_KEEP); //start rendering, but keep attached color texture contents (depth will be cleared)
- BIND_ENUM_CONSTANT(INITIAL_ACTION_DROP); //start rendering, ignore what is there, just write above it
- BIND_ENUM_CONSTANT(INITIAL_ACTION_CONTINUE); //continue rendering (framebuffer must have been left in "continue" state as final action previously)
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_LOAD);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_DISCARD);
BIND_ENUM_CONSTANT(INITIAL_ACTION_MAX);
+#ifndef DISABLE_DEPRECATED
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION_CONTINUE);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_KEEP);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_DROP);
+ BIND_ENUM_CONSTANT(INITIAL_ACTION_CONTINUE);
+#endif
- BIND_ENUM_CONSTANT(FINAL_ACTION_READ); //will no longer render to it, allows attached textures to be read again, but depth buffer contents will be dropped (can't be read from)
- BIND_ENUM_CONSTANT(FINAL_ACTION_DISCARD); // discard contents after rendering
- BIND_ENUM_CONSTANT(FINAL_ACTION_CONTINUE); //will continue rendering later, attached textures can't be read until re-bound with "finish"
+ BIND_ENUM_CONSTANT(FINAL_ACTION_STORE);
+ BIND_ENUM_CONSTANT(FINAL_ACTION_DISCARD);
BIND_ENUM_CONSTANT(FINAL_ACTION_MAX);
+#ifndef DISABLE_DEPRECATED
+ BIND_ENUM_CONSTANT(FINAL_ACTION_READ);
+ BIND_ENUM_CONSTANT(FINAL_ACTION_CONTINUE);
+#endif
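The deprecated names appear to survive purely for source compatibility. A plausible shape for that (an assumption about the enum in rendering_device.h, which this diff does not show) is aliasing them to the new values:

	// Assumed compatibility aliases; illustrative, the actual definitions
	// live in rendering_device.h and are not part of this diff.
	enum InitialAction {
		INITIAL_ACTION_LOAD,
		INITIAL_ACTION_CLEAR,
		INITIAL_ACTION_DISCARD,
		INITIAL_ACTION_MAX,
	#ifndef DISABLE_DEPRECATED
		INITIAL_ACTION_CLEAR_REGION = INITIAL_ACTION_CLEAR,
		INITIAL_ACTION_CLEAR_REGION_CONTINUE = INITIAL_ACTION_CLEAR,
		INITIAL_ACTION_KEEP = INITIAL_ACTION_LOAD,
		INITIAL_ACTION_DROP = INITIAL_ACTION_DISCARD,
		INITIAL_ACTION_CONTINUE = INITIAL_ACTION_LOAD,
	#endif
	};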
BIND_ENUM_CONSTANT(SHADER_STAGE_VERTEX);
BIND_ENUM_CONSTANT(SHADER_STAGE_FRAGMENT);
@@ -6950,8 +6088,8 @@ RID RenderingDevice::_uniform_set_create(const TypedArray<RDUniform> &p_uniforms
return uniform_set_create(uniforms, p_shader, p_shader_set);
}
-Error RenderingDevice::_buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
- return buffer_update(p_buffer, p_offset, p_size, p_data.ptr(), p_post_barrier);
+Error RenderingDevice::_buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data) {
+ return buffer_update(p_buffer, p_offset, p_size, p_data.ptr());
}
static Vector<RenderingDevice::PipelineSpecializationConstant> _get_spec_constants(const TypedArray<RDPipelineSpecializationConstant> &p_constants) {
@@ -7022,47 +6160,15 @@ RID RenderingDevice::_compute_pipeline_create(RID p_shader, const TypedArray<RDP
return compute_pipeline_create(p_shader, _get_spec_constants(p_specialization_constants));
}
-RenderingDevice::DrawListID RenderingDevice::_draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures) {
- Vector<RID> stextures;
- for (int i = 0; i < p_storage_textures.size(); i++) {
- stextures.push_back(p_storage_textures[i]);
- }
- return draw_list_begin(p_framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, p_region, stextures);
-}
-
+#ifndef DISABLE_DEPRECATED
Vector<int64_t> RenderingDevice::_draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures) {
- Vector<DrawListID> splits;
- splits.resize(p_splits);
- Vector<RID> stextures;
- for (int i = 0; i < p_storage_textures.size(); i++) {
- stextures.push_back(p_storage_textures[i]);
- }
- draw_list_begin_split(p_framebuffer, p_splits, splits.ptrw(), p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, p_region, stextures);
-
- Vector<int64_t> split_ids;
- split_ids.resize(splits.size());
- for (int i = 0; i < splits.size(); i++) {
- split_ids.write[i] = splits[i];
- }
-
- return split_ids;
+ ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
Vector<int64_t> RenderingDevice::_draw_list_switch_to_next_pass_split(uint32_t p_splits) {
- Vector<DrawListID> splits;
- splits.resize(p_splits);
-
- Error err = draw_list_switch_to_next_pass_split(p_splits, splits.ptrw());
- ERR_FAIL_COND_V(err != OK, Vector<int64_t>());
-
- Vector<int64_t> split_ids;
- split_ids.resize(splits.size());
- for (int i = 0; i < splits.size(); i++) {
- split_ids.write[i] = splits[i];
- }
-
- return split_ids;
+ ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
+#endif
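Callers that previously split recording across threads now record a single list and let the device parallelize internally via the graph's secondary command buffers. A hedged sketch, where `rd`, `framebuffer`, `pipeline` and `clear_colors` are illustrative:

	// Hedged sketch; "rd", "framebuffer", "pipeline" and "clear_colors" are
	// illustrative. One list replaces the previous N split lists.
	RD::DrawListID list = rd->draw_list_begin(framebuffer,
			RD::INITIAL_ACTION_CLEAR, RD::FINAL_ACTION_STORE,
			RD::INITIAL_ACTION_CLEAR, RD::FINAL_ACTION_DISCARD, clear_colors);
	rd->draw_list_bind_render_pipeline(list, pipeline);
	rd->draw_list_draw(list, false /* use_indices */, 1 /* instances */);
	rd->draw_list_end();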
void RenderingDevice::_draw_list_set_push_constant(DrawListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size) {
ERR_FAIL_COND((uint32_t)p_data.size() > p_data_size);