author    Jason Ekstrand <jason@jlekstrand.net>    2019-11-08 09:33:07 -0600
committer Dylan Baker <dylan@pnwbakers.com>        2019-11-19 16:54:04 -0800
commit    b7ab6e9470d888fa541cbbae173cae4e5ade258f (patch)
tree      0cdd0d3dd939404cd2335a4d2214531eb308c460
parent    addf63dbd796fc08e06ac1323071a8f8f48ac7b9 (diff)
download  mesa-b7ab6e9470d888fa541cbbae173cae4e5ade258f.tar.gz
anv: Stop bounds-checking pushed UBOs
The bounds checking is actually less safe than just pushing the data. If the bounds checking actually ever kicks in and it's not on the last UBO push range, then the shrinking will cause all subsequent ranges to be pushed to the wrong place in the GRF. One of the behaviors we definitely don't want is for OOB UBO access to result in completely unrelated UBOs returning garbage values. It's safer to just push the UBOs as-requested. If we're really concerned about robustness, we can emit shader code to do bounds checking which should be stupid cheap (a CMP followed by SEL).

Cc: mesa-stable@lists.freedesktop.org
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
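The "CMP followed by SEL" mentioned above is a per-load compare-and-select: the data is always pushed at its full requested length (so later ranges never shift in the GRF), and any out-of-bounds read is clamped in the shader itself. For example, if the old clamp ever shrank the first pushed range from four 32-byte registers to two, every subsequent range's data would land two registers earlier than the compiled shader expects. Below is a minimal C sketch of that select idea, illustrative only; the names (load_pushed_dword, pushed_data, ubo_size_dwords) are hypothetical and not part of anv or its compiler:

    /* Illustrative sketch only: roughly what a per-load bounds check compiled
     * down to a CMP followed by a SEL would do for a 32-bit read from a pushed
     * UBO.  Not actual anv/NIR code; all names here are hypothetical. */
    #include <stdint.h>

    static inline uint32_t
    load_pushed_dword(const uint32_t *pushed_data, uint32_t ubo_size_dwords,
                      uint32_t dword_offset)
    {
       /* CMP: is the requested dword inside the bound UBO range? */
       int in_bounds = dword_offset < ubo_size_dwords;

       /* SEL: keep the loaded value when in bounds, return zero otherwise.
        * The data is still pushed at its full requested length, so an OOB
        * access never changes where other push ranges land in the GRF. */
       return in_bounds ? pushed_data[dword_offset] : 0;
    }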
-rw-r--r--    src/intel/vulkan/genX_cmd_buffer.c    46
1 file changed, 10 insertions(+), 36 deletions(-)
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index a4223bfd1ef..cfffe30aa9b 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -2602,20 +2602,12 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
const struct anv_pipeline_binding *binding =
&bind_map->surface_to_descriptor[surface];
- struct anv_address read_addr;
- uint32_t read_len;
+ struct anv_address addr;
if (binding->set == ANV_DESCRIPTOR_SET_SHADER_CONSTANTS) {
- struct anv_address constant_data = {
+ addr = (struct anv_address) {
.bo = pipeline->device->dynamic_state_pool.block_pool.bo,
.offset = pipeline->shaders[stage]->constant_data.offset,
};
- unsigned constant_data_size =
- pipeline->shaders[stage]->constant_data_size;
-
- read_len = MIN2(range->length,
- DIV_ROUND_UP(constant_data_size, 32) - range->start);
- read_addr = anv_address_add(constant_data,
- range->start * 32);
} else if (binding->set == ANV_DESCRIPTOR_SET_DESCRIPTORS) {
/* This is a descriptor set buffer so the set index is
* actually given by binding->binding. (Yes, that's
@@ -2623,45 +2615,27 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
*/
struct anv_descriptor_set *set =
gfx_state->base.descriptors[binding->binding];
- struct anv_address desc_buffer_addr =
- anv_descriptor_set_address(cmd_buffer, set);
- const unsigned desc_buffer_size = set->desc_mem.alloc_size;
-
- read_len = MIN2(range->length,
- DIV_ROUND_UP(desc_buffer_size, 32) - range->start);
- read_addr = anv_address_add(desc_buffer_addr,
- range->start * 32);
+ addr = anv_descriptor_set_address(cmd_buffer, set);
} else {
const struct anv_descriptor *desc =
anv_descriptor_for_binding(&gfx_state->base, binding);
if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
- read_len = MIN2(range->length,
- DIV_ROUND_UP(desc->buffer_view->range, 32) - range->start);
- read_addr = anv_address_add(desc->buffer_view->address,
- range->start * 32);
+ addr = desc->buffer_view->address;
} else {
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
uint32_t dynamic_offset =
dynamic_offset_for_binding(&gfx_state->base, binding);
- uint32_t buf_offset =
- MIN2(desc->offset + dynamic_offset, desc->buffer->size);
- uint32_t buf_range =
- MIN2(desc->range, desc->buffer->size - buf_offset);
-
- read_len = MIN2(range->length,
- DIV_ROUND_UP(buf_range, 32) - range->start);
- read_addr = anv_address_add(desc->buffer->address,
- buf_offset + range->start * 32);
+ addr = anv_address_add(desc->buffer->address,
+ desc->offset + dynamic_offset);
}
}
- if (read_len > 0) {
- c.ConstantBody.Buffer[n] = read_addr;
- c.ConstantBody.ReadLength[n] = read_len;
- n--;
- }
+ c.ConstantBody.Buffer[n] =
+ anv_address_add(addr, range->start * 32);
+ c.ConstantBody.ReadLength[n] = range->length;
+ n--;
}
struct anv_state state =