path: root/src/asahi
author     Alyssa Rosenzweig <alyssa@rosenzweig.io>  2023-04-09 13:59:13 -0400
committer  Alyssa Rosenzweig <alyssa@rosenzweig.io>  2023-05-07 09:00:59 -0400
commit     fc88876329ce87d5ff89cab91226970a11d6c987 (patch)
tree       c838bacea5683fd15f790e1485794e89f74999f7 /src/asahi
parent     21d7049925f669f62b1cea4f00dabe98e2bd2ea0 (diff)
download   mesa-fc88876329ce87d5ff89cab91226970a11d6c987.tar.gz
agx: Handle linear 2D array textureSize()
We handle linear 2D arrays internally for blit shaders, so we need
textureSize to work for these. That requires some special casing, because
there's a line stride where the layer count would otherwise be. But it's
not too bad.

Fixes dEQP-GLES3.functional.shaders.texture_functions.texturesize.sampler2darray_*
when forcing linear textures.

Since we clamp array access to the maximum layer, we need textureSize() to
work for even the most basic array texturing. So this should fix blits from
linear 2D arrays as well, which finally unlocks support for compressed
arrays/cubes/3D textures.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22891>
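As a rough illustration of what the lowering computes, here is a minimal
standalone C sketch of the same descriptor decode. The field offsets (a 2-bit
layout field at bit 4 of word 0, a 16-bit extension word at byte offset 16,
an 11-bit layer count) mirror the pass in the diff below and should be read
as assumptions rather than a hardware reference; agx_txs_depth_sketch is a
hypothetical helper, not part of the driver.

    #include <stdint.h>
    #include <string.h>

    #define AGX_LAYOUT_LINEAR 0x0

    /* Hypothetical CPU-side sketch: return the layer count of a 2D array
     * texture given its (assumed) 24-byte descriptor and the depth-minus-one
     * field that twiddled textures store in the usual place. */
    static uint32_t
    agx_txs_depth_sketch(const uint8_t desc[24], uint32_t depth_m1_twiddled)
    {
       uint32_t w0;
       memcpy(&w0, desc, sizeof(w0));

       /* Layout field: 2 bits starting at bit 4 of the first word */
       uint32_t layout = (w0 >> 4) & 0x3;

       if (layout == AGX_LAYOUT_LINEAR) {
          /* For linear, the usual depth field holds the line stride, and the
           * real layer count minus 1 lives in the low 11 bits of the 16-bit
           * word following the first 128-bit descriptor. */
          uint16_t extension;
          memcpy(&extension, desc + 16, sizeof(extension));
          return (uint32_t)(extension & 0x7FF) + 1;
       }

       /* Twiddled (non-linear) textures keep the layer count in place */
       return depth_m1_twiddled + 1;
    }

The NIR pass below makes the same choice inside the shader with nir_bcsel,
since the layout is only known from the descriptor at run time.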
Diffstat (limited to 'src/asahi')
-rw-r--r--  src/asahi/compiler/agx_nir_lower_texture.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/src/asahi/compiler/agx_nir_lower_texture.c b/src/asahi/compiler/agx_nir_lower_texture.c
index fbd1dfb437b..48c994a981e 100644
--- a/src/asahi/compiler/agx_nir_lower_texture.c
+++ b/src/asahi/compiler/agx_nir_lower_texture.c
@@ -13,6 +13,7 @@
#define AGX_TEXTURE_DESC_STRIDE 24
#define AGX_FORMAT_RGB32_EMULATED 0x36
+#define AGX_LAYOUT_LINEAR 0x0
static nir_ssa_def *
texture_descriptor_ptr(nir_builder *b, nir_tex_instr *tex)
@@ -100,6 +101,30 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
b, lod, nir_u2u32(b, nir_ssa_for_src(b, tex->src[lod_idx].src, 1)));
}
+ if (tex->sampler_dim == GLSL_SAMPLER_DIM_2D && tex->is_array) {
+ /* Linear 2D arrays are special and have their depth in the next word,
+ * since the depth read above is actually the stride for linear. We handle
+ * this case specially.
+ *
+ * TODO: Optimize this, since linear 2D arrays aren't needed for APIs and
+ * this just gets used internally for blits.
+ */
+ nir_ssa_def *layout =
+ nir_iand_imm(b, nir_ushr_imm(b, w0, 4), BITFIELD_MASK(2));
+
+ /* Get the 2 bytes after the first 128-bit descriptor */
+ nir_ssa_def *extension =
+ nir_load_global_constant(b, nir_iadd_imm(b, ptr, 16), 8, 1, 16);
+
+ nir_ssa_def *depth_linear_m1 =
+ nir_iand_imm(b, extension, BITFIELD_MASK(11));
+
+ depth_linear_m1 = nir_u2uN(b, depth_linear_m1, depth_m1->bit_size);
+
+ depth_m1 = nir_bcsel(b, nir_ieq_imm(b, layout, AGX_LAYOUT_LINEAR),
+ depth_linear_m1, depth_m1);
+ }
+
/* Add 1 to width-1, height-1 to get base dimensions */
nir_ssa_def *width = nir_iadd_imm(b, width_m1, 1);
nir_ssa_def *height = nir_iadd_imm(b, height_m1, 1);