summaryrefslogtreecommitdiff
path: root/src/amd/compiler/aco_scheduler.cpp
diff options
context:
space:
mode:
author: Rhys Perry <pendingchaos02@gmail.com> 2022-07-21 15:45:11 +0100
committer: Marge Bot <emma+marge@anholt.net> 2022-09-30 20:57:02 +0000
commit6407d783ea862082bf0c4f764c42430f354322d0 (patch)
treee1e1f785bb8cf09c5151b20c139e42aef1a7e0bd /src/amd/compiler/aco_scheduler.cpp
parent7cecc816837117c0999d5e37efb84fd90c1041b8 (diff)
downloadmesa-6407d783ea862082bf0c4f764c42430f354322d0.tar.gz
aco: update sendmsg enum from LLVM
Add GFX11 enums and some new ones that apparently existed before. Signed-off-by: Rhys Perry <pendingchaos02@gmail.com> Reviewed-by: Daniel Schürmann <daniel@schuermann.dev> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17710>
Diffstat (limited to 'src/amd/compiler/aco_scheduler.cpp')
-rw-r--r-- src/amd/compiler/aco_scheduler.cpp | 43
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/src/amd/compiler/aco_scheduler.cpp b/src/amd/compiler/aco_scheduler.cpp
index 6cebcf95622..88498ef4565 100644
--- a/src/amd/compiler/aco_scheduler.cpp
+++ b/src/amd/compiler/aco_scheduler.cpp
@@ -122,6 +122,7 @@ struct MoveState {
};
struct sched_ctx {
+ amd_gfx_level gfx_level;
int16_t num_waves;
int16_t last_SMEM_stall;
int last_SMEM_dep_idx;
@@ -420,20 +421,10 @@ MoveState::upwards_skip(UpwardsCursor& cursor)
}
bool
-is_gs_or_done_sendmsg(const Instruction* instr)
+is_done_sendmsg(amd_gfx_level gfx_level, const Instruction* instr)
{
- if (instr->opcode == aco_opcode::s_sendmsg) {
- uint16_t imm = instr->sopp().imm;
- return (imm & sendmsg_id_mask) == _sendmsg_gs || (imm & sendmsg_id_mask) == _sendmsg_gs_done;
- }
- return false;
-}
-
-bool
-is_done_sendmsg(const Instruction* instr)
-{
- if (instr->opcode == aco_opcode::s_sendmsg)
- return (instr->sopp().imm & sendmsg_id_mask) == _sendmsg_gs_done;
+ if (gfx_level <= GFX10_3 && instr->opcode == aco_opcode::s_sendmsg)
+ return (instr->sopp().imm & sendmsg_id_mask_gfx6) == _sendmsg_gs_done;
return false;
}
@@ -464,6 +455,7 @@ struct memory_event_set {
};
struct hazard_query {
+ amd_gfx_level gfx_level;
bool contains_spill;
bool contains_sendmsg;
bool uses_exec;
@@ -473,8 +465,9 @@ struct hazard_query {
};
void
-init_hazard_query(hazard_query* query)
+init_hazard_query(const sched_ctx& ctx, hazard_query* query)
{
+ query->gfx_level = ctx.gfx_level;
query->contains_spill = false;
query->contains_sendmsg = false;
query->uses_exec = false;
@@ -484,9 +477,10 @@ init_hazard_query(hazard_query* query)
}
void
-add_memory_event(memory_event_set* set, Instruction* instr, memory_sync_info* sync)
+add_memory_event(amd_gfx_level gfx_level, memory_event_set* set, Instruction* instr,
+ memory_sync_info* sync)
{
- set->has_control_barrier |= is_done_sendmsg(instr);
+ set->has_control_barrier |= is_done_sendmsg(gfx_level, instr);
if (instr->opcode == aco_opcode::p_barrier) {
Pseudo_barrier_instruction& bar = instr->barrier();
if (bar.sync.semantics & semantic_acquire)
@@ -524,7 +518,7 @@ add_to_hazard_query(hazard_query* query, Instruction* instr)
memory_sync_info sync = get_sync_info_with_hack(instr);
- add_memory_event(&query->mem_events, instr, &sync);
+ add_memory_event(query->gfx_level, &query->mem_events, instr, &sync);
if (!(sync.semantics & semantic_can_reorder)) {
unsigned storage = sync.storage;
@@ -580,7 +574,7 @@ perform_hazard_query(hazard_query* query, Instruction* instr, bool upwards)
memory_event_set instr_set;
memset(&instr_set, 0, sizeof(instr_set));
memory_sync_info sync = get_sync_info_with_hack(instr);
- add_memory_event(&instr_set, instr, &sync);
+ add_memory_event(query->gfx_level, &instr_set, instr, &sync);
memory_event_set* first = &instr_set;
memory_event_set* second = &query->mem_events;
@@ -655,7 +649,7 @@ schedule_SMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& registe
/* first, check if we have instructions before current to move down */
hazard_query hq;
- init_hazard_query(&hq);
+ init_hazard_query(ctx, &hq);
add_to_hazard_query(&hq, current);
DownwardsCursor cursor = ctx.mv.downwards_init(idx, false, false);
@@ -751,7 +745,7 @@ schedule_SMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& registe
if (is_dependency) {
if (!found_dependency) {
ctx.mv.upwards_update_insert_idx(up_cursor);
- init_hazard_query(&hq);
+ init_hazard_query(ctx, &hq);
found_dependency = true;
}
}
@@ -797,8 +791,8 @@ schedule_VMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& registe
/* first, check if we have instructions before current to move down */
hazard_query indep_hq;
hazard_query clause_hq;
- init_hazard_query(&indep_hq);
- init_hazard_query(&clause_hq);
+ init_hazard_query(ctx, &indep_hq);
+ init_hazard_query(ctx, &clause_hq);
add_to_hazard_query(&indep_hq, current);
DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, true);
@@ -923,7 +917,7 @@ schedule_VMEM(sched_ctx& ctx, Block* block, std::vector<RegisterDemand>& registe
if (is_dependency) {
if (!found_dependency) {
ctx.mv.upwards_update_insert_idx(up_cursor);
- init_hazard_query(&indep_hq);
+ init_hazard_query(ctx, &indep_hq);
found_dependency = true;
}
} else if (is_vmem) {
@@ -967,7 +961,7 @@ schedule_position_export(sched_ctx& ctx, Block* block, std::vector<RegisterDeman
DownwardsCursor cursor = ctx.mv.downwards_init(idx, true, false);
hazard_query hq;
- init_hazard_query(&hq);
+ init_hazard_query(ctx, &hq);
add_to_hazard_query(&hq, current);
for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int)idx - window_size;
@@ -1054,6 +1048,7 @@ schedule_program(Program* program, live& live_vars)
demand.vgpr += program->config->num_shared_vgprs / 2;
sched_ctx ctx;
+ ctx.gfx_level = program->gfx_level;
ctx.mv.depends_on.resize(program->peekAllocationId());
ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());