/*
 * Copyright (C) 2020-2021 Collabora, Ltd.
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"

static bool
lower(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   bool *lower_plain_stores = data;

   switch (intr->intrinsic) {
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap:
      break;
   case nir_intrinsic_store_global:
   case nir_intrinsic_image_store:
      if (!(*lower_plain_stores))
         return false;
      else
         break;
   default:
      return false;
   }

   b->cursor = nir_before_instr(instr);
   bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
   nir_ssa_def *undef = NULL;

   nir_ssa_def *helper = nir_load_helper_invocation(b, 1);
   nir_push_if(b, nir_inot(b, helper));
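   /* Removing the intrinsic and re-inserting it at the builder cursor moves
    * it into the then-block of the new if, so only non-helper invocations
    * execute it.
    */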
   nir_instr_remove(instr);
   nir_builder_instr_insert(b, instr);

   /* Per the spec, it does not matter what we return for helper threads.
    * Represent this by an ssa_undef in the hopes the backend will be clever
    * enough to optimize out the phi.
    *
    *    Fragment shader helper invocations execute the same shader code as
    *    non-helper invocations, but will not have side effects that modify the
    *    framebuffer or other shader-accessible memory. In particular:
    *
    *       ...
    *
    *       Atomic operations to image, buffer, or atomic counter variables
    *       performed by helper invocations have no effect on the underlying
    *       image or buffer memory. The values returned by such atomic
    *       operations are undefined.
    */
   if (has_dest) {
      nir_push_else(b, NULL);
      undef = nir_ssa_undef(b, nir_dest_num_components(intr->dest),
                            nir_dest_bit_size(intr->dest));
   }

   nir_pop_if(b, NULL);

   if (has_dest) {
      assert(intr->dest.is_ssa);
      nir_ssa_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);

      /* We can't use nir_ssa_def_rewrite_uses_after on phis, so use the
       * global version and fix up the phi's source manually.
       */
      nir_ssa_def_rewrite_uses(&intr->dest.ssa, phi);

      nir_instr *phi_instr = phi->parent_instr;
      nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
      nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
                                                        instr->block);
      nir_instr_rewrite_src_ssa(phi->parent_instr, &phi_src->src,
                                &intr->dest.ssa);
   }

   return true;
}

bool
nir_lower_helper_writes(nir_shader *shader, bool lower_plain_stores)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);
   return nir_shader_instructions_pass(shader, lower, nir_metadata_none,
                                       &lower_plain_stores);
}
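
/* Example usage (an illustrative sketch, not part of this pass): a fragment
 * shader backend would typically invoke the pass from its lowering pipeline,
 * e.g.
 *
 *    bool progress = false;
 *    NIR_PASS(progress, nir, nir_lower_helper_writes, false);
 *
 * passing true instead to also wrap plain global/image stores.  The
 * surrounding driver code here is hypothetical.
 */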