"Fossies" - the Fresh Open Source Software Archive

Member "mesa-20.1.8/src/compiler/nir/nir_from_ssa.c" (16 Sep 2020, 34192 Bytes) of package /linux/misc/mesa-20.1.8.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "nir_from_ssa.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 20.1.5_vs_20.2.0-rc1.

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_vla.h"

/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */

struct from_ssa_state {
   nir_builder builder;
   void *dead_ctx;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
   bool progress;
};

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->live_index == 0) {
      /* SSA undefs always dominate */
      return true;
   } else if (b->live_index < a->live_index) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return a->live_index <= b->live_index;
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}


/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers.  This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al. but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure.  The merge_set stores a linked list of
 * merge_nodes in dominance order of the ssa definitions.  (Since the
 * liveness analysis pass indexes the SSA values in dominance order for us,
 * this is an easy thing to keep up.)  It is assumed that no two nodes in a
 * given set interfere.  Merging two sets or checking for interference can
 * be done in a single linear-time merge-sort walk of the two lists of
 * nodes.
 */
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_ssa_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   nir_register *reg;
} merge_set;

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   nir_ssa_def *dom[set->size];
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, "  ");

      if (node->def->name)
         fprintf(fp, "ssa_%d /* %s */\n", node->def->index, node->def->name);
      else
         fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = ralloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->reg = NULL;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   return nir_ssa_defs_interfere(a->def, b->def);
}

/* Merges b into a */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          a_node->def->live_index > b_node->def->live_index) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;

   return a;
}

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (a_node->def->live_index <= b_node->def->live_index) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}
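
/* A minimal sketch (deliberately not compiled, mirroring the coalescing
 * loops further down) of how the merge-set helpers above fit together;
 * ssa_a and ssa_b are hypothetical defs we would like to place in the
 * same register.
 */
#if 0
static void
try_coalesce_defs(nir_ssa_def *ssa_a, nir_ssa_def *ssa_b,
                  struct from_ssa_state *state)
{
   merge_node *a = get_merge_node(ssa_a, state);
   merge_node *b = get_merge_node(ssa_b, state);

   /* Merging only makes sense for distinct sets, and it is only safe when
    * the linear-time walk in merge_sets_interfere() finds no interference.
    * merge_merge_sets() keeps the combined list in dominance order.
    */
   if (a->set != b->set && !merge_sets_interfere(a->set, b->set))
      merge_merge_sets(a->set, b->set);
}
#endif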

static bool
add_parallel_copy_to_end_of_block(nir_block *block, void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(dead_ctx);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks.  For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values.  We also add a parallel copy to the end of every block that has
 * a successor with phi nodes; for each phi node in each successor, it
 * copies the corresponding source of the phi node, and we adjust the phi
 * to use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition.  What this does is
 * ensure that each value used in a phi also has exactly one use.  The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes and, thanks to the parallel copy at the end of
 * the predecessor block, the sources of phi nodes are the only use of
 * that value.  This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all.  We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist.  We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
 */
static bool
isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
   nir_instr *last_phi_instr = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi_instr = instr;
   }

   /* If we don't have any phis, then there's nothing for us to do. */
   if (last_phi_instr == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(dead_ctx);
   nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);

   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);
      nir_foreach_phi_src(src, phi) {
         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);
         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                           phi->dest.ssa.num_components,
                           phi->dest.ssa.bit_size, src->src.ssa->name);
         exec_list_push_tail(&pcopy->entries, &entry->node);

         assert(src->src.is_ssa);
         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);

         nir_instr_rewrite_src(&phi->instr, &src->src,
                               nir_src_for_ssa(&entry->dest.ssa));
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&entry->dest.ssa));

      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                            nir_src_for_ssa(&phi->dest.ssa));
   }

   return true;
}
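
/* An illustrative before/after for the isolation above (hypothetical IR in
 * roughly NIR's textual style; the primed names are the new parallel-copy
 * defs):
 *
 *    before:                          after:
 *    block_0:                         block_0:
 *       ...                              ssa_2' = pcopy ssa_2
 *    block_1:                         block_1:
 *       ...                              ssa_3' = pcopy ssa_3
 *    block_2:                         block_2:
 *       ssa_4 = phi ssa_2, ssa_3         ssa_4 = phi ssa_2', ssa_3'
 *       use(ssa_4)                       ssa_4' = pcopy ssa_4
 *                                        use(ssa_4')
 */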

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      if (!entry->src.is_ssa)
         continue;

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      /* Don't try to coalesce these */
      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);

      if (src_node->set == dest_node->set)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_register *
create_reg_for_ssa_def(nir_ssa_def *def, nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);

   reg->name = def->name;
   reg->num_components = def->num_components;
   reg->bit_size = def->bit_size;
   reg->num_array_elems = 0;

   return reg;
}

static bool
rewrite_ssa_def(nir_ssa_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;
   nir_register *reg;

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web.  Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one.  Note that all of
       * the things in the merge set should be the same so it doesn't
       * matter which node's definition we use.
       */
      if (node->set->reg == NULL)
         node->set->reg = create_reg_for_ssa_def(def, state->builder.impl);

      reg = node->set->reg;
   } else {
      if (state->phi_webs_only)
         return true;

      /* We leave load_const SSA values alone.  They act as immediates to
       * the backend.  If it got coalesced into a phi, that's ok.
       */
      if (def->parent_instr->type == nir_instr_type_load_const)
         return true;

      reg = create_reg_for_ssa_def(def, state->builder.impl);
   }

   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
   assert(list_is_empty(&def->uses) && list_is_empty(&def->if_uses));

   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just got
       * rid of all its uses.
       */
      nir_instr *parent_instr = def->parent_instr;
      nir_instr_remove(parent_instr);
      ralloc_steal(state->dead_ctx, parent_instr);
      state->progress = true;
      return true;
   }

   assert(def->parent_instr->type != nir_instr_type_load_const);

   /* At this point we know a priori that this SSA def is part of a
    * nir_dest.  We can use exec_node_data to get the dest pointer.
    */
   nir_dest *dest = exec_node_data(nir_dest, def, ssa);

   nir_instr_rewrite_dest(state->instr, dest, nir_dest_for_reg(reg));
   state->progress = true;
   return true;
}

/* Resolves ssa definitions to registers.  While we're at it, we also
 * remove phi nodes.
 */
static void
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      state->instr = instr;
      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);

      if (instr->type == nir_instr_type_phi) {
         nir_instr_remove(instr);
         ralloc_steal(state->dead_ctx, instr);
         state->progress = true;
      }
   }
   state->instr = NULL;
}

static void
emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
{
   assert(!dest_src.is_ssa &&
          dest_src.reg.indirect == NULL &&
          dest_src.reg.base_offset == 0);

   if (src.is_ssa)
      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
   else
      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);

   nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
   nir_src_copy(&mov->src[0].src, &src, mov);
   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;

   nir_builder_instr_insert(b, &mov->instr);
}

/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive.  We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Set the location of the source value to the destination
 *  - Mark the location containing the source value
 *  - Mark the destination as no longer needing to be copied
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary.  Now, the cycle is broken, so we
 * can continue with the above steps.
 */
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(nir_src, values, num_copies * 2);

   /* The current location of a given piece of data.  We will use -1 for "null" */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from.  We will use -1 for "null" */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   state->builder.cursor = nir_before_instr(&pcopy->instr);

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], entry->src))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = entry->src;
      }

      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], dest_src)) {
            /* Each destination of a parallel copy instruction should be
             * unique.  A destination may get used as a source, so we still
             * have to walk the list.  However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_src;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying.  We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (to_do_idx >= 0) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         emit_copy(&state->builder, values[loc[a]], values[b]);

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* If a needs to be filled... */
         if (pred[a] != -1) {
            /* If any other copies want a they can find it at b */
            loc[a] = b;

            /* It's ready for copying now */
            ready[++ready_idx] = a;
         }
      }
      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do.  We have to break a cycle, so we create a new temporary
       * register for that purpose.  Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers.  However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with.  If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       */
      assert(num_vals < num_copies * 2);
      nir_register *reg = nir_local_reg_create(state->builder.impl);
      reg->name = "copy_temp";
      reg->num_array_elems = 0;
      if (values[b].is_ssa) {
         reg->num_components = values[b].ssa->num_components;
         reg->bit_size = values[b].ssa->bit_size;
      } else {
         reg->num_components = values[b].reg.reg->num_components;
         reg->bit_size = values[b].reg.reg->bit_size;
      }
      values[num_vals].is_ssa = false;
      values[num_vals].reg.reg = reg;

      emit_copy(&state->builder, values[b], values[num_vals]);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
}
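
/* A worked example (hypothetical registers): the parallel copy
 * { r0 <- r1, r1 <- r0 } is a swap, so no destination is ever "empty" and
 * the loop above has to break the cycle with a temporary:
 *
 *    copy_temp = r1;   // r1's value now lives in copy_temp; r1 is free
 *    r1 = r0;
 *    r0 = copy_temp;
 */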

/* Resolves the parallel copies in a block.  Each block can have at most
 * two:  one at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes.  If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   if (first_instr->type == nir_instr_type_parallel_copy) {
      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);

      resolve_parallel_copy(pcopy, state);
   }

   /* It's possible that the above code already cleaned up the end parallel
    * copy.  However, doing so removed it from the instruction list so we
    * won't find it here.  Therefore, it's safe to go ahead and just look
    * for one and clean it up if it exists.
    */
   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);
   if (end_pcopy)
      resolve_parallel_copy(end_pcopy, state);

   return true;
}

static bool
nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
{
   struct from_ssa_state state;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_pointer_hash_table_create(NULL);
   state.progress = false;

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_metadata_require(impl, nir_metadata_live_ssa_defs |
                              nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_registers_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   /* Clean up dead instructions and the hash tables */
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
   return state.progress;
}

bool
nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_convert_from_ssa_impl(function->impl, phi_webs_only);
   }

   return progress;
}

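/* Typical usage, as a minimal sketch (not compiled): a driver runs the pass
 * over the whole shader once optimization is done, shortly before its
 * register allocator.  Here "shader" is a hypothetical nir_shader; passing
 * true instead of false would lower only the phi webs and leave every
 * other value in SSA form.
 */
#if 0
   NIR_PASS_V(shader, nir_convert_from_ssa, false);
#endif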

static void
place_phi_read(nir_shader *shader, nir_register *reg,
               nir_ssa_def *def, nir_block *block, unsigned depth)
{
   if (block != def->parent_instr->block) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors && depth < 32) {
         /* All predecessors of this block have exactly one successor and it
          * is this block so they must eventually lead here without
          * intersecting each other.  Place the reads in the predecessors
          * instead of this block.
          *
          * We only let this function recurse 32 times because it can recurse
          * indefinitely in the presence of infinite loops.  Because we're
          * crawling a single-successor chain, it doesn't matter where we
          * place it so it's ok to stop at an arbitrary distance.
          *
          * TODO: One day, we could detect back edges and avoid the recursion
          * that way.
          */
         set_foreach(block->predecessors, entry) {
            place_phi_read(shader, reg, def, (nir_block *)entry->key,
                           depth + 1);
         }
         return;
      }
   }

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = (1 << reg->num_components) - 1;
   nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
}
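
/* For example (hypothetical CFG): if the read is requested in a block_3 that
 * merges two if-arms, block_1 and block_2, and each arm has block_3 as its
 * only successor, the mov is emitted at the ends of block_1 and block_2
 * instead of in block_3, crawling as deep into the if-ladder as is safe.
 */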

/** Lower all of the phi nodes in a block to movs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
 * single block to convert all of its phis to a register and some movs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation.
 *
 * The one intelligent thing this pass does is that it places the moves from
 * the phi sources as high up the predecessor tree as possible instead of in
 * the exact predecessor.  This means that, in particular, it will crawl into
 * the deepest nesting of any if-ladders.  In order to ensure that doing so is
 * safe, it stops as soon as one of the predecessors has multiple successors.
 */
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);

      nir_register *reg = create_reg_for_ssa_def(&phi->dest.ssa, impl);

      nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
      mov->src[0].src = nir_src_for_reg(reg);
      mov->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
      nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      nir_instr_insert(nir_after_instr(&phi->instr), &mov->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&mov->dest.dest.ssa));

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         place_phi_read(shader, reg, src->src.ssa, src->pred, 0);
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
dest_replace_ssa_with_reg(nir_dest *dest, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;

   if (!dest->is_ssa)
      return true;

   nir_register *reg = create_reg_for_ssa_def(&dest->ssa, state->impl);

   nir_ssa_def_rewrite_uses(&dest->ssa, nir_src_for_reg(reg));

   nir_instr *instr = dest->ssa.parent_instr;
   *dest = nir_dest_for_reg(reg);
   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &reg->defs);

   state->progress = true;

   return true;
}

static bool
ssa_def_is_local_to_block(nir_ssa_def *def, UNUSED void *state)
{
   nir_block *block = def->parent_instr->block;
   nir_foreach_use(use_src, def) {
      if (use_src->parent_instr->block != block ||
          use_src->parent_instr->type == nir_instr_type_phi) {
         return false;
      }
   }

   if (!list_is_empty(&def->if_uses))
      return false;

   return true;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the SSA
 * defs in the given block with registers.  If not used carefully, this may
 * result in phi nodes with register sources which is technically invalid.
 * Fortunately, the register-based into-SSA pass handles them anyway.
 */
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_ssa_undef) {
         /* Undefs are just a read of something never written. */
         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
         nir_register *reg = create_reg_for_ssa_def(&undef->def, state.impl);
         nir_ssa_def_rewrite_uses(&undef->def, nir_src_for_reg(reg));
      } else if (instr->type == nir_instr_type_load_const) {
         /* Constant loads are SSA-only, so we need to insert a move */
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_register *reg = create_reg_for_ssa_def(&load->def, state.impl);
         nir_ssa_def_rewrite_uses(&load->def, nir_src_for_reg(reg));

         nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
         mov->src[0].src = nir_src_for_ssa(&load->def);
         mov->dest.dest = nir_dest_for_reg(reg);
         mov->dest.write_mask = (1 << reg->num_components) - 1;
         nir_instr_insert(nir_after_instr(&load->instr), &mov->instr);
      } else if (nir_foreach_ssa_def(instr, ssa_def_is_local_to_block, NULL)) {
         /* If the SSA def produced by this instruction is only in the block
          * in which it is defined and is not used by ifs or phis, then we
          * don't have a reason to convert it to a register.
          */
      } else {
         nir_foreach_dest(instr, dest_replace_ssa_with_reg, &state);
      }
   }

   return state.progress;
}
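
/* A sketch of the effect of nir_lower_ssa_defs_to_regs_block on hypothetical
 * IR: given
 *
 *    ssa_1 = load_const 1.0
 *    ssa_2 = fadd ssa_0, ssa_1   (ssa_2 is also used in another block)
 *
 * the load_const keeps its SSA def, but its uses are rewritten to a fresh
 * register that a new mov fills, and the fadd's destination becomes a
 * register because its def is not local to the block:
 *
 *    ssa_1 = load_const 1.0
 *    r0 = mov ssa_1
 *    r1 = fadd ssa_0, r0
 */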