"Fossies" - the Fresh Open Source Software Archive

Member "bison-3.4.1/src/AnnotationList.c" (26 Apr 2019, 33677 Bytes) of package /linux/misc/bison-3.4.1.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "AnnotationList.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 3.2.4_vs_3.3.

/* IELR's inadequacy annotation list.

   Copyright (C) 2009-2015, 2018-2019 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "system.h"

#include "AnnotationList.h"
#include "lalr.h"
#include "ielr.h"

/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *result;
  size_t contributions_size =
    contribution_count * sizeof result->contributions[0];
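  /* The contributions array is a trailing flexible-array-style member, so
     the node header and the array are carved out of one obstack block.  */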
  result = obstack_alloc (annotations_obstackp,
                          offsetof (AnnotationList, contributions)
                          + contributions_size);
  result->next = NULL;
  result->inadequacyNode = NULL;
  return result;
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
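  /* An "always" contribution is encoded as a NULL kernel-item bitset.  */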
  return self->contributions[ci] == NULL;
}

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to \c self,
 *     \c self was discarded, \c result is false, and the caller is responsible
 *     for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true, and
 *     \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions' kernel
 *         item bitsets interpreted as binary numbers.
 *   - The sorting has a few effects:
 *     - It accelerates elimination of identical annotations during insertion.
 *     - It determines how the output of \c AnnotationList__debug is sorted.
 *     - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
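      /* cmp < 0: self sorts before *node.  cmp > 0: self sorts after it.
         cmp == 0: self duplicates the annotation in *node.  */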
      int cmp = 0;
      ContributionIndex ci;
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              {
                size_t item;
                for (item = 0; cmp == 0 && item < nitems; ++item)
                  {
                    if (!Sbitset__test (self->contributions[ci], item))
                      {
                        if (Sbitset__test ((*node)->contributions[ci], item))
                          cmp = -1;
                      }
                    else if (!Sbitset__test ((*node)->contributions[ci], item))
                      cmp = 1;
                  }
              }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    *node = self;
  return self != NULL;
}

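/**
 * \pre
 *   - \c trans is the transition set of some state.
 * \post
 *   - \c result is a new bitset containing every token on which that state
 *     shifts.  The caller is responsible for freeing it.
 */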
static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

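/**
 * \pre
 *   - \c shift_tokens contains every token on which some state shifts.
 *   - \c reds is that state's reduction set, sorted on rule number (checked
 *     by the \c aver below).
 * \post
 *   - \c result is a new bitset containing every token on which the state
 *     has a shift-reduce or reduce-reduce conflict.  The caller is
 *     responsible for freeing it.
 */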
static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);

  bitset_copy (tokens, shift_tokens);
  for (int i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}

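/**
 * \pre
 *   - \c s reduces by \c the_rule with \c conflicted_token as a lookahead.
 * \post
 *   - If \c conflicted_token is an always-follow of the goto in \c s on
 *     \c the_rule's LHS, \c result = true and \c *items is untouched.
 *   - Otherwise, \c result = false and \c *items is a new Sbitset on
 *     \c annotations_obstackp marking the kernel items of \c s whose
 *     lookahead sets contribute \c conflicted_token to that goto's follows.
 */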
static bool
AnnotationList__compute_lhs_contributions (state *s, rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}

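/**
 * \pre
 *   - \c self is an annotation on state \c s.
 * \post
 *   - For each predecessor of \c s, the contributions of \c self were
 *     translated onto that predecessor's kernel items, and the resulting
 *     annotation was recorded there (and propagated recursively to its own
 *     predecessors) unless it had no potential contributions, its dominant
 *     contribution proved split-stable, or an identical annotation was
 *     already recorded, in which cases it was discarded.
 */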
static void
AnnotationList__computePredecessorAnnotations (
  AnnotationList *self, state *s,
  bitsetv follow_kernel_items,
  bitsetv always_follows,
  state ***predecessors,
  bitset **item_lookahead_sets,
  AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  struct obstack *annotations_obstackp)
{
  for (state **predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;
      {
        for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number contribution_token =
              InadequacyList__getContributionToken (self->inadequacyNode, ci)
                ->content->number;
            if (AnnotationList__isContributionAlways (self, ci))
              {
                annotation_node->contributions[ci] = NULL;
                continue;
              }
            annotation_node->contributions[ci] =
              Sbitset__new_on_obstack ((*predecessor)->nitems,
                                       annotations_obstackp);
            {
              size_t predecessor_item = 0;
              Sbitset sbiter_item;
              Sbitset__Index self_item;
              SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                                 sbiter_item, self_item)
                {
                  /* If this kernel item is the beginning of a RHS, it must be
                     the kernel item in the start state, and so it has an empty
                     lookahead set.  Thus, it can't contribute to inadequacies,
                     and so it should never have been identified as a
                     contribution.  If, instead, this kernel item is the
                     successor of the start state's kernel item, the lookahead
                     set is still empty, and so it also should never have been
                     identified as a contribution.  This situation is fortunate
                     because we want to avoid the - 2 below in both cases.  */
                  aver (s->items[self_item] > 1);
                  /* If this kernel item is next to the beginning of the RHS,
                     then check all of the predecessor's goto follows for the
                     LHS.  */
                  if (item_number_is_rule_number (ritem[s->items[self_item]
                                                        - 2]))
                    {
                      unsigned rulei;
                      for (rulei = s->items[self_item];
                           !item_number_is_rule_number (ritem[rulei]);
                           ++rulei)
                        continue;
                      Sbitset items;
                      if (AnnotationList__compute_lhs_contributions (
                            *predecessor,
                            &rules[item_number_as_rule_number (ritem[rulei])],
                            contribution_token,
                            follow_kernel_items, always_follows, predecessors,
                            item_lookahead_sets, &items, annotations_obstackp))
                        {
                          obstack_free (annotations_obstackp,
                                        annotation_node->contributions[ci]);
                          annotation_node->contributions[ci] = NULL;
                          break;
                        }
                      else
                        {
                          Sbitset__or (annotation_node->contributions[ci],
                                       annotation_node->contributions[ci],
                                       items, (*predecessor)->nitems);
                          obstack_free (annotations_obstackp, items);
                        }
                    }
                  /* If this kernel item is later in the RHS, then check the
                     predecessor item's lookahead set.  */
                  else
                    {
                      /* We don't have to start the predecessor item search at
                         the beginning every time because items from both
                         states are sorted by their indices in ritem.  */
                      for (;
                           predecessor_item < (*predecessor)->nitems;
                           ++predecessor_item)
                        if ((*predecessor)->items[predecessor_item]
                            == s->items[self_item] - 1)
                          break;
                      aver (predecessor_item != (*predecessor)->nitems);
                      if (ielr_item_has_lookahead (*predecessor, 0,
                                                   predecessor_item,
                                                   contribution_token,
                                                   predecessors,
                                                   item_lookahead_sets))
                        Sbitset__set (annotation_node->contributions[ci],
                                      predecessor_item);
                    }
                }
            }
            if (annotation_node->contributions[ci])
              {
                Sbitset biter;
                Sbitset__Index i;
                SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                   (*predecessor)->nitems, biter, i)
                  {
                    potential_contribution = true;
                    if (!lookaheads)
                      {
                        lookaheads = xnmalloc ((*predecessor)->nitems,
                                               sizeof *lookaheads);
                        for (size_t j = 0; j < (*predecessor)->nitems; ++j)
                          lookaheads[j] = NULL;
                      }
                    if (!lookaheads[i])
                      lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                    bitset_set (lookaheads[i], contribution_token);
                  }
              }
          }
      }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation could
           not affect merging on this predecessor state or its eventual
           predecessor states.  Moreover, all contributions that affect
           whether the dominant contribution remains dominant must be "always"
           or "never" contributions in order for the dominant contribution to
           be split-stable.  Thus, the dominant contribution computation result
           in eventual successor states will not be affected by lookaheads
           tracked for this predecessor state.  (Also, as in the isocore
           compatibility test, we depend on the fact that isocores with equal
           dominant contributions will have the same dominant contribution when
           merged.  Otherwise, we might have to worry that the presence of a
           potential contribution might somehow be the culprit of that behavior
           and thus need to be tracked regardless of the split stability of the
           dominant contribution.)  Thus, go ahead and discard the annotation
           to save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads, true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            for (size_t i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists, annotation_counts,
                    annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}

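/**
 * Annotate state \c s for all conflicts manifesting in it.  (Summary of the
 * implementation below; \c AnnotationList.h declares this interface.)
 * \post
 *   - For each token on which \c s has a conflict, an inadequacy node was
 *     recorded in <tt>inadequacy_lists[s->number]</tt> and a matching
 *     annotation in <tt>annotation_lists[s->number]</tt>, with further
 *     annotations propagated to predecessor states, unless the conflict's
 *     resolution proved split-stable, in which case both were discarded.
 *   - \c annotation_counts and \c *max_contributionsp were updated to match.
 */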
void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  /* A consistent state has no lookahead sets and thus no conflicts, so
     leave its annotation list empty.  */
  if (s->consistent)
    return;

  bitsetv all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  bitset shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  bitset conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient,
         either here or converted inside InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      ContributionIndex contribution_count = 0;
      bool potential_contribution = false;

      /* Allocate the annotation node.  */
      {
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* Add a contribution for each reduction that has conflicted_token as a
         lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both are
                       sorted on rule number.  */
                    while (!item_number_is_rule_number (
                             ritem[s->items[item_i]])
                           || item_number_as_rule_number (
                                ritem[s->items[item_i]])
                              != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci], item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token, follow_kernel_items,
                           always_follows, predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation and
           the inadequacy to save space now plus time during state splitting.
         - Otherwise, record the annotation and the inadequacy, and compute any
           resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            actions = NULL;
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                {
                  bool b =
                    AnnotationList__insertInto (annotation_node,
                                                &annotation_lists[s->number],
                                                s->nitems);
                  aver (b); (void) b;
                }
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of an S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->content->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}

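/**
 * Print a human-readable description of every annotation in \c self, which
 * annotates a state with \c nitems kernel items, to \c stderr, indented by
 * \c spaces spaces.  (Summary of the implementation below; the interface is
 * declared in \c AnnotationList.h.)
 */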
void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      for (int j = 0; j < spaces; ++j)
        putc (' ', stderr);
      fprintf (stderr, "Annotation %d (manifesting state %d):\n",
               ai, a->inadequacyNode->manifestingState->number);
      {
        bitset_bindex rulei
          = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
        for (ContributionIndex ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
          {
            symbol_number token =
              InadequacyList__getContributionToken (a->inadequacyNode, ci)
                ->content->number;
            for (int j = 0; j < spaces+2; ++j)
              putc (' ', stderr);
            if (ci == InadequacyList__getShiftContributionIndex (
                        a->inadequacyNode))
              fprintf (stderr, "Contributes shift of token %d.\n", token);
            else
              {
                fprintf (stderr, "Contributes token %d", token);
                aver (rulei != BITSET_BINDEX_MAX);
                fprintf (stderr, " as lookahead, rule number %d",
                         a->inadequacyNode->manifestingState
                           ->reductions->rules[rulei]->number);
                rulei =
                  bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                               rulei+1);
                if (AnnotationList__isContributionAlways (a, ci))
                  fprintf (stderr, " always.");
                else
                  {
                    fprintf (stderr, ", items: ");
                    Sbitset__fprint (a->contributions[ci], nitems, stderr);
                  }
                fprintf (stderr, "\n");
              }
          }
      }
    }
}

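/**
 * \post
 *   - <tt>lookahead_filter[i]</tt> contains token \c t iff some potential
 *     contribution in \c self depends on whether kernel item \c i of the
 *     annotated state (which has \c nitems kernel items) has \c t as a
 *     lookahead.  (Summary of the implementation below; the interface is
 *     declared in \c AnnotationList.h.)
 */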
void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (!AnnotationList__isContributionAlways (self, ci))
        {
          symbol_number token =
            InadequacyList__getContributionToken (self->inadequacyNode, ci)
            ->content->number;
          Sbitset__Index item;
          Sbitset biter;
          SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
            bitset_set (lookahead_filter[item], token);
        }
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that \c self
 *     annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of some
 *     isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every kernel
 *       item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the state
 *     described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)
      ->content->number;
    Sbitset__Index item;
    Sbitset biter;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

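/**
 * \pre
 *   - \c self, \c nitems, and \c lookaheads are as for
 *     \c AnnotationList__stateMakesContribution above.
 * \post
 *   - \c result is the index of the contribution that dominates (that is,
 *     wins) the conflict in the state described by \c lookaheads;
 *     \c ContributionIndex__error_action if a \c %nonassoc token forces an
 *     error action; or \c ContributionIndex__none if no contribution is made
 *     or if \c require_split_stable and the dominant contribution could
 *     change under state splitting.  (Summary of the implementation below;
 *     the interface is declared in \c AnnotationList.h.)
 */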
ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems, bitset *lookaheads,
                                             bool require_split_stable)
{
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  symbol *token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        int shift_precedence = token->content->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in a R/R comparison, and whether any reduction dominates
           the shift so that the R/R comparison is actually needed.  */
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int actioni;
        ContributionIndex ci;
        for (ci = 0,
               actioni = bitset_first (self->inadequacyNode->inadequacy
                                       .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci,
               actioni = bitset_next (self->inadequacyNode->inadequacy
                                      .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->content->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can cause
               an error action.  */
            if (reduce_precedence == shift_precedence
                && token->content->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out which
               reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
    if (AnnotationList__stateMakesContribution (self, nitems, ci,
                                                lookaheads))
      {
        if (require_split_stable
            && !AnnotationList__isContributionAlways (self, ci))
          return ContributionIndex__none;
        return ci;
      }
  return ContributionIndex__none;
}