diff gcc/sel-sched-ir.c @ 145:1830386684a0

gcc-9.2.0
author anatofuz
date Thu, 13 Feb 2020 11:34:05 +0900
parents 84e7813d76e9
children
--- a/gcc/sel-sched-ir.c	Thu Oct 25 07:37:49 2018 +0900
+++ b/gcc/sel-sched-ir.c	Thu Feb 13 11:34:05 2020 +0900
@@ -1,5 +1,5 @@
 /* Instruction scheduling pass.  Selective scheduler and pipeliner.
-   Copyright (C) 2006-2018 Free Software Foundation, Inc.
+   Copyright (C) 2006-2020 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -33,7 +33,6 @@
 #include "insn-config.h"
 #include "insn-attr.h"
 #include "recog.h"
-#include "params.h"
 #include "target.h"
 #include "sched-int.h"
 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
@@ -61,7 +60,7 @@
 static struct common_sched_info_def sel_common_sched_info;
 
 /* The loop nest being pipelined.  */
-struct loop *current_loop_nest;
+class loop *current_loop_nest;
 
 /* LOOP_NESTS is a vector containing the corresponding loop nest for
    each region.  */
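This hunk tracks GCC 10's cfgloop.h, where the loop type is now spelled with the class keyword; the pointer declarations in this file are updated so the class-key matches everywhere, since mixing struct and class tags for the same type draws warnings from some compilers. A minimal illustration, not taken from the patch:

/* Illustration only: once cfgloop.h spells the type "class loop",
   declarations elsewhere should use the same class-key.  */
class loop;                            /* forward declaration, as in GCC 10's cfgloop.h */
extern class loop *current_loop_nest;  /* pointer declarations follow suit */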
@@ -311,9 +310,10 @@
     flist_remove (lp);
 }
 
-/* Add ORIGINAL_INSN the def list DL honoring CROSSES_CALL.  */
+/* Add ORIGINAL_INSN to the def list DL honoring CROSSED_CALL_ABIS.  */
 void
-def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
+def_list_add (def_list_t *dl, insn_t original_insn,
+	      unsigned int crossed_call_abis)
 {
   def_t d;
 
@@ -321,7 +321,7 @@
   d = DEF_LIST_DEF (*dl);
 
   d->orig_insn = original_insn;
-  d->crosses_call = crosses_call;
+  d->crossed_call_abis = crossed_call_abis;
 }
 
 
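The new parameter is a bitmask of the ABIs of the calls a definition has to cross, replacing the old yes/no flag, so register availability can be judged per call ABI. GCC 10's function-abi.h identifies each call's ABI by a small integer id. A hypothetical caller might collect the mask like this; the helper itself is made up, only insn_callee_abi () and function_abi::id () are the real GCC 10 interface:

/* Hypothetical illustration: walk back from USE_POINT to DEF_INSN,
   record the ABI of every call crossed, then add the definition.  */
static void
record_def_crossing_calls (def_list_t *dl, insn_t def_insn, insn_t use_point)
{
  unsigned int crossed_call_abis = 0;

  for (insn_t insn = use_point; insn != def_insn; insn = PREV_INSN (insn))
    if (CALL_P (insn))
      crossed_call_abis |= 1 << insn_callee_abi (insn).id ();

  def_list_add (dl, def_insn, crossed_call_abis);
}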
@@ -424,7 +424,7 @@
 }
 
 /* Functions to work with dependence contexts.
-   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
+   Dc (aka deps context, aka deps_t, aka class deps_desc *) is short for dependence
    context.  It accumulates information about processed insns to decide if
    current insn is dependent on the processed ones.  */
 
@@ -440,7 +440,7 @@
 static deps_t
 alloc_deps_context (void)
 {
-  return XNEW (struct deps_desc);
+  return XNEW (class deps_desc);
 }
 
 /* Allocate and initialize dep context.  */
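A dependence context is the scratch object the scheduler feeds already-processed insns into; later queries ask whether a candidate insn conflicts with anything recorded there. A minimal sketch of the lifecycle, assuming this file's static helpers (create_deps_context, advance_deps_context, delete_deps_context) keep their GCC 9 shapes; the walker below is illustrative only:

/* Sketch only: build a context, record every real insn from HEAD to
   TAIL in it, then release it.  create_deps_context allocates the
   class deps_desc shown above and initializes it.  */
static void
scan_range_into_deps_context (insn_t head, insn_t tail)
{
  deps_t dc = create_deps_context ();

  for (insn_t insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      advance_deps_context (dc, insn);   /* accumulate INSN's sets and uses */

  delete_deps_context (dc);              /* release the context */
}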
@@ -703,11 +703,6 @@
       else
         if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
           {
-            /* Would be weird if same insn is successor of several fallthrough
-               edges.  */
-            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
-                        != BLOCK_FOR_INSN (last_scheduled_insn_old));
-
             state_free (FENCE_STATE (f));
             FENCE_STATE (f) = state;
 
@@ -2666,12 +2661,9 @@
     return;
 
   HARD_REG_SET temp;
-  unsigned regno;
-  hard_reg_set_iterator hrsi;
 
   get_implicit_reg_pending_clobbers (&temp, insn);
-  EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
-    SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
+  IOR_REG_SET_HRS (IDATA_REG_SETS (id), temp);
 }
 
 /* Setup register sets describing INSN in ID.  */
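IOR_REG_SET_HRS is GCC 10's shorthand for OR-ing a whole HARD_REG_SET into a regset, which is why the explicit per-register loop above could be dropped. A sketch of an equivalent use, assuming the regs.h macro and the function-abi.h accessor; the helper name is made up:

/* Sketch: add every register an ABI may clobber into USED in one go,
   instead of iterating with EXECUTE_IF_SET_IN_HARD_REG_SET and calling
   SET_REGNO_REG_SET for each bit.  */
static void
add_abi_clobbers (regset used, const function_abi &abi)
{
  HARD_REG_SET clobbers = abi.full_and_partial_reg_clobbers ();
  IOR_REG_SET_HRS (used, clobbers);
}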
@@ -2754,7 +2746,7 @@
 static void
 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
 {
-  struct deps_desc _dc, *dc = &_dc;
+  class deps_desc _dc, *dc = &_dc;
 
   deps_init_id_data.where = DEPS_IN_NOWHERE;
   deps_init_id_data.id = id;
@@ -3395,7 +3387,7 @@
 {
   int i;
   ds_t ds;
-  struct deps_desc *dc;
+  class deps_desc *dc;
 
   if (INSN_SIMPLEJUMP_P (pred))
     /* Unconditional jump is just a transfer of control flow.
@@ -3782,7 +3774,7 @@
 
           if (!(e->flags & EDGE_FALLTHRU))
             {
-	      /* We can not invalidate computed topological order by moving
+	      /* We cannot invalidate computed topological order by moving
 	         the edge destination block (E->SUCC) along a fallthru edge.
 
 		 We will update dominators here only when we'll get
@@ -5402,7 +5394,7 @@
 
   if (current_loop_nest)
     {
-      struct loop *loop;
+      class loop *loop;
 
       for (loop = current_loop_nest; loop; loop = loop_outer (loop))
         if (considered_for_pipelining_p (loop) && loop->latch == from)
@@ -5642,6 +5634,8 @@
 			   recompute_dominator (CDI_DOMINATORS, to));
   set_immediate_dominator (CDI_DOMINATORS, orig_dest,
 			   recompute_dominator (CDI_DOMINATORS, orig_dest));
+  if (jump && sel_bb_head_p (jump))
+    compute_live (jump);
 }
 
 /* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
@@ -5702,6 +5696,8 @@
       set_immediate_dominator (CDI_DOMINATORS, orig_dest,
                                recompute_dominator (CDI_DOMINATORS, orig_dest));
     }
+  if (jump && sel_bb_head_p (jump))
+    compute_live (jump);
   return recompute_toporder_p;
 }
 
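Both redirection wrappers now refresh liveness when the jump ends up heading a basic block: sel-sched caches a live set per block head, and redirecting an edge can make JUMP the new head, so the cached set must be recomputed. A sketch of the shared guard, assuming compute_live's usual sel-sched.c signature; the wrapper name is made up:

/* Sketch: refresh the cached per-block live set when JUMP has become
   the first insn of its block after an edge redirection.  */
static void
maybe_refresh_live_set (insn_t jump)
{
  if (jump && sel_bb_head_p (jump))
    compute_live (jump);   /* recompute liveness starting at JUMP */
}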
@@ -6003,11 +5999,11 @@
 /* Create a region for LOOP and return its number.  If we don't want
    to pipeline LOOP, return -1.  */
 static int
-make_region_from_loop (struct loop *loop)
+make_region_from_loop (class loop *loop)
 {
   unsigned int i;
   int new_rgn_number = -1;
-  struct loop *inner;
+  class loop *inner;
 
   /* Basic block index, to be assigned to BLOCK_TO_BB.  */
   int bb_ord_index = 0;
@@ -6015,7 +6011,7 @@
   basic_block preheader_block;
 
   if (loop->num_nodes
-      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+      > (unsigned) param_max_pipeline_region_blocks)
     return -1;
 
   /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
@@ -6024,7 +6020,7 @@
       return -1;
 
   loop->ninsns = num_loop_insns (loop);
-  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+  if ((int) loop->ninsns > param_max_pipeline_region_insns)
     return -1;
 
   loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
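The PARAM_VALUE (...) macro and the params.h include (dropped near the top of this diff) were replaced in GCC 10 by plain global variables generated from params.opt, so --param limits are now read directly. An illustrative helper, not part of the patch; only the param_max_pipeline_region_insns global and num_loop_insns are assumed from GCC 10:

/* Illustration only: the GCC 9 spelling is kept in the comment for
   comparison with the direct read of the new global.  */
static bool
loop_too_big_to_pipeline_p (class loop *loop)
{
  /* GCC 9:  (int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS)  */
  return num_loop_insns (loop) > param_max_pipeline_region_insns;
}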
@@ -6096,9 +6092,9 @@
    pipelined before outer loops.  Returns true when a region for LOOP
    is created.  */
 static bool
-make_regions_from_loop_nest (struct loop *loop)
-{
-  struct loop *cur_loop;
+make_regions_from_loop_nest (class loop *loop)
+{
+  class loop *cur_loop;
   int rgn_number;
 
   /* Traverse all inner nodes of the loop.  */
@@ -6134,7 +6130,7 @@
   recompute_rev_top_order ();
 }
 
-/* Returns a struct loop for region RGN.  */
+/* Returns a class loop for region RGN.  */
 loop_p
 get_loop_nest_for_rgn (unsigned int rgn)
 {
@@ -6148,7 +6144,7 @@
 
 /* True when LOOP was included into pipelining regions.   */
 bool
-considered_for_pipelining_p (struct loop *loop)
+considered_for_pipelining_p (class loop *loop)
 {
   if (loop_depth (loop) == 0)
     return false;
@@ -6250,7 +6246,7 @@
 /* Free data structures used in pipelining of loops.  */
 void sel_finish_pipelining (void)
 {
-  struct loop *loop;
+  class loop *loop;
 
   /* Release aux fields so we don't free them later by mistake.  */
   FOR_EACH_LOOP (loop, 0)
@@ -6325,7 +6321,7 @@
 {
   if (current_loop_nest)
     {
-      struct loop *outer;
+      class loop *outer;
 
       if (preheader_removed)
         return false;