diff gcc/tree-ssa-propagate.c @ 132:d34655255c78

update gcc-8.2
author mir3636
date Thu, 25 Oct 2018 10:21:07 +0900
parents 84e7813d76e9
children 1830386684a0
--- a/gcc/tree-ssa-propagate.c	Thu Oct 25 08:08:40 2018 +0900
+++ b/gcc/tree-ssa-propagate.c	Thu Oct 25 10:21:07 2018 +0900
@@ -1,5 +1,5 @@
 /* Generic SSA value propagation engine.
-   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   Copyright (C) 2004-2018 Free Software Foundation, Inc.
    Contributed by Diego Novillo <dnovillo@redhat.com>
 
    This file is part of GCC.
@@ -108,55 +108,26 @@
      [3] Advanced Compiler Design and Implementation,
 	 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
 
-/* Function pointers used to parameterize the propagation engine.  */
-static ssa_prop_visit_stmt_fn ssa_prop_visit_stmt;
-static ssa_prop_visit_phi_fn ssa_prop_visit_phi;
-
-/* Worklist of control flow edge destinations.  This contains
+/* Worklists of control flow edge destinations.  This contains
    the CFG order number of the blocks so we can iterate in CFG
-   order by visiting in bit-order.  */
+   order by visiting in bit-order.  We use two worklists to
+   first make forward progress before iterating.  */
 static bitmap cfg_blocks;
+static bitmap cfg_blocks_back;
 static int *bb_to_cfg_order;
 static int *cfg_order_to_bb;
 
-/* Worklist of SSA edges which will need reexamination as their
+/* Worklists of SSA edges which will need reexamination as their
    definition has changed.  SSA edges are def-use edges in the SSA
    web.  For each D-U edge, we store the target statement or PHI node
-   UID in a bitmap.  UIDs order stmts in execution order.   */
+   UID in a bitmap.  UIDs order stmts in execution order.  We use
+   two worklists to first make forward progress before iterating.  */
 static bitmap ssa_edge_worklist;
+static bitmap ssa_edge_worklist_back;
 static vec<gimple *> uid_to_stmt;
 
-/* Return true if the block worklist empty.  */
-
-static inline bool
-cfg_blocks_empty_p (void)
-{
-  return bitmap_empty_p (cfg_blocks);
-}
-
-
-/* Add a basic block to the worklist.  The block must not be the ENTRY
-   or EXIT block.  */
-
-static void
-cfg_blocks_add (basic_block bb)
-{
-  gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
-	      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
-  bitmap_set_bit (cfg_blocks, bb_to_cfg_order[bb->index]);
-}
-
-
-/* Remove a block from the worklist.  */
-
-static basic_block
-cfg_blocks_get (void)
-{
-  gcc_assert (!cfg_blocks_empty_p ());
-  int order_index = bitmap_first_set_bit (cfg_blocks);
-  bitmap_clear_bit (cfg_blocks, order_index);
-  return BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb [order_index]);
-}
+/* Current RPO index in the iteration.  */
+static int curr_order;
 
 
 /* We have just defined a new value for VAR.  If IS_VARYING is true,
@@ -172,14 +143,28 @@
   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
     {
       gimple *use_stmt = USE_STMT (use_p);
+      if (!prop_simulate_again_p (use_stmt))
+	continue;
 
       /* If we did not yet simulate the block wait for this to happen
          and do not add the stmt to the SSA edge worklist.  */
-      if (! (gimple_bb (use_stmt)->flags & BB_VISITED))
+      basic_block use_bb = gimple_bb (use_stmt);
+      if (! (use_bb->flags & BB_VISITED))
 	continue;
 
-      if (prop_simulate_again_p (use_stmt)
-	  && bitmap_set_bit (ssa_edge_worklist, gimple_uid (use_stmt)))
+      /* If this is a use on a not yet executable edge do not bother to
+	 queue it.  */
+      if (gimple_code (use_stmt) == GIMPLE_PHI
+	  && !(EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->flags
+	       & EDGE_EXECUTABLE))
+	continue;
+
+      bitmap worklist;
+      if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
+	worklist = ssa_edge_worklist_back;
+      else
+	worklist = ssa_edge_worklist;
+      if (bitmap_set_bit (worklist, gimple_uid (use_stmt)))
 	{
 	  uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
 	  if (dump_file && (dump_flags & TDF_DETAILS))
@@ -207,7 +192,11 @@
 
   e->flags |= EDGE_EXECUTABLE;
 
-  cfg_blocks_add (bb);
+  int bb_order = bb_to_cfg_order[bb->index];
+  if (bb_order < curr_order)
+    bitmap_set_bit (cfg_blocks_back, bb_order);
+  else
+    bitmap_set_bit (cfg_blocks, bb_order);
 
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
@@ -217,8 +206,8 @@
 
 /* Simulate the execution of STMT and update the work lists accordingly.  */
 
-static void
-simulate_stmt (gimple *stmt)
+void
+ssa_propagation_engine::simulate_stmt (gimple *stmt)
 {
   enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
   edge taken_edge = NULL;
@@ -234,11 +223,11 @@
 
   if (gimple_code (stmt) == GIMPLE_PHI)
     {
-      val = ssa_prop_visit_phi (as_a <gphi *> (stmt));
+      val = visit_phi (as_a <gphi *> (stmt));
       output_name = gimple_phi_result (stmt);
     }
   else
-    val = ssa_prop_visit_stmt (stmt, &taken_edge, &output_name);
+    val = visit_stmt (stmt, &taken_edge, &output_name);
 
   if (val == SSA_PROP_VARYING)
     {
@@ -314,39 +303,12 @@
     }
 }
 
-/* Process an SSA edge worklist.  WORKLIST is the SSA edge worklist to
-   drain.  This pops statements off the given WORKLIST and processes
-   them until one statement was simulated or there are no more statements
-   on WORKLIST.  We take a pointer to WORKLIST because it may be reallocated
-   when an SSA edge is added to it in simulate_stmt.  Return true if a stmt
-   was simulated.  */
-
-static void
-process_ssa_edge_worklist ()
-{
-  /* Process the next entry from the worklist.  */
-  unsigned stmt_uid = bitmap_first_set_bit (ssa_edge_worklist);
-  bitmap_clear_bit (ssa_edge_worklist, stmt_uid);
-  gimple *stmt = uid_to_stmt[stmt_uid];
-
-  /* We should not have stmts in not yet simulated BBs on the worklist.  */
-  gcc_assert (gimple_bb (stmt)->flags & BB_VISITED);
-
-  if (dump_file && (dump_flags & TDF_DETAILS))
-    {
-      fprintf (dump_file, "\nSimulating statement: ");
-      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
-    }
-
-  simulate_stmt (stmt);
-}
-
 
 /* Simulate the execution of BLOCK.  Evaluate the statement associated
    with each variable reference inside the block.  */
 
-static void
-simulate_block (basic_block block)
+void
+ssa_propagation_engine::simulate_block (basic_block block)
 {
   gimple_stmt_iterator gsi;
 
@@ -418,6 +380,9 @@
 
   /* Worklists of SSA edges.  */
   ssa_edge_worklist = BITMAP_ALLOC (NULL);
+  ssa_edge_worklist_back = BITMAP_ALLOC (NULL);
+  bitmap_tree_view (ssa_edge_worklist);
+  bitmap_tree_view (ssa_edge_worklist_back);
 
   /* Worklist of basic-blocks.  */
   bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
@@ -427,9 +392,7 @@
   for (int i = 0; i < n; ++i)
     bb_to_cfg_order[cfg_order_to_bb[i]] = i;
   cfg_blocks = BITMAP_ALLOC (NULL);
-
-  if (dump_file && (dump_flags & TDF_DETAILS))
-    dump_immediate_uses (dump_file);
+  cfg_blocks_back = BITMAP_ALLOC (NULL);
 
   /* Initially assume that every edge in the CFG is not executable.
      (including the edges coming out of the entry block).  Mark blocks
@@ -475,9 +438,11 @@
 ssa_prop_fini (void)
 {
   BITMAP_FREE (cfg_blocks);
+  BITMAP_FREE (cfg_blocks_back);
   free (bb_to_cfg_order);
   free (cfg_order_to_bb);
   BITMAP_FREE (ssa_edge_worklist);
+  BITMAP_FREE (ssa_edge_worklist_back);
   uid_to_stmt.release ();
 }
 
@@ -781,36 +746,72 @@
     return false;
 }
 
-
 /* Entry point to the propagation engine.
 
-   VISIT_STMT is called for every statement visited.
-   VISIT_PHI is called for every PHI node visited.  */
+   The VISIT_STMT virtual function is called for every statement
+   visited and the VISIT_PHI virtual function is called for every PHI
+   node visited.  */
 
 void
-ssa_propagate (ssa_prop_visit_stmt_fn visit_stmt,
-	       ssa_prop_visit_phi_fn visit_phi)
+ssa_propagation_engine::ssa_propagate (void)
 {
-  ssa_prop_visit_stmt = visit_stmt;
-  ssa_prop_visit_phi = visit_phi;
-
   ssa_prop_init ();
 
-  /* Iterate until the worklists are empty.  */
-  while (! cfg_blocks_empty_p ()
-	 || ! bitmap_empty_p (ssa_edge_worklist))
+  curr_order = 0;
+
+  /* Iterate until the worklists are empty.  We iterate both blocks
+     and stmts in RPO order, using sets of two worklists to first
+     complete the current iteration before iterating over backedges.  */
+  while (1)
     {
-      /* First simulate whole blocks.  */
-      if (! cfg_blocks_empty_p ())
+      int next_block_order = (bitmap_empty_p (cfg_blocks)
+			      ? -1 : bitmap_first_set_bit (cfg_blocks));
+      int next_stmt_uid = (bitmap_empty_p (ssa_edge_worklist)
+			   ? -1 : bitmap_first_set_bit (ssa_edge_worklist));
+      if (next_block_order == -1 && next_stmt_uid == -1)
 	{
-	  /* Pull the next block to simulate off the worklist.  */
-	  basic_block dest_block = cfg_blocks_get ();
-	  simulate_block (dest_block);
+	  if (bitmap_empty_p (cfg_blocks_back)
+	      && bitmap_empty_p (ssa_edge_worklist_back))
+	    break;
+
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    fprintf (dump_file, "Regular worklists empty, now processing "
+		     "backedge destinations\n");
+	  std::swap (cfg_blocks, cfg_blocks_back);
+	  std::swap (ssa_edge_worklist, ssa_edge_worklist_back);
 	  continue;
 	}
 
-      /* Then simulate from the SSA edge worklist.  */
-      process_ssa_edge_worklist ();
+      int next_stmt_bb_order = -1;
+      gimple *next_stmt = NULL;
+      if (next_stmt_uid != -1)
+	{
+	  next_stmt = uid_to_stmt[next_stmt_uid];
+	  next_stmt_bb_order = bb_to_cfg_order[gimple_bb (next_stmt)->index];
+	}
+
+      /* Pull the next block to simulate off the worklist if it comes first.  */
+      if (next_block_order != -1
+	  && (next_stmt_bb_order == -1
+	      || next_block_order <= next_stmt_bb_order))
+	{
+	  curr_order = next_block_order;
+	  bitmap_clear_bit (cfg_blocks, next_block_order);
+	  basic_block bb
+	    = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb [next_block_order]);
+	  simulate_block (bb);
+	}
+      /* Else simulate from the SSA edge worklist.  */
+      else
+	{
+	  curr_order = next_stmt_bb_order;
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    {
+	      fprintf (dump_file, "\nSimulating statement: ");
+	      print_gimple_stmt (dump_file, next_stmt, 0, dump_flags);
+	    }
+	  simulate_stmt (next_stmt);
+	}
     }
 
   ssa_prop_fini ();
@@ -861,7 +862,7 @@
    PROP_VALUE. Return true if at least one reference was replaced.  */
 
 bool
-replace_uses_in (gimple *stmt, ssa_prop_get_value_fn get_value)
+substitute_and_fold_engine::replace_uses_in (gimple *stmt)
 {
   bool replaced = false;
   use_operand_p use;
@@ -870,7 +871,7 @@
   FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
     {
       tree tuse = USE_FROM_PTR (use);
-      tree val = (*get_value) (tuse);
+      tree val = get_value (tuse);
 
       if (val == tuse || val == NULL_TREE)
 	continue;
@@ -899,8 +900,8 @@
 /* Replace propagated values into all the arguments for PHI using the
    values from PROP_VALUE.  */
 
-static bool
-replace_phi_args_in (gphi *phi, ssa_prop_get_value_fn get_value)
+bool
+substitute_and_fold_engine::replace_phi_args_in (gphi *phi)
 {
   size_t i;
   bool replaced = false;
@@ -917,7 +918,7 @@
 
       if (TREE_CODE (arg) == SSA_NAME)
 	{
-	  tree val = (*get_value) (arg);
+	  tree val = get_value (arg);
 
 	  if (val && val != arg && may_propagate_copy (arg, val))
 	    {
@@ -968,10 +969,10 @@
 {
 public:
     substitute_and_fold_dom_walker (cdi_direction direction,
-				    ssa_prop_get_value_fn get_value_fn_,
-				    ssa_prop_fold_stmt_fn fold_fn_)
-	: dom_walker (direction), get_value_fn (get_value_fn_),
-      fold_fn (fold_fn_), something_changed (false)
+				    class substitute_and_fold_engine *engine)
+	: dom_walker (direction),
+          something_changed (false),
+	  substitute_and_fold_engine (engine)
     {
       stmts_to_remove.create (0);
       stmts_to_fixup.create (0);
@@ -987,12 +988,12 @@
     virtual edge before_dom_children (basic_block);
     virtual void after_dom_children (basic_block) {}
 
-    ssa_prop_get_value_fn get_value_fn;
-    ssa_prop_fold_stmt_fn fold_fn;
     bool something_changed;
     vec<gimple *> stmts_to_remove;
     vec<gimple *> stmts_to_fixup;
     bitmap need_eh_cleanup;
+
+    class substitute_and_fold_engine *substitute_and_fold_engine;
 };
 
 edge
@@ -1009,7 +1010,7 @@
 	continue;
       if (res && TREE_CODE (res) == SSA_NAME)
 	{
-	  tree sprime = get_value_fn (res);
+	  tree sprime = substitute_and_fold_engine->get_value (res);
 	  if (sprime
 	      && sprime != res
 	      && may_propagate_copy (res, sprime))
@@ -1018,7 +1019,7 @@
 	      continue;
 	    }
 	}
-      something_changed |= replace_phi_args_in (phi, get_value_fn);
+      something_changed |= substitute_and_fold_engine->replace_phi_args_in (phi);
     }
 
   /* Propagate known values into stmts.  In some case it exposes
@@ -1035,11 +1036,11 @@
       tree lhs = gimple_get_lhs (stmt);
       if (lhs && TREE_CODE (lhs) == SSA_NAME)
 	{
-	  tree sprime = get_value_fn (lhs);
+	  tree sprime = substitute_and_fold_engine->get_value (lhs);
 	  if (sprime
 	      && sprime != lhs
 	      && may_propagate_copy (lhs, sprime)
-	      && !stmt_could_throw_p (stmt)
+	      && !stmt_could_throw_p (cfun, stmt)
 	      && !gimple_has_side_effects (stmt)
 	      /* We have to leave ASSERT_EXPRs around for jump-threading.  */
 	      && (!is_gimple_assign (stmt)
@@ -1064,7 +1065,7 @@
 			   && gimple_call_noreturn_p (stmt));
 
       /* Replace real uses in the statement.  */
-      did_replace |= replace_uses_in (stmt, get_value_fn);
+      did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);
 
       /* If we made a replacement, fold the statement.  */
       if (did_replace)
@@ -1077,16 +1078,13 @@
       /* Some statements may be simplified using propagator
 	 specific information.  Do this before propagating
 	 into the stmt to not disturb pass specific information.  */
-      if (fold_fn)
+      update_stmt_if_modified (stmt);
+      if (substitute_and_fold_engine->fold_stmt(&i))
 	{
-	  update_stmt_if_modified (stmt);
-	  if ((*fold_fn)(&i))
-	    {
-	      did_replace = true;
-	      prop_stats.num_stmts_folded++;
-	      stmt = gsi_stmt (i);
-	      gimple_set_modified (stmt, true);
-	    }
+	  did_replace = true;
+	  prop_stats.num_stmts_folded++;
+	  stmt = gsi_stmt (i);
+	  gimple_set_modified (stmt, true);
 	}
 
       /* If this is a control statement the propagator left edges
@@ -1172,19 +1170,15 @@
    Return TRUE when something changed.  */
 
 bool
-substitute_and_fold (ssa_prop_get_value_fn get_value_fn,
-		     ssa_prop_fold_stmt_fn fold_fn)
+substitute_and_fold_engine::substitute_and_fold (void)
 {
-  gcc_assert (get_value_fn);
-
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "\nSubstituting values and folding statements\n\n");
 
   memset (&prop_stats, 0, sizeof (prop_stats));
 
   calculate_dominance_info (CDI_DOMINATORS);
-  substitute_and_fold_dom_walker walker(CDI_DOMINATORS,
-					get_value_fn, fold_fn);
+  substitute_and_fold_dom_walker walker (CDI_DOMINATORS, this);
   walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
 
   /* We cannot remove stmts during the BB walk, especially not release