diff gcc/tree-call-cdce.c @ 145:1830386684a0

gcc-9.2.0
author anatofuz
date Thu, 13 Feb 2020 11:34:05 +0900
parents 84e7813d76e9
--- a/gcc/tree-call-cdce.c	Thu Oct 25 07:37:49 2018 +0900
+++ b/gcc/tree-call-cdce.c	Thu Feb 13 11:34:05 2020 +0900
@@ -1,5 +1,5 @@
 /* Conditional Dead Call Elimination pass for the GNU compiler.
-   Copyright (C) 2008-2018 Free Software Foundation, Inc.
+   Copyright (C) 2008-2020 Free Software Foundation, Inc.
    Contributed by Xinliang David Li <davidxl@google.com>
 
 This file is part of GCC.
@@ -93,10 +93,10 @@
 
 	y = sqrt (x);
      ==>
-	y = IFN_SQRT (x);
 	if (__builtin_isless (x, 0))
-	    sqrt (x);
-
+	  y = sqrt (x);
+	else
+	  y = IFN_SQRT (x);
      In the vast majority of cases we should then never need to call sqrt.
 
    Note that library functions are not supposed to clear errno to zero without
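
One way to see this transformation in practice (a sketch; the exact dump
text depends on the compiler build) is to inspect the cdce pass dump on a
one-line test case:

    /* test.c -- compile with:  gcc -O2 -fmath-errno -fdump-tree-cdce test.c
       The .cdce dump should show the sqrt call split into the guarded
       errno-setting libcall and the IFN_SQRT fast path, as in the comment
       above.  */
    #include <math.h>

    double
    f (double x)
    {
      return sqrt (x);
    }
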
@@ -172,7 +172,7 @@
       || (mode == DFmode
 	  && (rfmt == &ieee_double_format || rfmt == &mips_double_format
 	      || rfmt == &motorola_double_format))
-      /* For long double, we can not really check XFmode
+      /* For long double, we cannot really check XFmode
          which is only defined on Intel platforms.
          Candidate pre-selection using builtin function
          code guarantees that we are checking formats
@@ -362,6 +362,40 @@
 	  || find_fallthru_edge (gimple_bb (call)->succs));
 }
 
+/* For a comparison code CODE, return the comparison code we should use
+   if we do not HONOR_NANS.  */
+
+static enum tree_code
+comparison_code_if_no_nans (tree_code code)
+{
+  switch (code)
+    {
+    case UNLT_EXPR:
+      return LT_EXPR;
+    case UNGT_EXPR:
+      return GT_EXPR;
+    case UNLE_EXPR:
+      return LE_EXPR;
+    case UNGE_EXPR:
+      return GE_EXPR;
+    case UNEQ_EXPR:
+      return EQ_EXPR;
+    case LTGT_EXPR:
+      return NE_EXPR;
+
+    case LT_EXPR:
+    case GT_EXPR:
+    case LE_EXPR:
+    case GE_EXPR:
+    case EQ_EXPR:
+    case NE_EXPR:
+      return code;
+
+    default:
+      gcc_unreachable ();
+    }
+}
+
 /* A helper function to generate gimple statements for one bound
    comparison, so that the built-in function is called whenever
    TCODE <ARG, LBUB> is *false*.  TEMP_NAME1/TEMP_NAME2 are names
@@ -378,6 +412,9 @@
 		   vec<gimple *> conds,
                    unsigned *nconds)
 {
+  if (!HONOR_NANS (arg))
+    tcode = comparison_code_if_no_nans (tcode);
+
   tree lbub_real_cst, lbub_cst, float_type;
   tree temp, tempn, tempc, tempcn;
   gassign *stmt1;
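
The rewrite above is only valid when NaNs cannot occur: for a NaN operand
the unordered comparisons (UNLT_EXPR and friends) are true while their
ordered counterparts are false.  A minimal standalone illustration of the
difference (plain C99, not GCC internals):

    #include <math.h>
    #include <stdio.h>

    int
    main (void)
    {
      double vals[] = { -1.0, 2.0, NAN };
      for (int i = 0; i < 3; i++)
        {
          double x = vals[i];
          /* !(x >= 0) behaves like UNLT (true for NaN); x < 0 behaves
             like LT (false for NaN).  The two agree on non-NaN inputs,
             which is what comparison_code_if_no_nans relies on.  */
          printf ("x=%g  UNLT: %d  LT: %d\n", x, !(x >= 0.0), x < 0.0);
        }
      return 0;
    }
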
@@ -756,14 +793,16 @@
 }
 
 /* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
-   conditions in CONDS is false.  */
+   conditions in CONDS is false.  Also, when BI_NEWCALL is non-null, move
+   it to a new basic block; it is called when all of the CONDS are true.  */
 
 static void
 shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
-					  unsigned int nconds)
+					  unsigned int nconds,
+					  gcall *bi_newcall = NULL)
 {
   gimple_stmt_iterator bi_call_bsi;
-  basic_block bi_call_bb, join_tgt_bb, guard_bb;
+  basic_block bi_call_bb, bi_newcall_bb, join_tgt_bb, guard_bb;
   edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
   edge bi_call_in_edge0, guard_bb_in_edge;
   unsigned tn_cond_stmts;
@@ -772,27 +811,26 @@
   gimple *cond_expr_start;
 
   /* The cfg we want to create looks like this:
-
-	   [guard n-1]         <- guard_bb (old block)
-	     |    \
-	     | [guard n-2]                   }
-	     |    / \                        }
-	     |   /  ...                      } new blocks
-	     |  /  [guard 0]                 }
-	     | /    /   |                    }
-	    [ call ]    |     <- bi_call_bb  }
-	     | \        |
-	     |  \       |
-	     |   [ join ]     <- join_tgt_bb (old iff call must end bb)
-	     |
+          [guard n-1]         <- guard_bb (old block)
+            |    \
+            | [guard n-2]                   }
+            |    / \                        }
+            |   /  ...                      } new blocks
+            |  /  [guard 0]                 }
+            | /  /    |                     }
+           [call]     |      <- bi_call_bb  }
+             \    [newcall]  <-bi_newcall_bb}
+              \       |
+                [join]       <- join_tgt_bb (old iff call must end bb)
 	 possible EH edges (only if [join] is old)
 
      When [join] is new, the immediate dominators for these blocks are:
 
      1. [guard n-1]: unchanged
      2. [call]: [guard n-1]
-     3. [guard m]: [guard m+1] for 0 <= m <= n-2
-     4. [join]: [guard n-1]
+     3. [newcall]: [guard 0]
+     4. [guard m]: [guard m+1] for 0 <= m <= n-2
+     5. [join]: [guard n-1]
 
     We punt for the more complex case of [join] being old and
      simply free the dominance info.  We also punt on postdominators,
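
For a single guard on sqrt, the shape being built corresponds roughly to
the following GIMPLE (illustrative SSA names and dump syntax, not actual
pass output):

    <guard 0>:    if (x_1 u< 0.0) goto <call>; else goto <newcall>;
    <call>:       y_2 = sqrt (x_1);     /* libcall; may set errno */
    <newcall>:    y_3 = .SQRT (x_1);    /* internal fn; expands inline */
    <join>:       y_4 = PHI <y_2(call), y_3(newcall)>
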
@@ -890,6 +928,47 @@
       edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
     }
 
+  /* Move BI_NEWCALL to a new basic block when it is non-null.  */
+  if (bi_newcall)
+    {
+      /* Get bi_newcall_bb by splitting the join_tgt_in_edge_fall_thru
+         edge, and move BI_NEWCALL to bi_newcall_bb.  */
+      bi_newcall_bb = split_edge (join_tgt_in_edge_fall_thru);
+      gimple_stmt_iterator to_gsi = gsi_start_bb (bi_newcall_bb);
+      gimple_stmt_iterator from_gsi = gsi_for_stmt (bi_newcall);
+      gsi_move_before (&from_gsi, &to_gsi);
+      join_tgt_in_edge_fall_thru = EDGE_SUCC (bi_newcall_bb, 0);
+      join_tgt_bb = join_tgt_in_edge_fall_thru->dest;
+
+      tree bi_newcall_lhs = gimple_call_lhs (bi_newcall);
+      tree bi_call_lhs = gimple_call_lhs (bi_call);
+      if (!bi_call_lhs)
+        {
+          bi_call_lhs = copy_ssa_name (bi_newcall_lhs);
+          gimple_call_set_lhs (bi_call, bi_call_lhs);
+          SSA_NAME_DEF_STMT (bi_call_lhs) = bi_call;
+        }
+
+      /* Create phi node for lhs of BI_CALL and BI_NEWCALL.  */
+      gphi *new_phi = create_phi_node (copy_ssa_name (bi_newcall_lhs),
+				       join_tgt_bb);
+      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (new_phi))
+        = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (bi_newcall_lhs);
+      add_phi_arg (new_phi, bi_call_lhs, join_tgt_in_edge_from_call,
+                   gimple_location (bi_call));
+      add_phi_arg (new_phi, bi_newcall_lhs, join_tgt_in_edge_fall_thru,
+                   gimple_location (bi_newcall));
+
+      /* Replace all uses of the original return value with the phi result.  */
+      use_operand_p use_p;
+      gimple *use_stmt;
+      imm_use_iterator iterator;
+      FOR_EACH_IMM_USE_STMT (use_stmt, iterator, bi_newcall_lhs)
+        if (use_stmt != new_phi)
+	  FOR_EACH_IMM_USE_ON_STMT (use_p, iterator)
+	    SET_USE (use_p, PHI_RESULT (new_phi));
+    }
+
   /* Now update the probability and profile information, processing the
      guards in order of execution.
 
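
Stepping back to the BI_NEWCALL handling just added: the net effect of the
phi creation and use-rewriting is that every consumer of the original
result reads the merged value instead, e.g. (illustrative SSA names):

    /* Before shrink-wrapping:      After (uses read the PHI result):   */
    y_3 = .SQRT (x_1);              y_2 = sqrt (x_1);      /* bi_call_bb    */
    z_5 = y_3 + 1.0;                y_3 = .SQRT (x_1);     /* bi_newcall_bb */
                                    y_4 = PHI <y_2, y_3>   /* join_tgt_bb   */
                                    z_5 = y_4 + 1.0;
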
@@ -993,9 +1072,11 @@
 
   unsigned nconds = 0;
   auto_vec<gimple *, 12> conds;
+  bool is_arg_conds = false;
   if (can_test_argument_range (call))
     {
       gen_shrink_wrap_conditions (call, conds, &nconds);
+      is_arg_conds = true;
       gcc_assert (nconds != 0);
     }
   else
@@ -1037,16 +1118,14 @@
 	{
 	  gimple_stmt_iterator gsi = gsi_for_stmt (call);
 	  gcall *new_call = gimple_build_call_internal (IFN_SET_EDOM, 0);
-	  gimple_set_vuse (new_call, gimple_vuse (call));
-	  gimple_set_vdef (new_call, gimple_vdef (call));
-	  SSA_NAME_DEF_STMT (gimple_vdef (new_call)) = new_call;
+	  gimple_move_vops (new_call, call);
 	  gimple_set_location (new_call, gimple_location (call));
 	  gsi_replace (&gsi, new_call, false);
 	  call = new_call;
 	}
     }
-
-  shrink_wrap_one_built_in_call_with_conds (call, conds, nconds);
+  shrink_wrap_one_built_in_call_with_conds (call, conds, nconds,
+					    is_arg_conds ? new_call : NULL);
 }
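
The gimple_move_vops call introduced above subsumes the removed
three-line sequence.  Roughly (a sketch, not the exact implementation in
gimple.c), it transfers the virtual operands to the new statement and
clears them from the old one:

    static void
    move_vops_sketch (gimple *new_stmt, gimple *old_stmt)
    {
      gimple_set_vdef (new_stmt, gimple_vdef (old_stmt));
      gimple_set_vuse (new_stmt, gimple_vuse (old_stmt));
      if (gimple_vdef (new_stmt)
          && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME)
        SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
      gimple_set_vdef (old_stmt, NULL_TREE);
      gimple_set_vuse (old_stmt, NULL_TREE);
    }
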
 
 /* The top level function for conditional dead code shrink