diff gcc/dse.c @ 145:1830386684a0

gcc-9.2.0
author anatofuz
date Thu, 13 Feb 2020 11:34:05 +0900
parents 84e7813d76e9
children
--- a/gcc/dse.c	Thu Oct 25 07:37:49 2018 +0900
+++ b/gcc/dse.c	Thu Feb 13 11:34:05 2020 +0900
@@ -1,5 +1,5 @@
 /* RTL dead store elimination.
-   Copyright (C) 2005-2018 Free Software Foundation, Inc.
+   Copyright (C) 2005-2020 Free Software Foundation, Inc.
 
    Contributed by Richard Sandiford <rsandifor@codesourcery.com>
    and Kenneth Zadeck <zadeck@naturalbridge.com>
@@ -47,9 +47,9 @@
 #include "explow.h"
 #include "expr.h"
 #include "dbgcnt.h"
-#include "params.h"
 #include "rtl-iter.h"
 #include "cfgcleanup.h"
+#include "calls.h"
 
 /* This file contains three techniques for performing Dead Store
    Elimination (dse).
@@ -220,8 +220,9 @@
 struct insn_info_type;
 
 /* This structure holds information about a candidate store.  */
-struct store_info
+class store_info
 {
+public:
 
   /* False means this is a clobber.  */
   bool is_set;
@@ -277,7 +278,7 @@
     } positions_needed;
 
   /* The next store info for this insn.  */
-  struct store_info *next;
+  class store_info *next;
 
   /* The right hand side of the store.  This is used if there is a
      subsequent reload of the mems address somewhere later in the
@@ -309,8 +310,9 @@
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
-struct read_info_type
+class read_info_type
 {
+public:
   /* The id of the mem group of the base address.  */
   int group_id;
 
@@ -324,9 +326,9 @@
   rtx mem;
 
   /* The next read_info for this insn.  */
-  struct read_info_type *next;
+  class read_info_type *next;
 };
-typedef struct read_info_type *read_info_t;
+typedef class read_info_type *read_info_t;
 
 static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
 
@@ -819,7 +821,7 @@
   for (cur = new_insn; cur; cur = NEXT_INSN (cur))
     {
       info.current = cur;
-      note_stores (PATTERN (cur), note_add_store, &info);
+      note_stores (cur, note_add_store, &info);
     }
 
   /* If a failure was flagged above, return 1 so that for_each_inc_dec will
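Note on the hunk above: it tracks the newer note_stores interface, which takes the insn itself rather than its extracted PATTERN. A minimal sketch of a caller under that assumption (record_mem_store and count_mem_stores are illustrative names, not part of this file):

  /* Callback invoked for every SET/CLOBBER destination in the insn.  */
  static void
  record_mem_store (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
  {
    if (MEM_P (dest))
      ++*(int *) data;
  }

  /* Count memory destinations written by INSN, using the insn-taking
     overload of note_stores instead of note_stores (PATTERN (insn), ...).  */
  static int
  count_mem_stores (rtx_insn *insn)
  {
    int n = 0;
    note_stores (insn, record_mem_store, &n);
    return n;
  }
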
@@ -1507,7 +1509,7 @@
   while (ptr)
     {
       insn_info_t next = ptr->next_local_store;
-      struct store_info *s_info = ptr->store_rec;
+      class store_info *s_info = ptr->store_rec;
       bool del = true;
 
       /* Skip the clobbers. We delete the active insn if this insn
@@ -1841,7 +1843,7 @@
   else
     gap = read_offset - store_info->offset;
 
-  if (maybe_ne (gap, 0))
+  if (gap.is_constant () && maybe_ne (gap, 0))
     {
       poly_int64 shift = gap * BITS_PER_UNIT;
       poly_int64 access_size = GET_MODE_SIZE (read_mode) + gap;
@@ -1976,7 +1978,7 @@
       bitmap regs_set = BITMAP_ALLOC (&reg_obstack);
 
       for (this_insn = insns; this_insn != NULL_RTX; this_insn = NEXT_INSN (this_insn))
-	note_stores (PATTERN (this_insn), look_for_hardregs, regs_set);
+	note_stores (this_insn, look_for_hardregs, regs_set);
 
       bitmap_and_into (regs_set, regs_live);
       if (!bitmap_empty_p (regs_set))
@@ -2072,8 +2074,29 @@
   insn_info = bb_info->last_insn;
 
   if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
-      || (MEM_VOLATILE_P (mem)))
+      || MEM_VOLATILE_P (mem))
     {
+      if (crtl->stack_protect_guard
+	  && (MEM_EXPR (mem) == crtl->stack_protect_guard
+	      || (crtl->stack_protect_guard_decl
+		  && MEM_EXPR (mem) == crtl->stack_protect_guard_decl))
+	  && MEM_VOLATILE_P (mem))
+	{
+	  /* This is either the stack protector canary on the stack,
+	     which ought to be written by a MEM_VOLATILE_P store and
+	     thus shouldn't be deleted and is read at the very end of
+	     function, but shouldn't conflict with any other store.
+	     Or it is __stack_chk_guard variable or TLS or whatever else
+	     MEM holding the canary value, which really shouldn't be
+	     ever modified in -fstack-protector* protected functions,
+	     otherwise the prologue store wouldn't match the epilogue
+	     check.  */
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    fprintf (dump_file, " stack protector canary read ignored.\n");
+	  insn_info->cannot_delete = true;
+	  return;
+	}
+
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, " adding wild read, volatile or barrier.\n");
       add_wild_read (bb_info);
@@ -2320,7 +2343,8 @@
       if (!is_int_mode (TYPE_MODE (TREE_VALUE (arg)), &mode))
 	return false;
 
-      reg = targetm.calls.function_arg (args_so_far, mode, NULL_TREE, true);
+      function_arg_info arg (mode, /*named=*/true);
+      reg = targetm.calls.function_arg (args_so_far, arg);
       if (!reg || !REG_P (reg) || GET_MODE (reg) != mode)
 	return false;
 
@@ -2352,7 +2376,7 @@
       if (tmp)
 	args[idx] = tmp;
 
-      targetm.calls.function_arg_advance (args_so_far, mode, NULL_TREE, true);
+      targetm.calls.function_arg_advance (args_so_far, arg);
     }
   if (arg != void_list_node || idx != nargs)
     return false;
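Note on these two hunks: they follow the conversion of the function_arg and function_arg_advance target hooks to a single function_arg_info argument. A hedged sketch of the before/after pattern, assuming the newer hook signatures; args_so_far and mode stand in for the surrounding locals:

  /* Old interface: mode, type and namedness passed separately.
       reg = targetm.calls.function_arg (args_so_far, mode, NULL_TREE, true);
       targetm.calls.function_arg_advance (args_so_far, mode, NULL_TREE, true);
     New interface: bundle them into one function_arg_info object.  */
  function_arg_info info (mode, /*named=*/true);
  rtx reg = targetm.calls.function_arg (args_so_far, info);
  if (reg && REG_P (reg))
    targetm.calls.function_arg_advance (args_so_far, info);
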
@@ -2367,7 +2391,7 @@
   bitmap ret;
 
   ret = ALLOC_REG_SET (NULL);
-  bitmap_and (ret, in, fixed_reg_set_regset);
+  bitmap_and (ret, in, bitmap_view<HARD_REG_SET> (fixed_reg_set));
   return ret;
 }
 
@@ -2376,7 +2400,7 @@
    non-register target.  */
 
 static void
-scan_insn (bb_info_t bb_info, rtx_insn *insn)
+scan_insn (bb_info_t bb_info, rtx_insn *insn, int max_active_local_stores)
 {
   rtx body;
   insn_info_type *insn_info = insn_info_type_pool.allocate ();
@@ -2498,8 +2522,7 @@
 		    fprintf (dump_file, "handling memset as BLKmode store\n");
 		  if (mems_found == 1)
 		    {
-		      if (active_local_stores_len++
-			  >= PARAM_VALUE (PARAM_MAX_DSE_ACTIVE_LOCAL_STORES))
+		      if (active_local_stores_len++ >= max_active_local_stores)
 			{
 			  active_local_stores_len = 1;
 			  active_local_stores = NULL;
@@ -2514,10 +2537,13 @@
 		clear_rhs_from_active_local_stores ();
 	    }
 	}
-      else if (SIBLING_CALL_P (insn) && reload_completed)
+      else if (SIBLING_CALL_P (insn)
+	       && (reload_completed || HARD_FRAME_POINTER_IS_ARG_POINTER))
 	/* Arguments for a sibling call that are pushed to memory are passed
 	   using the incoming argument pointer of the current function.  After
-	   reload that might be (and likely is) frame pointer based.  */
+	   reload that might be (and likely is) frame pointer based.  And, if
+	   it is a frame pointer on the target, even before reload we need to
+	   kill frame pointer based stores.  */
 	add_wild_read (bb_info);
       else
 	/* Every other call, including pure functions, may read any memory
@@ -2556,8 +2582,7 @@
      it as cannot delete.  This simplifies the processing later.  */
   if (mems_found == 1)
     {
-      if (active_local_stores_len++
-	  >= PARAM_VALUE (PARAM_MAX_DSE_ACTIVE_LOCAL_STORES))
+      if (active_local_stores_len++ >= max_active_local_stores)
 	{
 	  active_local_stores_len = 1;
 	  active_local_stores = NULL;
@@ -2587,7 +2612,7 @@
       bool del = false;
 
       /* If ANY of the store_infos match the cselib group that is
-	 being deleted, then the insn can not be deleted.  */
+	 being deleted, then the insn cannot be deleted.  */
       while (store_info)
 	{
 	  if ((store_info->group_id == -1)
@@ -2629,6 +2654,12 @@
   bitmap_set_bit (all_blocks, ENTRY_BLOCK);
   bitmap_set_bit (all_blocks, EXIT_BLOCK);
 
+  /* For -O1 reduce the maximum number of active local stores for RTL DSE
+     since this can consume huge amounts of memory (PR89115).  */
+  int max_active_local_stores = param_max_dse_active_local_stores;
+  if (optimize < 2)
+    max_active_local_stores /= 10;
+
   FOR_ALL_BB_FN (bb, cfun)
     {
       insn_info_t ptr;
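Note on the -O1 cap introduced a few lines above: assuming the upstream default of 5000 for param_max_dse_active_local_stores (the default is not stated in this diff), the division by 10 limits the active local store list to 500 entries when optimize < 2, which is the memory-use mitigation referenced by PR89115.
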
@@ -2656,7 +2687,7 @@
 	  FOR_BB_INSNS (bb, insn)
 	    {
 	      if (INSN_P (insn))
-		scan_insn (bb_info, insn);
+		scan_insn (bb_info, insn, max_active_local_stores);
 	      cselib_process_insn (insn);
 	      if (INSN_P (insn))
 		df_simulate_one_insn_forwards (bb, insn, regs_live);
@@ -3598,7 +3629,10 @@
   if ((locally_deleted || globally_deleted)
       && cfun->can_throw_non_call_exceptions
       && purge_all_dead_edges ())
-    cleanup_cfg (0);
+    {
+      free_dominance_info (CDI_DOMINATORS);
+      cleanup_cfg (0);
+    }
 
   return 0;
 }