diff gcc/cfgexpand.c @ 69:1b10fe6932e1
merge 69
author:   Nobuyasu Oshiro <dimolto@cr.ie.u-ryukyu.ac.jp>
date:     Sun, 21 Aug 2011 07:53:12 +0900
parents:  326d9e06c2e3 f6334be47118
children: b81903832de2
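One of the larger changes below replaces stack_var_size_cmp with stack_var_cmp, which sorts the stack-variable index array on several keys: variables whose alignment exceeds MAX_SUPPORTED_STACK_ALIGNMENT ("large" alignment) come first, then size, then true alignment, with the decl or SSA-name id as a final tie-break so the qsort order is deterministic. As orientation before reading that hunk, here is a self-contained sketch of the multi-key comparator pattern; the struct, the 64-byte threshold, and the key directions are illustrative stand-ins, not GCC's code.

#include <stdio.h>
#include <stdlib.h>

struct var { unsigned align; long size; unsigned uid; };

#define LARGE_ALIGN 64	/* stand-in for MAX_SUPPORTED_STACK_ALIGNMENT */

/* Order: the "large" alignment class first, then size, then alignment,
   then a unique id so equal keys still sort deterministically.  */
static int
var_cmp (const void *pa, const void *pb)
{
  const struct var *a = (const struct var *) pa;
  const struct var *b = (const struct var *) pb;
  int largea = a->align > LARGE_ALIGN;
  int largeb = b->align > LARGE_ALIGN;

  if (largea != largeb)
    return largeb - largea;
  if (a->size != b->size)
    return a->size < b->size ? -1 : 1;
  if (a->align != b->align)
    return a->align < b->align ? -1 : 1;
  if (a->uid != b->uid)
    return a->uid < b->uid ? -1 : 1;
  return 0;
}

int
main (void)
{
  struct var v[] = { {16, 8, 3}, {128, 32, 1}, {16, 8, 2}, {8, 64, 4} };
  size_t i;

  qsort (v, 4, sizeof v[0], var_cmp);
  for (i = 0; i < 4; i++)
    printf ("align=%u size=%ld uid=%u\n", v[i].align, v[i].size, v[i].uid);
  return 0;
}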
--- a/gcc/cfgexpand.c Tue Dec 14 03:58:33 2010 +0900 +++ b/gcc/cfgexpand.c Sun Aug 21 07:53:12 2011 +0900 @@ -1,5 +1,5 @@ /* A pass for lowering trees to RTL. - Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 + Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. This file is part of GCC. @@ -36,6 +36,8 @@ #include "except.h" #include "flags.h" #include "diagnostic.h" +#include "tree-pretty-print.h" +#include "gimple-pretty-print.h" #include "toplev.h" #include "debug.h" #include "params.h" @@ -46,7 +48,9 @@ #include "cbc-tree.h" #endif #include "ssaexpand.h" - +#include "bitmap.h" +#include "sbitmap.h" +#include "insn-attr.h" /* For INSN_SCHEDULING. */ /* This variable holds information helping the rewriting of SSA trees into RTL. */ @@ -67,7 +71,13 @@ grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt)); - if (grhs_class == GIMPLE_BINARY_RHS) + if (grhs_class == GIMPLE_TERNARY_RHS) + t = build3 (gimple_assign_rhs_code (stmt), + TREE_TYPE (gimple_assign_lhs (stmt)), + gimple_assign_rhs1 (stmt), + gimple_assign_rhs2 (stmt), + gimple_assign_rhs3 (stmt)); + else if (grhs_class == GIMPLE_BINARY_RHS) t = build2 (gimple_assign_rhs_code (stmt), TREE_TYPE (gimple_assign_lhs (stmt)), gimple_assign_rhs1 (stmt), @@ -80,57 +90,26 @@ { t = gimple_assign_rhs1 (stmt); /* Avoid modifying this tree in place below. */ - if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t) - && gimple_location (stmt) != EXPR_LOCATION (t)) - t = copy_node (t); + if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t) + && gimple_location (stmt) != EXPR_LOCATION (t)) + || (gimple_block (stmt) + && currently_expanding_to_rtl + && EXPR_P (t) + && gimple_block (stmt) != TREE_BLOCK (t))) + t = copy_node (t); } else gcc_unreachable (); + if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)) + SET_EXPR_LOCATION (t, gimple_location (stmt)); + if (gimple_block (stmt) && currently_expanding_to_rtl && EXPR_P (t)) + TREE_BLOCK (t) = gimple_block (stmt); + return t; } -/* Verify that there is exactly single jump instruction since last and attach - REG_BR_PROB note specifying probability. - ??? We really ought to pass the probability down to RTL expanders and let it - re-distribute it when the conditional expands into multiple conditionals. - This is however difficult to do. */ -void -add_reg_br_prob_note (rtx last, int probability) -{ - if (profile_status == PROFILE_ABSENT) - return; - for (last = NEXT_INSN (last); last && NEXT_INSN (last); last = NEXT_INSN (last)) - if (JUMP_P (last)) - { - /* It is common to emit condjump-around-jump sequence when we don't know - how to reverse the conditional. Special case this. 
*/ - if (!any_condjump_p (last) - || !JUMP_P (NEXT_INSN (last)) - || !simplejump_p (NEXT_INSN (last)) - || !NEXT_INSN (NEXT_INSN (last)) - || !BARRIER_P (NEXT_INSN (NEXT_INSN (last))) - || !NEXT_INSN (NEXT_INSN (NEXT_INSN (last))) - || !LABEL_P (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))) - || NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last))))) - goto failed; - gcc_assert (!find_reg_note (last, REG_BR_PROB, 0)); - add_reg_note (last, REG_BR_PROB, - GEN_INT (REG_BR_PROB_BASE - probability)); - return; - } - if (!last || !JUMP_P (last) || !any_condjump_p (last)) - goto failed; - gcc_assert (!find_reg_note (last, REG_BR_PROB, 0)); - add_reg_note (last, REG_BR_PROB, GEN_INT (probability)); - return; -failed: - if (dump_file) - fprintf (dump_file, "Failed to add probability note\n"); -} - - #ifndef STACK_ALIGNMENT_NEEDED #define STACK_ALIGNMENT_NEEDED 1 #endif @@ -235,29 +214,7 @@ static unsigned int get_decl_align_unit (tree decl) { - unsigned int align; - - align = LOCAL_DECL_ALIGNMENT (decl); - - if (align > MAX_SUPPORTED_STACK_ALIGNMENT) - align = MAX_SUPPORTED_STACK_ALIGNMENT; - - if (SUPPORTS_STACK_ALIGNMENT) - { - if (crtl->stack_alignment_estimated < align) - { - gcc_assert(!crtl->stack_realign_processed); - crtl->stack_alignment_estimated = align; - } - } - - /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted. - So here we only make sure stack_alignment_needed >= align. */ - if (crtl->stack_alignment_needed < align) - crtl->stack_alignment_needed = align; - if (crtl->max_used_stack_slot_alignment < align) - crtl->max_used_stack_slot_alignment = align; - + unsigned int align = LOCAL_DECL_ALIGNMENT (decl); return align / BITS_PER_UNIT; } @@ -265,7 +222,7 @@ Return the frame offset. */ static HOST_WIDE_INT -alloc_stack_frame_space (HOST_WIDE_INT size, HOST_WIDE_INT align) +alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align) { HOST_WIDE_INT offset, new_frame_offset; @@ -299,6 +256,8 @@ static void add_stack_var (tree decl) { + struct stack_var *v; + if (stack_vars_num >= stack_vars_alloc) { if (stack_vars_alloc) @@ -308,17 +267,23 @@ stack_vars = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc); } - stack_vars[stack_vars_num].decl = decl; - stack_vars[stack_vars_num].offset = 0; - stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1); - stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl)); + v = &stack_vars[stack_vars_num]; + + v->decl = decl; + v->offset = 0; + v->size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1); + /* Ensure that all variables have size, so that &a != &b for any two + variables that are simultaneously live. */ + if (v->size == 0) + v->size = 1; + v->alignb = get_decl_align_unit (SSAVAR (decl)); /* All variables are initially in their own partition. */ - stack_vars[stack_vars_num].representative = stack_vars_num; - stack_vars[stack_vars_num].next = EOC; + v->representative = stack_vars_num; + v->next = EOC; /* All variables initially conflict with no other. */ - stack_vars[stack_vars_num].conflicts = NULL; + v->conflicts = NULL; /* Ensure that this decl doesn't get put onto the list twice. 
*/ set_rtl (decl, pc_rtx); @@ -368,7 +333,7 @@ if (TREE_CODE (type) != RECORD_TYPE) return false; - for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) + for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) if (aggregate_contains_union_type (TREE_TYPE (field))) return true; @@ -418,26 +383,43 @@ } /* A subroutine of partition_stack_vars. A comparison function for qsort, - sorting an array of indices by the size and type of the object. */ + sorting an array of indices by the properties of the object. */ static int -stack_var_size_cmp (const void *a, const void *b) +stack_var_cmp (const void *a, const void *b) { - HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size; - HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size; - tree decla, declb; + size_t ia = *(const size_t *)a; + size_t ib = *(const size_t *)b; + unsigned int aligna = stack_vars[ia].alignb; + unsigned int alignb = stack_vars[ib].alignb; + HOST_WIDE_INT sizea = stack_vars[ia].size; + HOST_WIDE_INT sizeb = stack_vars[ib].size; + tree decla = stack_vars[ia].decl; + tree declb = stack_vars[ib].decl; + bool largea, largeb; unsigned int uida, uidb; - if (sa < sb) + /* Primary compare on "large" alignment. Large comes first. */ + largea = (aligna * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT); + largeb = (alignb * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT); + if (largea != largeb) + return (int)largeb - (int)largea; + + /* Secondary compare on size, decreasing */ + if (sizea < sizeb) return -1; - if (sa > sb) + if (sizea > sizeb) return 1; - decla = stack_vars[*(const size_t *)a].decl; - declb = stack_vars[*(const size_t *)b].decl; - /* For stack variables of the same size use and id of the decls - to make the sort stable. Two SSA names are compared by their - version, SSA names come before non-SSA names, and two normal - decls are compared by their DECL_UID. */ + + /* Tertiary compare on true alignment, decreasing. */ + if (aligna < alignb) + return -1; + if (aligna > alignb) + return 1; + + /* Final compare on ID for sort stability, increasing. + Two SSA names are compared by their version, SSA names come before + non-SSA names, and two normal decls are compared by their DECL_UID. */ if (TREE_CODE (decla) == SSA_NAME) { if (TREE_CODE (declb) == SSA_NAME) @@ -450,9 +432,9 @@ else uida = DECL_UID (decla), uidb = DECL_UID (declb); if (uida < uidb) - return -1; + return 1; if (uida > uidb) - return 1; + return -1; return 0; } @@ -532,24 +514,26 @@ points-to sets later, so use GGC alloc. */ part = BITMAP_GGC_ALLOC (); for (j = i; j != EOC; j = stack_vars[j].next) - { - tree decl = stack_vars[j].decl; - unsigned int uid = DECL_UID (decl); - /* We should never end up partitioning SSA names (though they - may end up on the stack). Neither should we allocate stack - space to something that is unused and thus unreferenced. */ - gcc_assert (DECL_P (decl) - && referenced_var_lookup (uid)); - bitmap_set_bit (part, uid); - *((bitmap *) pointer_map_insert (decls_to_partitions, - (void *)(size_t) uid)) = part; - *((tree *) pointer_map_insert (cfun->gimple_df->decls_to_pointers, - decl)) = name; - } + { + tree decl = stack_vars[j].decl; + unsigned int uid = DECL_PT_UID (decl); + /* We should never end up partitioning SSA names (though they + may end up on the stack). Neither should we allocate stack + space to something that is unused and thus unreferenced, except + for -O0 where we are preserving even unreferenced variables. 
*/ + gcc_assert (DECL_P (decl) + && (!optimize + || referenced_var_lookup (cfun, DECL_UID (decl)))); + bitmap_set_bit (part, uid); + *((bitmap *) pointer_map_insert (decls_to_partitions, + (void *)(size_t) uid)) = part; + *((tree *) pointer_map_insert (cfun->gimple_df->decls_to_pointers, + decl)) = name; + } /* Make the SSA name point to all partition members. */ pi = get_ptr_info (name); - pt_solution_set (&pi->pt, part); + pt_solution_set (&pi->pt, part, false, false); } /* Make all points-to sets that contain one member of a partition @@ -573,9 +557,7 @@ } add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped, - decls_to_partitions, visited, temp); - add_partitioned_vars_to_ptset (&cfun->gimple_df->callused, - decls_to_partitions, visited, temp); + decls_to_partitions, visited, temp); pointer_set_destroy (visited); pointer_map_destroy (decls_to_partitions); @@ -652,56 +634,62 @@ if (n == 1) return; - qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_size_cmp); + qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_cmp); for (si = 0; si < n; ++si) { size_t i = stack_vars_sorted[si]; HOST_WIDE_INT isize = stack_vars[i].size; + unsigned int ialign = stack_vars[i].alignb; HOST_WIDE_INT offset = 0; for (sj = si; sj-- > 0; ) - { - size_t j = stack_vars_sorted[sj]; - HOST_WIDE_INT jsize = stack_vars[j].size; - unsigned int jalign = stack_vars[j].alignb; - - /* Ignore objects that aren't partition representatives. */ - if (stack_vars[j].representative != j) - continue; - - /* Ignore objects too large for the remaining space. */ - if (isize < jsize) - continue; - - /* Ignore conflicting objects. */ - if (stack_var_conflict_p (i, j)) - continue; - - /* Refine the remaining space check to include alignment. */ - if (offset & (jalign - 1)) - { - HOST_WIDE_INT toff = offset; - toff += jalign - 1; - toff &= -(HOST_WIDE_INT)jalign; - if (isize - (toff - offset) < jsize) - continue; - - isize -= toff - offset; - offset = toff; - } - - /* UNION the objects, placing J at OFFSET. */ - union_stack_vars (i, j, offset); - - isize -= jsize; - if (isize == 0) - break; + { + size_t j = stack_vars_sorted[sj]; + HOST_WIDE_INT jsize = stack_vars[j].size; + unsigned int jalign = stack_vars[j].alignb; + + /* Ignore objects that aren't partition representatives. */ + if (stack_vars[j].representative != j) + continue; + + /* Ignore objects too large for the remaining space. */ + if (isize < jsize) + continue; + + /* Ignore conflicting objects. */ + if (stack_var_conflict_p (i, j)) + continue; + + /* Do not mix objects of "small" (supported) alignment + and "large" (unsupported) alignment. */ + if ((ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) + != (jalign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)) + continue; + + /* Refine the remaining space check to include alignment. */ + if (offset & (jalign - 1)) + { + HOST_WIDE_INT toff = offset; + toff += jalign - 1; + toff &= -(HOST_WIDE_INT)jalign; + if (isize - (toff - offset) < jsize) + continue; + + isize -= toff - offset; + offset = toff; + } + + /* UNION the objects, placing J at OFFSET. */ + union_stack_vars (i, j, offset); + + isize -= jsize; + if (isize == 0) + break; + } } - } - - if (optimize) - update_alias_info_with_stack_vars (); + + update_alias_info_with_stack_vars (); } /* A debugging aid for expand_used_vars. Dump the generated partitions. */ @@ -733,33 +721,36 @@ } } -/* Assign rtl to DECL at frame offset OFFSET. */ +/* Assign rtl to DECL at BASE + OFFSET. 
*/ static void -expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset) +expand_one_stack_var_at (tree decl, rtx base, unsigned base_align, + HOST_WIDE_INT offset) { - /* Alignment is unsigned. */ - unsigned HOST_WIDE_INT align; + unsigned align; rtx x; /* If this fails, we've overflowed the stack frame. Error nicely? */ gcc_assert (offset == trunc_int_for_mode (offset, Pmode)); - x = plus_constant (virtual_stack_vars_rtx, offset); + x = plus_constant (base, offset); x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x); if (TREE_CODE (decl) != SSA_NAME) { /* Set alignment we actually gave this decl if it isn't an SSA name. If it is we generate stack slots only accidentally so it isn't as - important, we'll simply use the alignment that is already set. */ - offset -= frame_phase; + important, we'll simply use the alignment that is already set. */ + if (base == virtual_stack_vars_rtx) + offset -= frame_phase; align = offset & -offset; align *= BITS_PER_UNIT; - if (align == 0) - align = STACK_BOUNDARY; - else if (align > MAX_SUPPORTED_STACK_ALIGNMENT) - align = MAX_SUPPORTED_STACK_ALIGNMENT; + if (align == 0 || align > base_align) + align = base_align; + + /* One would think that we could assert that we're not decreasing + alignment here, but (at least) the i386 port does exactly this + via the MINIMUM_ALIGNMENT hook. */ DECL_ALIGN (decl) = align; DECL_USER_ALIGN (decl) = 0; @@ -777,9 +768,56 @@ expand_stack_vars (bool (*pred) (tree)) { size_t si, i, j, n = stack_vars_num; + HOST_WIDE_INT large_size = 0, large_alloc = 0; + rtx large_base = NULL; + unsigned large_align = 0; + tree decl; + + /* Determine if there are any variables requiring "large" alignment. + Since these are dynamically allocated, we only process these if + no predicate involved. */ + large_align = stack_vars[stack_vars_sorted[0]].alignb * BITS_PER_UNIT; + if (pred == NULL && large_align > MAX_SUPPORTED_STACK_ALIGNMENT) + { + /* Find the total size of these variables. */ + for (si = 0; si < n; ++si) + { + unsigned alignb; + + i = stack_vars_sorted[si]; + alignb = stack_vars[i].alignb; + + /* Stop when we get to the first decl with "small" alignment. */ + if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) + break; + + /* Skip variables that aren't partition representatives. */ + if (stack_vars[i].representative != i) + continue; + + /* Skip variables that have already had rtl assigned. See also + add_stack_var where we perpetrate this pc_rtx hack. */ + decl = stack_vars[i].decl; + if ((TREE_CODE (decl) == SSA_NAME + ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] + : DECL_RTL (decl)) != pc_rtx) + continue; + + large_size += alignb - 1; + large_size &= -(HOST_WIDE_INT)alignb; + large_size += stack_vars[i].size; + } + + /* If there were any, allocate space. */ + if (large_size > 0) + large_base = allocate_dynamic_stack_space (GEN_INT (large_size), 0, + large_align, true); + } for (si = 0; si < n; ++si) { + rtx base; + unsigned base_align, alignb; HOST_WIDE_INT offset; i = stack_vars_sorted[si]; @@ -789,29 +827,53 @@ continue; /* Skip variables that have already had rtl assigned. See also - add_stack_var where we perpetrate this pc_rtx hack. */ - if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME - ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)] - : DECL_RTL (stack_vars[i].decl)) != pc_rtx) - continue; + add_stack_var where we perpetrate this pc_rtx hack. */ + decl = stack_vars[i].decl; + if ((TREE_CODE (decl) == SSA_NAME + ? 
SA.partition_to_pseudo[var_to_partition (SA.map, decl)] + : DECL_RTL (decl)) != pc_rtx) + continue; /* Check the predicate to see whether this variable should be - allocated in this pass. */ - if (pred && !pred (stack_vars[i].decl)) - continue; - - offset = alloc_stack_frame_space (stack_vars[i].size, - stack_vars[i].alignb); + allocated in this pass. */ + if (pred && !pred (decl)) + continue; + + alignb = stack_vars[i].alignb; + if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) + { + offset = alloc_stack_frame_space (stack_vars[i].size, alignb); + base = virtual_stack_vars_rtx; + base_align = crtl->max_used_stack_slot_alignment; + } + else + { + /* Large alignment is only processed in the last pass. */ + if (pred) + continue; + gcc_assert (large_base != NULL); + + large_alloc += alignb - 1; + large_alloc &= -(HOST_WIDE_INT)alignb; + offset = large_alloc; + large_alloc += stack_vars[i].size; + + base = large_base; + base_align = large_align; + } /* Create rtl for each variable based on their location within the partition. */ for (j = i; j != EOC; j = stack_vars[j].next) - { - gcc_assert (stack_vars[j].offset <= stack_vars[i].size); - expand_one_stack_var_at (stack_vars[j].decl, - stack_vars[j].offset + offset); + { + gcc_assert (stack_vars[j].offset <= stack_vars[i].size); + expand_one_stack_var_at (stack_vars[j].decl, + base, base_align, + stack_vars[j].offset + offset); + } } - } + + gcc_assert (large_alloc == large_size); } /* Take into account all sizes of partitions and reset DECL_RTLs. */ @@ -842,13 +904,19 @@ static void expand_one_stack_var (tree var) { - HOST_WIDE_INT size, offset, align; + HOST_WIDE_INT size, offset; + unsigned byte_align; size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1); - align = get_decl_align_unit (SSAVAR (var)); - offset = alloc_stack_frame_space (size, align); - - expand_one_stack_var_at (var, offset); + byte_align = get_decl_align_unit (SSAVAR (var)); + + /* We handle highly aligned variables in expand_stack_vars. */ + gcc_assert (byte_align * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT); + + offset = alloc_stack_frame_space (size, byte_align); + + expand_one_stack_var_at (var, virtual_stack_vars_rtx, + crtl->max_used_stack_slot_alignment, offset); } /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL @@ -917,6 +985,11 @@ if (flag_stack_protect) return true; + /* We handle "large" alignment via dynamic allocation. We want to handle + this extra complication in only one place, so defer them. */ + if (DECL_ALIGN (var) > MAX_SUPPORTED_STACK_ALIGNMENT) + return true; + /* Variables in the outermost scope automatically conflict with every other variable. The only reason to want to defer them at all is that, after sorting, we can more efficiently pack @@ -946,35 +1019,53 @@ static HOST_WIDE_INT expand_one_var (tree var, bool toplevel, bool really_expand) { + unsigned int align = BITS_PER_UNIT; tree origvar = var; + var = SSAVAR (var); - if (SUPPORTS_STACK_ALIGNMENT - && TREE_TYPE (var) != error_mark_node - && TREE_CODE (var) == VAR_DECL) + if (TREE_TYPE (var) != error_mark_node && TREE_CODE (var) == VAR_DECL) { - unsigned int align; - /* Because we don't know if VAR will be in register or on stack, we conservatively assume it will be on stack even if VAR is eventually put into register after RA pass. For non-automatic variables, which won't be on stack, we collect alignment of type and ignore user specified alignment. 
*/ if (TREE_STATIC (var) || DECL_EXTERNAL (var)) - align = MINIMUM_ALIGNMENT (TREE_TYPE (var), - TYPE_MODE (TREE_TYPE (var)), - TYPE_ALIGN (TREE_TYPE (var))); + align = MINIMUM_ALIGNMENT (TREE_TYPE (var), + TYPE_MODE (TREE_TYPE (var)), + TYPE_ALIGN (TREE_TYPE (var))); + else if (DECL_HAS_VALUE_EXPR_P (var) + || (DECL_RTL_SET_P (var) && MEM_P (DECL_RTL (var)))) + /* Don't consider debug only variables with DECL_HAS_VALUE_EXPR_P set + or variables which were assigned a stack slot already by + expand_one_stack_var_at - in the latter case DECL_ALIGN has been + changed from the offset chosen to it. */ + align = crtl->stack_alignment_estimated; else align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var)); - if (crtl->stack_alignment_estimated < align) - { - /* stack_alignment_estimated shouldn't change after stack - realign decision made */ - gcc_assert(!crtl->stack_realign_processed); + /* If the variable alignment is very large we'll dynamicaly allocate + it, which means that in-frame portion is just a pointer. */ + if (align > MAX_SUPPORTED_STACK_ALIGNMENT) + align = POINTER_SIZE; + } + + if (SUPPORTS_STACK_ALIGNMENT + && crtl->stack_alignment_estimated < align) + { + /* stack_alignment_estimated shouldn't change after stack + realign decision made */ + gcc_assert(!crtl->stack_realign_processed); crtl->stack_alignment_estimated = align; } - } + + /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted. + So here we only make sure stack_alignment_needed >= align. */ + if (crtl->stack_alignment_needed < align) + crtl->stack_alignment_needed = align; + if (crtl->max_used_stack_slot_alignment < align) + crtl->max_used_stack_slot_alignment = align; if (TREE_CODE (origvar) == SSA_NAME) { @@ -1011,6 +1102,14 @@ if (really_expand) expand_one_register_var (origvar); } + else if (!host_integerp (DECL_SIZE_UNIT (var), 1)) + { + if (really_expand) + { + error ("size of variable %q+D is too large", var); + expand_one_error_var (var); + } + } else if (defer_stack_allocation (var, toplevel)) add_stack_var (origvar); else @@ -1037,7 +1136,7 @@ old_sv_num = toplevel ? 0 : stack_vars_num; /* Expand all variables at this level. */ - for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t)) + for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t)) if (TREE_USED (t)) expand_one_var (t, toplevel, true); @@ -1069,7 +1168,7 @@ { tree t; - for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t)) + for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t)) /* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */ TREE_USED (t) = 0; @@ -1215,38 +1314,15 @@ crtl->stack_protect_guard = guard; } -/* A subroutine of expand_used_vars. Walk down through the BLOCK tree - expanding variables. Those variables that can be put into registers - are allocated pseudos; those that can't are put on the stack. - - TOPLEVEL is true if this is the outermost BLOCK. */ - -static HOST_WIDE_INT -account_used_vars_for_block (tree block, bool toplevel) -{ - tree t; - HOST_WIDE_INT size = 0; - - /* Expand all variables at this level. */ - for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t)) - if (TREE_USED (t)) - size += expand_one_var (t, toplevel, false); - - /* Expand all variables at containing levels. */ - for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t)) - size += account_used_vars_for_block (t, false); - - return size; -} - /* Prepare for expanding variables. */ static void init_vars_expansion (void) { tree t; + unsigned ix; /* Set TREE_USED on all variables in the local_decls. 
*/ - for (t = cfun->local_decls; t; t = TREE_CHAIN (t)) - TREE_USED (TREE_VALUE (t)) = 1; + FOR_EACH_LOCAL_DECL (cfun, ix, t) + TREE_USED (t) = 1; /* Clear TREE_USED on all variables associated with a block scope. */ clear_tree_used (DECL_INITIAL (current_function_decl)); @@ -1269,32 +1345,30 @@ stack_vars_alloc = stack_vars_num = 0; } -/* Make a fair guess for the size of the stack frame of the current - function. This doesn't have to be exact, the result is only used - in the inline heuristics. So we don't want to run the full stack - var packing algorithm (which is quadratic in the number of stack - vars). Instead, we calculate the total size of all stack vars. - This turns out to be a pretty fair estimate -- packing of stack - vars doesn't happen very often. */ +/* Make a fair guess for the size of the stack frame of the function + in NODE. This doesn't have to be exact, the result is only used in + the inline heuristics. So we don't want to run the full stack var + packing algorithm (which is quadratic in the number of stack vars). + Instead, we calculate the total size of all stack vars. This turns + out to be a pretty fair estimate -- packing of stack vars doesn't + happen very often. */ HOST_WIDE_INT -estimated_stack_frame_size (void) +estimated_stack_frame_size (struct cgraph_node *node) { HOST_WIDE_INT size = 0; size_t i; - tree t, outer_block = DECL_INITIAL (current_function_decl); - - init_vars_expansion (); - - for (t = cfun->local_decls; t; t = TREE_CHAIN (t)) - { - tree var = TREE_VALUE (t); - - if (TREE_USED (var)) - size += expand_one_var (var, true, false); - TREE_USED (var) = 1; - } - size += account_used_vars_for_block (outer_block, true); + tree var; + tree old_cur_fun_decl = current_function_decl; + referenced_var_iterator rvi; + struct function *fn = DECL_STRUCT_FUNCTION (node->decl); + + current_function_decl = node->decl; + push_cfun (fn); + + gcc_checking_assert (gimple_referenced_vars (fn)); + FOR_EACH_REFERENCED_VAR (fn, var, rvi) + size += expand_one_var (var, true, false); if (stack_vars_num > 0) { @@ -1305,7 +1379,8 @@ size += account_stack_vars (); fini_vars_expansion (); } - + pop_cfun (); + current_function_decl = old_cur_fun_decl; return size; } @@ -1314,8 +1389,10 @@ static void expand_used_vars (void) { - tree t, next, outer_block = DECL_INITIAL (current_function_decl); + tree var, outer_block = DECL_INITIAL (current_function_decl); + VEC(tree,heap) *maybe_local_decls = NULL; unsigned i; + unsigned len; /* Compute the phase of the stack frame for this function. */ { @@ -1350,22 +1427,18 @@ /* At this point all variables on the local_decls with TREE_USED set are not associated with any block scope. Lay them out. */ - t = cfun->local_decls; - cfun->local_decls = NULL_TREE; - for (; t; t = next) + + len = VEC_length (tree, cfun->local_decls); + FOR_EACH_LOCAL_DECL (cfun, i, var) { - tree var = TREE_VALUE (t); bool expand_now = false; - next = TREE_CHAIN (t); - /* Expanded above already. 
*/ if (is_gimple_reg (var)) - { - TREE_USED (var) = 0; - ggc_free (t); - continue; - } + { + TREE_USED (var) = 0; + goto next; + } /* We didn't set a block for static or extern because it's hard to tell the difference between a global variable (re)declared in a local scope, and one that's really declared there to @@ -1385,25 +1458,38 @@ TREE_USED (var) = 1; if (expand_now) - { - expand_one_var (var, true, true); + expand_one_var (var, true, true); + + next: if (DECL_ARTIFICIAL (var) && !DECL_IGNORED_P (var)) - { - rtx rtl = DECL_RTL_IF_SET (var); - - /* Keep artificial non-ignored vars in cfun->local_decls - chain until instantiate_decls. */ - if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT)) - { - TREE_CHAIN (t) = cfun->local_decls; - cfun->local_decls = t; - continue; - } - } + { + rtx rtl = DECL_RTL_IF_SET (var); + + /* Keep artificial non-ignored vars in cfun->local_decls + chain until instantiate_decls. */ + if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT)) + add_local_decl (cfun, var); + else if (rtl == NULL_RTX) + /* If rtl isn't set yet, which can happen e.g. with + -fstack-protector, retry before returning from this + function. */ + VEC_safe_push (tree, heap, maybe_local_decls, var); + } } - ggc_free (t); - } + /* We duplicated some of the decls in CFUN->LOCAL_DECLS. + + +-----------------+-----------------+ + | ...processed... | ...duplicates...| + +-----------------+-----------------+ + ^ + +-- LEN points here. + + We just want the duplicates, as those are the artificial + non-ignored vars that we want to keep until instantiate_decls. + Move them down and truncate the array. */ + if (!VEC_empty (tree, cfun->local_decls)) + VEC_block_remove (tree, cfun->local_decls, 0, len); /* At this point, all variables within the block tree with TREE_USED set are actually used by the optimized function. Lay them out. */ @@ -1458,6 +1544,20 @@ fini_vars_expansion (); } + /* If there were any artificial non-ignored vars without rtl + found earlier, see if deferred stack allocation hasn't assigned + rtl to them. */ + FOR_EACH_VEC_ELT_REVERSE (tree, maybe_local_decls, i, var) + { + rtx rtl = DECL_RTL_IF_SET (var); + + /* Keep artificial non-ignored vars in cfun->local_decls + chain until instantiate_decls. */ + if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT)) + add_local_decl (cfun, var); + } + VEC_free (tree, heap, maybe_local_decls); + /* If the target requires that FRAME_OFFSET be aligned, do it. */ if (STACK_ALIGNMENT_NEEDED) { @@ -1566,11 +1666,18 @@ confused. */ gcc_assert (JUMP_P (insn) && !any_condjump_p (insn)); for (insn = PREV_INSN (insn); insn != last;) - { - insn = PREV_INSN (insn); - if (JUMP_P (NEXT_INSN (insn))) - delete_insn (NEXT_INSN (insn)); - } + { + insn = PREV_INSN (insn); + if (JUMP_P (NEXT_INSN (insn))) + { + if (!any_condjump_p (NEXT_INSN (insn))) + { + gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn)))); + delete_insn (NEXT_INSN (NEXT_INSN (insn))); + } + delete_insn (NEXT_INSN (insn)); + } + } } } @@ -1655,8 +1762,8 @@ two-way jump that needs to be decomposed into two basic blocks. 
*/ if (false_edge->dest == bb->next_bb) { - jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest)); - add_reg_br_prob_note (last, true_edge->probability); + jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest), + true_edge->probability); maybe_dump_rtl_for_gimple_stmt (stmt, last); if (true_edge->goto_locus) { @@ -1671,8 +1778,8 @@ } if (true_edge->dest == bb->next_bb) { - jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest)); - add_reg_br_prob_note (last, false_edge->probability); + jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest), + false_edge->probability); maybe_dump_rtl_for_gimple_stmt (stmt, last); if (false_edge->goto_locus) { @@ -1686,8 +1793,8 @@ return NULL; } - jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest)); - add_reg_br_prob_note (last, true_edge->probability); + jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest), + true_edge->probability); last = get_last_insn (); if (false_edge->goto_locus) { @@ -1738,15 +1845,31 @@ tree exp; tree lhs = gimple_call_lhs (stmt); size_t i; + bool builtin_p; + tree decl; exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3); CALL_EXPR_FN (exp) = gimple_call_fn (stmt); + decl = gimple_call_fndecl (stmt); + builtin_p = decl && DECL_BUILT_IN (decl); + TREE_TYPE (exp) = gimple_call_return_type (stmt); CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt); for (i = 0; i < gimple_call_num_args (stmt); i++) - CALL_EXPR_ARG (exp, i) = gimple_call_arg (stmt, i); + { + tree arg = gimple_call_arg (stmt, i); + gimple def; + /* TER addresses into arguments of builtin functions so we have a + chance to infer more correct alignment information. See PR39954. */ + if (builtin_p + && TREE_CODE (arg) == SSA_NAME + && (def = get_gimple_for_ssa_name (arg)) + && gimple_assign_rhs_code (def) == ADDR_EXPR) + arg = gimple_assign_rhs1 (def); + CALL_EXPR_ARG (exp, i) = arg; + } if (gimple_has_side_effects (stmt)) TREE_SIDE_EFFECTS (exp) = 1; @@ -1833,84 +1956,87 @@ case GIMPLE_ASSIGN: { - tree lhs = gimple_assign_lhs (stmt); - - /* Tree expand used to fiddle with |= and &= of two bitfield - COMPONENT_REFs here. This can't happen with gimple, the LHS - of binary assigns must be a gimple reg. */ - - if (TREE_CODE (lhs) != SSA_NAME - || get_gimple_rhs_class (gimple_expr_code (stmt)) - == GIMPLE_SINGLE_RHS) - { - tree rhs = gimple_assign_rhs1 (stmt); - gcc_assert (get_gimple_rhs_class (gimple_expr_code (stmt)) - == GIMPLE_SINGLE_RHS); - if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (rhs)) - SET_EXPR_LOCATION (rhs, gimple_location (stmt)); - expand_assignment (lhs, rhs, - gimple_assign_nontemporal_move_p (stmt)); - } - else - { - rtx target, temp; - bool nontemporal = gimple_assign_nontemporal_move_p (stmt); - struct separate_ops ops; - bool promoted = false; - - target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); - if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target)) - promoted = true; - - ops.code = gimple_assign_rhs_code (stmt); - ops.type = TREE_TYPE (lhs); - switch (get_gimple_rhs_class (gimple_expr_code (stmt))) - { - case GIMPLE_BINARY_RHS: - ops.op1 = gimple_assign_rhs2 (stmt); - /* Fallthru */ - case GIMPLE_UNARY_RHS: - ops.op0 = gimple_assign_rhs1 (stmt); - break; - default: - gcc_unreachable (); - } - ops.location = gimple_location (stmt); - - /* If we want to use a nontemporal store, force the value to - register first. If we store into a promoted register, - don't directly expand to target. */ - temp = nontemporal || promoted ? 
NULL_RTX : target; - temp = expand_expr_real_2 (&ops, temp, GET_MODE (target), - EXPAND_NORMAL); - - if (temp == target) - ; - else if (promoted) - { - int unsignedp = SUBREG_PROMOTED_UNSIGNED_P (target); - /* If TEMP is a VOIDmode constant, use convert_modes to make - sure that we properly convert it. */ - if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode) - { - temp = convert_modes (GET_MODE (target), - TYPE_MODE (ops.type), - temp, unsignedp); - temp = convert_modes (GET_MODE (SUBREG_REG (target)), - GET_MODE (target), temp, unsignedp); - } - - convert_move (SUBREG_REG (target), temp, unsignedp); - } - else if (nontemporal && emit_storent_insn (target, temp)) - ; - else - { - temp = force_operand (temp, target); - if (temp != target) - emit_move_insn (target, temp); - } - } + tree lhs = gimple_assign_lhs (stmt); + + /* Tree expand used to fiddle with |= and &= of two bitfield + COMPONENT_REFs here. This can't happen with gimple, the LHS + of binary assigns must be a gimple reg. */ + + if (TREE_CODE (lhs) != SSA_NAME + || get_gimple_rhs_class (gimple_expr_code (stmt)) + == GIMPLE_SINGLE_RHS) + { + tree rhs = gimple_assign_rhs1 (stmt); + gcc_assert (get_gimple_rhs_class (gimple_expr_code (stmt)) + == GIMPLE_SINGLE_RHS); + if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (rhs)) + SET_EXPR_LOCATION (rhs, gimple_location (stmt)); + expand_assignment (lhs, rhs, + gimple_assign_nontemporal_move_p (stmt)); + } + else + { + rtx target, temp; + bool nontemporal = gimple_assign_nontemporal_move_p (stmt); + struct separate_ops ops; + bool promoted = false; + + target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target)) + promoted = true; + + ops.code = gimple_assign_rhs_code (stmt); + ops.type = TREE_TYPE (lhs); + switch (get_gimple_rhs_class (gimple_expr_code (stmt))) + { + case GIMPLE_TERNARY_RHS: + ops.op2 = gimple_assign_rhs3 (stmt); + /* Fallthru */ + case GIMPLE_BINARY_RHS: + ops.op1 = gimple_assign_rhs2 (stmt); + /* Fallthru */ + case GIMPLE_UNARY_RHS: + ops.op0 = gimple_assign_rhs1 (stmt); + break; + default: + gcc_unreachable (); + } + ops.location = gimple_location (stmt); + + /* If we want to use a nontemporal store, force the value to + register first. If we store into a promoted register, + don't directly expand to target. */ + temp = nontemporal || promoted ? NULL_RTX : target; + temp = expand_expr_real_2 (&ops, temp, GET_MODE (target), + EXPAND_NORMAL); + + if (temp == target) + ; + else if (promoted) + { + int unsignedp = SUBREG_PROMOTED_UNSIGNED_P (target); + /* If TEMP is a VOIDmode constant, use convert_modes to make + sure that we properly convert it. */ + if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode) + { + temp = convert_modes (GET_MODE (target), + TYPE_MODE (ops.type), + temp, unsignedp); + temp = convert_modes (GET_MODE (SUBREG_REG (target)), + GET_MODE (target), temp, unsignedp); + } + + convert_move (SUBREG_REG (target), temp, unsignedp); + } + else if (nontemporal && emit_storent_insn (target, temp)) + ; + else + { + temp = force_operand (temp, target); + if (temp != target) + emit_move_insn (target, temp); + } + } } break; @@ -2169,15 +2295,21 @@ any rtl. 
*/ static rtx -convert_debug_memory_address (enum machine_mode mode, rtx x) +convert_debug_memory_address (enum machine_mode mode, rtx x, + addr_space_t as) { enum machine_mode xmode = GET_MODE (x); #ifndef POINTERS_EXTEND_UNSIGNED - gcc_assert (mode == Pmode); + gcc_assert (mode == Pmode + || mode == targetm.addr_space.address_mode (as)); gcc_assert (xmode == mode || xmode == VOIDmode); #else - gcc_assert (mode == Pmode || mode == ptr_mode); + rtx temp; + enum machine_mode address_mode = targetm.addr_space.address_mode (as); + enum machine_mode pointer_mode = targetm.addr_space.pointer_mode (as); + + gcc_assert (mode == address_mode || mode == pointer_mode); if (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode) return x; @@ -2191,7 +2323,47 @@ else if (!POINTERS_EXTEND_UNSIGNED) x = gen_rtx_SIGN_EXTEND (mode, x); else - gcc_unreachable (); + { + switch (GET_CODE (x)) + { + case SUBREG: + if ((SUBREG_PROMOTED_VAR_P (x) + || (REG_P (SUBREG_REG (x)) && REG_POINTER (SUBREG_REG (x))) + || (GET_CODE (SUBREG_REG (x)) == PLUS + && REG_P (XEXP (SUBREG_REG (x), 0)) + && REG_POINTER (XEXP (SUBREG_REG (x), 0)) + && CONST_INT_P (XEXP (SUBREG_REG (x), 1)))) + && GET_MODE (SUBREG_REG (x)) == mode) + return SUBREG_REG (x); + break; + case LABEL_REF: + temp = gen_rtx_LABEL_REF (mode, XEXP (x, 0)); + LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x); + return temp; + case SYMBOL_REF: + temp = shallow_copy_rtx (x); + PUT_MODE (temp, mode); + return temp; + case CONST: + temp = convert_debug_memory_address (mode, XEXP (x, 0), as); + if (temp) + temp = gen_rtx_CONST (mode, temp); + return temp; + case PLUS: + case MINUS: + if (CONST_INT_P (XEXP (x, 1))) + { + temp = convert_debug_memory_address (mode, XEXP (x, 0), as); + if (temp) + return gen_rtx_fmt_ee (GET_CODE (x), mode, temp, XEXP (x, 1)); + } + break; + default: + break; + } + /* Don't know how to express ptr_extend as operation in debug info. */ + return NULL; + } #endif /* POINTERS_EXTEND_UNSIGNED */ return x; @@ -2207,29 +2379,32 @@ enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp)); addr_space_t as; - enum machine_mode address_mode; switch (TREE_CODE_CLASS (TREE_CODE (exp))) { case tcc_expression: switch (TREE_CODE (exp)) - { - case COND_EXPR: - goto ternary; - - case TRUTH_ANDIF_EXPR: - case TRUTH_ORIF_EXPR: - case TRUTH_AND_EXPR: - case TRUTH_OR_EXPR: - case TRUTH_XOR_EXPR: - goto binary; - - case TRUTH_NOT_EXPR: - goto unary; - - default: - break; - } + { + case COND_EXPR: + case DOT_PROD_EXPR: + case WIDEN_MULT_PLUS_EXPR: + case WIDEN_MULT_MINUS_EXPR: + case FMA_EXPR: + goto ternary; + + case TRUTH_ANDIF_EXPR: + case TRUTH_ORIF_EXPR: + case TRUTH_AND_EXPR: + case TRUTH_OR_EXPR: + case TRUTH_XOR_EXPR: + goto binary; + + case TRUTH_NOT_EXPR: + goto unary; + + default: + break; + } break; ternary: @@ -2314,31 +2489,35 @@ /* This decl was probably optimized away. 
*/ if (!op0) - { - if (TREE_CODE (exp) != VAR_DECL - || DECL_EXTERNAL (exp) - || !TREE_STATIC (exp) - || !DECL_NAME (exp) - || DECL_HARD_REGISTER (exp) - || mode == VOIDmode) - return NULL; - - op0 = DECL_RTL (exp); - SET_DECL_RTL (exp, NULL); - if (!MEM_P (op0) - || GET_CODE (XEXP (op0, 0)) != SYMBOL_REF - || SYMBOL_REF_DECL (XEXP (op0, 0)) != exp) - return NULL; - } + { + if (TREE_CODE (exp) != VAR_DECL + || DECL_EXTERNAL (exp) + || !TREE_STATIC (exp) + || !DECL_NAME (exp) + || DECL_HARD_REGISTER (exp) + || mode == VOIDmode) + return NULL; + + op0 = make_decl_rtl_for_debug (exp); + if (!MEM_P (op0) + || GET_CODE (XEXP (op0, 0)) != SYMBOL_REF + || SYMBOL_REF_DECL (XEXP (op0, 0)) != exp) + return NULL; + } else op0 = copy_rtx (op0); - if (GET_MODE (op0) == BLKmode) - { - gcc_assert (MEM_P (op0)); - op0 = adjust_address_nv (op0, mode, 0); - return op0; - } + if (GET_MODE (op0) == BLKmode + /* If op0 is not BLKmode, but BLKmode is, adjust_mode + below would ICE. While it is likely a FE bug, + try to be robust here. See PR43166. */ + || mode == BLKmode + || (mode == VOIDmode && GET_MODE (op0) != VOIDmode)) + { + gcc_assert (MEM_P (op0)); + op0 = adjust_address_nv (op0, mode, 0); + return op0; + } /* Fall through. */ @@ -2347,77 +2526,129 @@ case NOP_EXPR: case CONVERT_EXPR: { - enum machine_mode inner_mode = GET_MODE (op0); - - if (mode == inner_mode) - return op0; - - if (inner_mode == VOIDmode) - { - inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); - if (mode == inner_mode) - return op0; - } - - if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode)) - { - if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (inner_mode)) - op0 = simplify_gen_subreg (mode, op0, inner_mode, 0); - else if (GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (inner_mode)) - op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode); - else - op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode); + enum machine_mode inner_mode = GET_MODE (op0); + + if (mode == inner_mode) + return op0; + + if (inner_mode == VOIDmode) + { + if (TREE_CODE (exp) == SSA_NAME) + inner_mode = TYPE_MODE (TREE_TYPE (exp)); + else + inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); + if (mode == inner_mode) + return op0; + } + + if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode)) + { + if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (inner_mode)) + op0 = simplify_gen_subreg (mode, op0, inner_mode, 0); + else if (GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (inner_mode)) + op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode); + else + op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode); + } + else if (FLOAT_MODE_P (mode)) + { + gcc_assert (TREE_CODE (exp) != SSA_NAME); + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))) + op0 = simplify_gen_unary (UNSIGNED_FLOAT, mode, op0, inner_mode); + else + op0 = simplify_gen_unary (FLOAT, mode, op0, inner_mode); + } + else if (FLOAT_MODE_P (inner_mode)) + { + if (unsignedp) + op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode); + else + op0 = simplify_gen_unary (FIX, mode, op0, inner_mode); + } + else if (CONSTANT_P (op0) + || GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (inner_mode)) + op0 = simplify_gen_subreg (mode, op0, inner_mode, + subreg_lowpart_offset (mode, + inner_mode)); + else if (TREE_CODE_CLASS (TREE_CODE (exp)) == tcc_unary + ? 
TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))) + : unsignedp) + op0 = gen_rtx_ZERO_EXTEND (mode, op0); + else + op0 = gen_rtx_SIGN_EXTEND (mode, op0); + + return op0; } - else if (FLOAT_MODE_P (mode)) - { - if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))) - op0 = simplify_gen_unary (UNSIGNED_FLOAT, mode, op0, inner_mode); - else - op0 = simplify_gen_unary (FLOAT, mode, op0, inner_mode); - } - else if (FLOAT_MODE_P (inner_mode)) - { - if (unsignedp) - op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode); - else - op0 = simplify_gen_unary (FIX, mode, op0, inner_mode); - } - else if (CONSTANT_P (op0) - || GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (inner_mode)) - op0 = simplify_gen_subreg (mode, op0, inner_mode, - subreg_lowpart_offset (mode, - inner_mode)); - else if (unsignedp) - op0 = gen_rtx_ZERO_EXTEND (mode, op0); - else - op0 = gen_rtx_SIGN_EXTEND (mode, op0); - - return op0; - } - + + case MEM_REF: + if (!is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0))) + { + tree newexp = fold_binary (MEM_REF, TREE_TYPE (exp), + TREE_OPERAND (exp, 0), + TREE_OPERAND (exp, 1)); + if (newexp) + return expand_debug_expr (newexp); + } + /* FALLTHROUGH */ case INDIRECT_REF: - case ALIGN_INDIRECT_REF: - case MISALIGNED_INDIRECT_REF: op0 = expand_debug_expr (TREE_OPERAND (exp, 0)); if (!op0) return NULL; + if (TREE_CODE (exp) == MEM_REF) + { + if (GET_CODE (op0) == DEBUG_IMPLICIT_PTR + || (GET_CODE (op0) == PLUS + && GET_CODE (XEXP (op0, 0)) == DEBUG_IMPLICIT_PTR)) + /* (mem (debug_implicit_ptr)) might confuse aliasing. + Instead just use get_inner_reference. */ + goto component_ref; + + op1 = expand_debug_expr (TREE_OPERAND (exp, 1)); + if (!op1 || !CONST_INT_P (op1)) + return NULL; + + op0 = plus_constant (op0, INTVAL (op1)); + } + + if (POINTER_TYPE_P (TREE_TYPE (exp))) + as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp))); + else + as = ADDR_SPACE_GENERIC; + + op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as), + op0, as); + if (op0 == NULL_RTX) + return NULL; + + op0 = gen_rtx_MEM (mode, op0); + set_mem_attributes (op0, exp, 0); + if (TREE_CODE (exp) == MEM_REF + && !is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0))) + set_mem_expr (op0, NULL_TREE); + set_mem_addr_space (op0, as); + + return op0; + + case TARGET_MEM_REF: + if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR + && !DECL_RTL_SET_P (TREE_OPERAND (TMR_BASE (exp), 0))) + return NULL; + + op0 = expand_debug_expr + (tree_mem_ref_addr (build_pointer_type (TREE_TYPE (exp)), exp)); + if (!op0) + return NULL; + if (POINTER_TYPE_P (TREE_TYPE (exp))) - { - as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp))); - address_mode = targetm.addr_space.address_mode (as); - } + as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp))); else - { - as = ADDR_SPACE_GENERIC; - address_mode = Pmode; - } - - if (TREE_CODE (exp) == ALIGN_INDIRECT_REF) - { - int align = TYPE_ALIGN_UNIT (TREE_TYPE (exp)); - op0 = gen_rtx_AND (address_mode, op0, GEN_INT (-align)); - } + as = ADDR_SPACE_GENERIC; + + op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as), + op0, as); + if (op0 == NULL_RTX) + return NULL; op0 = gen_rtx_MEM (mode, op0); @@ -2426,24 +2657,7 @@ return op0; - case TARGET_MEM_REF: - if (TMR_SYMBOL (exp) && !DECL_RTL_SET_P (TMR_SYMBOL (exp))) - return NULL; - - op0 = expand_debug_expr - (tree_mem_ref_addr (build_pointer_type (TREE_TYPE (exp)), exp)); - if (!op0) - return NULL; - - as = TYPE_ADDR_SPACE (TREE_TYPE (exp)); - - op0 = gen_rtx_MEM (mode, op0); - - set_mem_attributes (op0, exp, 0); - set_mem_addr_space (op0, as); 
- - return op0; - + component_ref: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: @@ -2452,116 +2666,120 @@ case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: { - enum machine_mode mode1; - HOST_WIDE_INT bitsize, bitpos; - tree offset; - int volatilep = 0; - tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset, - &mode1, &unsignedp, &volatilep, false); - rtx orig_op0; - - if (bitsize == 0) - return NULL; - - orig_op0 = op0 = expand_debug_expr (tem); - - if (!op0) - return NULL; - - if (offset) - { - enum machine_mode addrmode, offmode; - - gcc_assert (MEM_P (op0)); - - op0 = XEXP (op0, 0); - addrmode = GET_MODE (op0); - if (addrmode == VOIDmode) - addrmode = Pmode; - - op1 = expand_debug_expr (offset); - if (!op1) - return NULL; - - offmode = GET_MODE (op1); - if (offmode == VOIDmode) - offmode = TYPE_MODE (TREE_TYPE (offset)); - - if (addrmode != offmode) - op1 = simplify_gen_subreg (addrmode, op1, offmode, - subreg_lowpart_offset (addrmode, - offmode)); - - /* Don't use offset_address here, we don't need a - recognizable address, and we don't want to generate - code. */ - op0 = gen_rtx_MEM (mode, gen_rtx_PLUS (addrmode, op0, op1)); - } - - if (MEM_P (op0)) - { - if (mode1 == VOIDmode) - /* Bitfield. */ - mode1 = smallest_mode_for_size (bitsize, MODE_INT); - if (bitpos >= BITS_PER_UNIT) - { - op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT); - bitpos %= BITS_PER_UNIT; - } - else if (bitpos < 0) - { - HOST_WIDE_INT units - = (-bitpos + BITS_PER_UNIT - 1) / BITS_PER_UNIT; - op0 = adjust_address_nv (op0, mode1, units); - bitpos += units * BITS_PER_UNIT; - } - else if (bitpos == 0 && bitsize == GET_MODE_BITSIZE (mode)) - op0 = adjust_address_nv (op0, mode, 0); - else if (GET_MODE (op0) != mode1) - op0 = adjust_address_nv (op0, mode1, 0); - else - op0 = copy_rtx (op0); - if (op0 == orig_op0) - op0 = shallow_copy_rtx (op0); - set_mem_attributes (op0, exp, 0); - } - - if (bitpos == 0 && mode == GET_MODE (op0)) - return op0; + enum machine_mode mode1; + HOST_WIDE_INT bitsize, bitpos; + tree offset; + int volatilep = 0; + tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset, + &mode1, &unsignedp, &volatilep, false); + rtx orig_op0; + + if (bitsize == 0) + return NULL; + + orig_op0 = op0 = expand_debug_expr (tem); + + if (!op0) + return NULL; + + if (offset) + { + enum machine_mode addrmode, offmode; + + if (!MEM_P (op0)) + return NULL; + + op0 = XEXP (op0, 0); + addrmode = GET_MODE (op0); + if (addrmode == VOIDmode) + addrmode = Pmode; + + op1 = expand_debug_expr (offset); + if (!op1) + return NULL; + + offmode = GET_MODE (op1); + if (offmode == VOIDmode) + offmode = TYPE_MODE (TREE_TYPE (offset)); + + if (addrmode != offmode) + op1 = simplify_gen_subreg (addrmode, op1, offmode, + subreg_lowpart_offset (addrmode, + offmode)); + + /* Don't use offset_address here, we don't need a + recognizable address, and we don't want to generate + code. */ + op0 = gen_rtx_MEM (mode, gen_rtx_PLUS (addrmode, op0, op1)); + } + + if (MEM_P (op0)) + { + if (mode1 == VOIDmode) + /* Bitfield. 
*/ + mode1 = smallest_mode_for_size (bitsize, MODE_INT); + if (bitpos >= BITS_PER_UNIT) + { + op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT); + bitpos %= BITS_PER_UNIT; + } + else if (bitpos < 0) + { + HOST_WIDE_INT units + = (-bitpos + BITS_PER_UNIT - 1) / BITS_PER_UNIT; + op0 = adjust_address_nv (op0, mode1, units); + bitpos += units * BITS_PER_UNIT; + } + else if (bitpos == 0 && bitsize == GET_MODE_BITSIZE (mode)) + op0 = adjust_address_nv (op0, mode, 0); + else if (GET_MODE (op0) != mode1) + op0 = adjust_address_nv (op0, mode1, 0); + else + op0 = copy_rtx (op0); + if (op0 == orig_op0) + op0 = shallow_copy_rtx (op0); + set_mem_attributes (op0, exp, 0); + } + + if (bitpos == 0 && mode == GET_MODE (op0)) + return op0; if (bitpos < 0) return NULL; - if ((bitpos % BITS_PER_UNIT) == 0 - && bitsize == GET_MODE_BITSIZE (mode1)) - { - enum machine_mode opmode = GET_MODE (op0); - - gcc_assert (opmode != BLKmode); - - if (opmode == VOIDmode) - opmode = mode1; - - /* This condition may hold if we're expanding the address - right past the end of an array that turned out not to - be addressable (i.e., the address was only computed in - debug stmts). The gen_subreg below would rightfully - crash, and the address doesn't really exist, so just - drop it. */ - if (bitpos >= GET_MODE_BITSIZE (opmode)) - return NULL; - - return simplify_gen_subreg (mode, op0, opmode, - bitpos / BITS_PER_UNIT); - } - - return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0)) - && TYPE_UNSIGNED (TREE_TYPE (exp)) - ? SIGN_EXTRACT - : ZERO_EXTRACT, mode, - GET_MODE (op0) != VOIDmode - ? GET_MODE (op0) : mode1, - op0, GEN_INT (bitsize), GEN_INT (bitpos)); + if (GET_MODE (op0) == BLKmode) + return NULL; + + if ((bitpos % BITS_PER_UNIT) == 0 + && bitsize == GET_MODE_BITSIZE (mode1)) + { + enum machine_mode opmode = GET_MODE (op0); + + if (opmode == VOIDmode) + opmode = TYPE_MODE (TREE_TYPE (tem)); + + /* This condition may hold if we're expanding the address + right past the end of an array that turned out not to + be addressable (i.e., the address was only computed in + debug stmts). The gen_subreg below would rightfully + crash, and the address doesn't really exist, so just + drop it. */ + if (bitpos >= GET_MODE_BITSIZE (opmode)) + return NULL; + + if ((bitpos % GET_MODE_BITSIZE (mode)) == 0) + return simplify_gen_subreg (mode, op0, opmode, + bitpos / BITS_PER_UNIT); + } + + return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0)) + && TYPE_UNSIGNED (TREE_TYPE (exp)) + ? SIGN_EXTRACT + : ZERO_EXTRACT, mode, + GET_MODE (op0) != VOIDmode + ? GET_MODE (op0) + : TYPE_MODE (TREE_TYPE (tem)), + op0, GEN_INT (bitsize), GEN_INT (bitpos)); } case ABS_EXPR: @@ -2586,6 +2804,22 @@ return gen_rtx_FIX (mode, op0); case POINTER_PLUS_EXPR: + /* For the rare target where pointers are not the same size as + size_t, we need to check for mis-matched modes and correct + the addend. */ + if (op0 && op1 + && GET_MODE (op0) != VOIDmode && GET_MODE (op1) != VOIDmode + && GET_MODE (op0) != GET_MODE (op1)) + { + if (GET_MODE_BITSIZE (GET_MODE (op0)) < GET_MODE_BITSIZE (GET_MODE (op1))) + op1 = gen_rtx_TRUNCATE (GET_MODE (op0), op1); + else + /* We always sign-extend, regardless of the signedness of + the operand, because the operand is always unsigned + here even if the original C expression is signed. */ + op1 = gen_rtx_SIGN_EXTEND (GET_MODE (op0), op1); + } + /* Fall through. 
*/ case PLUS_EXPR: return gen_rtx_PLUS (mode, op0, op1); @@ -2851,9 +3085,35 @@ case ADDR_EXPR: op0 = expand_debug_expr (TREE_OPERAND (exp, 0)); if (!op0 || !MEM_P (op0)) - return NULL; - - op0 = convert_debug_memory_address (mode, XEXP (op0, 0)); + { + if ((TREE_CODE (TREE_OPERAND (exp, 0)) == VAR_DECL + || TREE_CODE (TREE_OPERAND (exp, 0)) == PARM_DECL + || TREE_CODE (TREE_OPERAND (exp, 0)) == RESULT_DECL) + && !TREE_ADDRESSABLE (TREE_OPERAND (exp, 0))) + return gen_rtx_DEBUG_IMPLICIT_PTR (mode, TREE_OPERAND (exp, 0)); + + if (handled_component_p (TREE_OPERAND (exp, 0))) + { + HOST_WIDE_INT bitoffset, bitsize, maxsize; + tree decl + = get_ref_base_and_extent (TREE_OPERAND (exp, 0), + &bitoffset, &bitsize, &maxsize); + if ((TREE_CODE (decl) == VAR_DECL + || TREE_CODE (decl) == PARM_DECL + || TREE_CODE (decl) == RESULT_DECL) + && !TREE_ADDRESSABLE (decl) + && (bitoffset % BITS_PER_UNIT) == 0 + && bitsize > 0 + && bitsize == maxsize) + return plus_constant (gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl), + bitoffset / BITS_PER_UNIT); + } + + return NULL; + } + + as = TYPE_ADDR_SPACE (TREE_TYPE (exp)); + op0 = convert_debug_memory_address (mode, XEXP (op0, 0), as); return op0; @@ -2864,35 +3124,35 @@ case CONSTRUCTOR: if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE) - { - unsigned i; - tree val; - - op0 = gen_rtx_CONCATN - (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))); - - FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val) - { - op1 = expand_debug_expr (val); - if (!op1) - return NULL; - XVECEXP (op0, 0, i) = op1; - } - - if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))) - { - op1 = expand_debug_expr - (fold_convert (TREE_TYPE (TREE_TYPE (exp)), integer_zero_node)); - - if (!op1) - return NULL; - - for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++) - XVECEXP (op0, 0, i) = op1; - } - - return op0; - } + { + unsigned i; + tree val; + + op0 = gen_rtx_CONCATN + (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))); + + FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val) + { + op1 = expand_debug_expr (val); + if (!op1) + return NULL; + XVECEXP (op0, 0, i) = op1; + } + + if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))) + { + op1 = expand_debug_expr + (build_zero_cst (TREE_TYPE (TREE_TYPE (exp)))); + + if (!op1) + return NULL; + + for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++) + XVECEXP (op0, 0, i) = op1; + } + + return op0; + } else goto flag_unsupported; @@ -2902,20 +3162,117 @@ case SSA_NAME: { - int part = var_to_partition (SA.map, exp); - - if (part == NO_PARTITION) - return NULL; - - gcc_assert (part >= 0 && (unsigned)part < SA.map->num_partitions); - - op0 = SA.partition_to_pseudo[part]; - goto adjust_mode; + gimple g = get_gimple_for_ssa_name (exp); + if (g) + { + op0 = expand_debug_expr (gimple_assign_rhs_to_tree (g)); + if (!op0) + return NULL; + } + else + { + int part = var_to_partition (SA.map, exp); + + if (part == NO_PARTITION) + return NULL; + + gcc_assert (part >= 0 && (unsigned)part < SA.map->num_partitions); + + op0 = copy_rtx (SA.partition_to_pseudo[part]); + } + goto adjust_mode; } case ERROR_MARK: return NULL; + /* Vector stuff. For most of the codes we don't have rtl codes. 
*/ + case REALIGN_LOAD_EXPR: + case REDUC_MAX_EXPR: + case REDUC_MIN_EXPR: + case REDUC_PLUS_EXPR: + case VEC_COND_EXPR: + case VEC_EXTRACT_EVEN_EXPR: + case VEC_EXTRACT_ODD_EXPR: + case VEC_INTERLEAVE_HIGH_EXPR: + case VEC_INTERLEAVE_LOW_EXPR: + case VEC_LSHIFT_EXPR: + case VEC_PACK_FIX_TRUNC_EXPR: + case VEC_PACK_SAT_EXPR: + case VEC_PACK_TRUNC_EXPR: + case VEC_RSHIFT_EXPR: + case VEC_UNPACK_FLOAT_HI_EXPR: + case VEC_UNPACK_FLOAT_LO_EXPR: + case VEC_UNPACK_HI_EXPR: + case VEC_UNPACK_LO_EXPR: + case VEC_WIDEN_MULT_HI_EXPR: + case VEC_WIDEN_MULT_LO_EXPR: + return NULL; + + /* Misc codes. */ + case ADDR_SPACE_CONVERT_EXPR: + case FIXED_CONVERT_EXPR: + case OBJ_TYPE_REF: + case WITH_SIZE_EXPR: + return NULL; + + case DOT_PROD_EXPR: + if (SCALAR_INT_MODE_P (GET_MODE (op0)) + && SCALAR_INT_MODE_P (mode)) + { + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))) + op0 = gen_rtx_ZERO_EXTEND (mode, op0); + else + op0 = gen_rtx_SIGN_EXTEND (mode, op0); + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))) + op1 = gen_rtx_ZERO_EXTEND (mode, op1); + else + op1 = gen_rtx_SIGN_EXTEND (mode, op1); + op0 = gen_rtx_MULT (mode, op0, op1); + return gen_rtx_PLUS (mode, op0, op2); + } + return NULL; + + case WIDEN_MULT_EXPR: + case WIDEN_MULT_PLUS_EXPR: + case WIDEN_MULT_MINUS_EXPR: + if (SCALAR_INT_MODE_P (GET_MODE (op0)) + && SCALAR_INT_MODE_P (mode)) + { + enum machine_mode inner_mode = GET_MODE (op0); + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))) + op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode); + else + op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode); + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))) + op1 = simplify_gen_unary (ZERO_EXTEND, mode, op1, inner_mode); + else + op1 = simplify_gen_unary (SIGN_EXTEND, mode, op1, inner_mode); + op0 = gen_rtx_MULT (mode, op0, op1); + if (TREE_CODE (exp) == WIDEN_MULT_EXPR) + return op0; + else if (TREE_CODE (exp) == WIDEN_MULT_PLUS_EXPR) + return gen_rtx_PLUS (mode, op0, op2); + else + return gen_rtx_MINUS (mode, op2, op0); + } + return NULL; + + case WIDEN_SUM_EXPR: + if (SCALAR_INT_MODE_P (GET_MODE (op0)) + && SCALAR_INT_MODE_P (mode)) + { + if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))) + op0 = gen_rtx_ZERO_EXTEND (mode, op0); + else + op0 = gen_rtx_SIGN_EXTEND (mode, op0); + return gen_rtx_PLUS (mode, op0, op1); + } + return NULL; + + case FMA_EXPR: + return gen_rtx_FMA (mode, op0, op1, op2); + default: flag_unsupported: #ifdef ENABLE_CHECKING @@ -3066,6 +3423,105 @@ basic_block new_bb; stmt = gsi_stmt (gsi); + + /* If this statement is a non-debug one, and we generate debug + insns, then this one might be the last real use of a TERed + SSA_NAME, but where there are still some debug uses further + down. Expanding the current SSA name in such further debug + uses by their RHS might lead to wrong debug info, as coalescing + might make the operands of such RHS be placed into the same + pseudo as something else. Like so: + a_1 = a_0 + 1; // Assume a_1 is TERed and a_0 is dead + use(a_1); + a_2 = ... + #DEBUG ... => a_1 + As a_0 and a_2 don't overlap in lifetime, assume they are coalesced. + If we now would expand a_1 by it's RHS (a_0 + 1) in the debug use, + the write to a_2 would actually have clobbered the place which + formerly held a_0. + + So, instead of that, we recognize the situation, and generate + debug temporaries at the last real use of TERed SSA names: + a_1 = a_0 + 1; + #DEBUG #D1 => a_1 + use(a_1); + a_2 = ... + #DEBUG ... 
@@ -3066,6 +3423,105 @@
       basic_block new_bb;

       stmt = gsi_stmt (gsi);
+
+      /* If this statement is a non-debug one, and we generate debug
+	 insns, then this one might be the last real use of a TERed
+	 SSA_NAME, but where there are still some debug uses further
+	 down.  Expanding the current SSA name in such further debug
+	 uses by their RHS might lead to wrong debug info, as coalescing
+	 might make the operands of such RHS be placed into the same
+	 pseudo as something else.  Like so:
+	   a_1 = a_0 + 1;   // Assume a_1 is TERed and a_0 is dead
+	   use(a_1);
+	   a_2 = ...
+	   #DEBUG ... => a_1
+	 As a_0 and a_2 don't overlap in lifetime, assume they are coalesced.
+	 If we now expanded a_1 by its RHS (a_0 + 1) in the debug use,
+	 the write to a_2 would actually have clobbered the place which
+	 formerly held a_0.
+
+	 So, instead of that, we recognize the situation, and generate
+	 debug temporaries at the last real use of TERed SSA names:
+	   a_1 = a_0 + 1;
+	   #DEBUG #D1 => a_1
+	   use(a_1);
+	   a_2 = ...
+	   #DEBUG ... => #D1
+	 */
+      if (MAY_HAVE_DEBUG_INSNS
+	  && SA.values
+	  && !is_gimple_debug (stmt))
+	{
+	  ssa_op_iter iter;
+	  tree op;
+	  gimple def;
+
+	  location_t sloc = get_curr_insn_source_location ();
+	  tree sblock = get_curr_insn_block ();
+
+	  /* Look for SSA names that have their last use here (TERed
+	     names always have only one real use).  */
+	  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
+	    if ((def = get_gimple_for_ssa_name (op)))
+	      {
+		imm_use_iterator imm_iter;
+		use_operand_p use_p;
+		bool have_debug_uses = false;
+
+		FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
+		  {
+		    if (gimple_debug_bind_p (USE_STMT (use_p)))
+		      {
+			have_debug_uses = true;
+			break;
+		      }
+		  }
+
+		if (have_debug_uses)
+		  {
+		    /* OP is a TERed SSA name, with DEF its defining
+		       statement, and where OP is used in further debug
+		       instructions.  Generate a debug temporary, and
+		       replace all uses of OP in debug insns with that
+		       temporary.  */
+		    gimple debugstmt;
+		    tree value = gimple_assign_rhs_to_tree (def);
+		    tree vexpr = make_node (DEBUG_EXPR_DECL);
+		    rtx val;
+		    enum machine_mode mode;
+
+		    set_curr_insn_source_location (gimple_location (def));
+		    set_curr_insn_block (gimple_block (def));
+
+		    DECL_ARTIFICIAL (vexpr) = 1;
+		    TREE_TYPE (vexpr) = TREE_TYPE (value);
+		    if (DECL_P (value))
+		      mode = DECL_MODE (value);
+		    else
+		      mode = TYPE_MODE (TREE_TYPE (value));
+		    DECL_MODE (vexpr) = mode;
+
+		    val = gen_rtx_VAR_LOCATION
+			    (mode, vexpr, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
+
+		    val = emit_debug_insn (val);
+
+		    FOR_EACH_IMM_USE_STMT (debugstmt, imm_iter, op)
+		      {
+			if (!gimple_debug_bind_p (debugstmt))
+			  continue;
+
+			FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
+			  SET_USE (use_p, vexpr);
+
+			update_stmt (debugstmt);
+		      }
+		  }
+	      }
+	  set_curr_insn_source_location (sloc);
+	  set_curr_insn_block (sblock);
+	}
+
       currently_expanding_gimple_stmt = stmt;

       /* Expand this statement, then evaluate the resulting RTL and
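The comment in the hunk above describes the coalescing hazard abstractly; a plain C rendering of the same ordering problem may make it more concrete (all names hypothetical, with slot standing in for the pseudo that a_0 and a_2 share after coalescing):

    /* C analogy of the hazard: once a_0's storage is reused for a_2,
       re-expanding the TERed a_1 as "a_0 + 1" reads stale data, while
       a debug temporary taken at the last real use stays correct.  */
    int
    example (int a_0)
    {
      int slot = a_0;          /* a_0 lives in the shared slot */
      int a_1 = slot + 1;      /* TERed: a_1 = a_0 + 1 */
      int debug_d1 = a_1;      /* #D1: snapshot at the last real use */
      slot = 42;               /* a_2 overwrites the shared slot */
      /* Re-expanding a_1 as "slot + 1" here would yield 43, not
	 a_0 + 1; the debug temporary still holds the right value.  */
      return debug_d1;
    }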
@@ -3077,59 +3533,66 @@
 	  return new_bb;
 	}
       else if (gimple_debug_bind_p (stmt))
-	{
-	  location_t sloc = get_curr_insn_source_location ();
-	  tree sblock = get_curr_insn_block ();
-	  gimple_stmt_iterator nsi = gsi;
-
-	  for (;;)
-	    {
-	      tree var = gimple_debug_bind_get_var (stmt);
-	      tree value;
-	      rtx val;
-	      enum machine_mode mode;
-
-	      if (gimple_debug_bind_has_value_p (stmt))
-		value = gimple_debug_bind_get_value (stmt);
-	      else
-		value = NULL_TREE;
-
-	      last = get_last_insn ();
-
-	      set_curr_insn_source_location (gimple_location (stmt));
-	      set_curr_insn_block (gimple_block (stmt));
-
-	      if (DECL_P (var))
-		mode = DECL_MODE (var);
-	      else
-		mode = TYPE_MODE (TREE_TYPE (var));
-
-	      val = gen_rtx_VAR_LOCATION
-		(mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
-
-	      val = emit_debug_insn (val);
-
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		{
-		  /* We can't dump the insn with a TREE where an RTX
-		     is expected.  */
-		  INSN_VAR_LOCATION_LOC (val) = const0_rtx;
-		  maybe_dump_rtl_for_gimple_stmt (stmt, last);
-		  INSN_VAR_LOCATION_LOC (val) = (rtx)value;
-		}
-
-	      gsi = nsi;
-	      gsi_next (&nsi);
-	      if (gsi_end_p (nsi))
-		break;
-	      stmt = gsi_stmt (nsi);
-	      if (!gimple_debug_bind_p (stmt))
-		break;
-	    }
-
-	  set_curr_insn_source_location (sloc);
-	  set_curr_insn_block (sblock);
-	}
+	{
+	  location_t sloc = get_curr_insn_source_location ();
+	  tree sblock = get_curr_insn_block ();
+	  gimple_stmt_iterator nsi = gsi;
+
+	  for (;;)
+	    {
+	      tree var = gimple_debug_bind_get_var (stmt);
+	      tree value;
+	      rtx val;
+	      enum machine_mode mode;
+
+	      if (gimple_debug_bind_has_value_p (stmt))
+		value = gimple_debug_bind_get_value (stmt);
+	      else
+		value = NULL_TREE;
+
+	      last = get_last_insn ();
+
+	      set_curr_insn_source_location (gimple_location (stmt));
+	      set_curr_insn_block (gimple_block (stmt));
+
+	      if (DECL_P (var))
+		mode = DECL_MODE (var);
+	      else
+		mode = TYPE_MODE (TREE_TYPE (var));
+
+	      val = gen_rtx_VAR_LOCATION
+		(mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
+
+	      val = emit_debug_insn (val);
+
+	      if (dump_file && (dump_flags & TDF_DETAILS))
+		{
+		  /* We can't dump the insn with a TREE where an RTX
+		     is expected.  */
+		  INSN_VAR_LOCATION_LOC (val) = const0_rtx;
+		  maybe_dump_rtl_for_gimple_stmt (stmt, last);
+		  INSN_VAR_LOCATION_LOC (val) = (rtx)value;
+		}
+
+	      /* In order not to generate too many debug temporaries,
+		 we delink all uses of debug statements we already expanded.
+		 Therefore debug statements between definition and real
+		 use of TERed SSA names will continue to use the SSA name,
+		 and not be replaced with debug temps.  */
+	      delink_stmt_imm_use (stmt);
+
+	      gsi = nsi;
+	      gsi_next (&nsi);
+	      if (gsi_end_p (nsi))
+		break;
+	      stmt = gsi_stmt (nsi);
+	      if (!gimple_debug_bind_p (stmt))
+		break;
+	    }
+
+	  set_curr_insn_source_location (sloc);
+	  set_curr_insn_block (sblock);
+	}
       else
 	{
 	  if (is_gimple_call (stmt) && gimple_call_tail_p (stmt))
@@ -3400,8 +3863,9 @@
   FOR_EACH_BB (bb)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
-	gimple stmt = gsi_stmt (gsi);
-	walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+	gimple stmt = gsi_stmt (gsi);
+	if (!is_gimple_debug (stmt))
+	  walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
       }
 }

@@ -3440,7 +3904,7 @@
      stack.  We check PREFERRED_STACK_BOUNDARY if there may be non-call
      exceptions since callgraph doesn't collect incoming stack alignment
      in this case.  */
-  if (flag_non_call_exceptions
+  if (cfun->can_throw_non_call_exceptions
       && PREFERRED_STACK_BOUNDARY > crtl->preferred_stack_boundary)
     preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
   else
@@ -3494,9 +3958,12 @@
   sbitmap blocks;
   edge_iterator ei;
   edge e;
+  rtx var_seq;
   unsigned i;

+  timevar_push (TV_OUT_OF_SSA);
   rewrite_out_of_ssa (&SA);
+  timevar_pop (TV_OUT_OF_SSA);
   SA.partition_to_pseudo = (rtx *)xcalloc (SA.map->num_partitions,
					   sizeof (rtx));

@@ -3515,9 +3982,15 @@
       else
	set_curr_insn_source_location (cfun->function_start_locus);
     }
+  else
+    set_curr_insn_source_location (UNKNOWN_LOCATION);
   set_curr_insn_block (DECL_INITIAL (current_function_decl));
   prologue_locator = curr_insn_locator ();

+#ifdef INSN_SCHEDULING
+  init_sched_attrs ();
+#endif
+
   /* Make sure first insn is a note even if we don't want linenums.
      This makes sure the first insn will never be deleted.
      Also, final expects a note to appear there.  */
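The rewritten debug-bind loop earlier in this region saves the current insn location and block, emits each consecutive bind at its own statement location, and restores the saved state before any real insn follows, so later insns are not mis-attributed. A toy model of that save/emit/restore bracket (all names are ours, not GCC's):

    #include <stdio.h>

    static int cur_loc;                 /* stands in for the insn locator */

    static void
    emit_debug_bind (int loc, const char *var)
    {
      cur_loc = loc;                    /* set_curr_insn_source_location */
      printf ("#DEBUG %s at line %d\n", var, cur_loc);
    }

    static void
    expand_debug_binds (void)
    {
      int saved = cur_loc;              /* sloc */
      emit_debug_bind (10, "x");
      emit_debug_bind (11, "y");        /* consecutive binds batched */
      cur_loc = saved;                  /* restore before real insns */
    }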
@@ -3533,25 +4006,49 @@
   crtl->preferred_stack_boundary = STACK_BOUNDARY;
   cfun->cfg->max_jumptable_ents = 0;

+  /* Resolve the function section.  Some targets, like ARM EABI, rely on
+     knowledge of the function section at expansion time to predict
+     distance of calls.  */
+  resolve_unique_section (current_function_decl, 0, flag_function_sections);
+
   /* Expand the variables recorded during gimple lowering.  */
+  timevar_push (TV_VAR_EXPAND);
+  start_sequence ();
+
   expand_used_vars ();
+
+  var_seq = get_insns ();
+  end_sequence ();
+  timevar_pop (TV_VAR_EXPAND);
+
   /* Honor stack protection warnings.  */
   if (warn_stack_protect)
     {
       if (cfun->calls_alloca)
-	warning (OPT_Wstack_protector,
-		 "not protecting local variables: variable length buffer");
+	warning (OPT_Wstack_protector,
+		 "stack protector not protecting local variables: "
+		 "variable length buffer");
       if (has_short_buffer && !crtl->stack_protect_guard)
-	warning (OPT_Wstack_protector,
-		 "not protecting function: no buffer at least %d bytes long",
-		 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+	warning (OPT_Wstack_protector,
+		 "stack protector not protecting function: "
+		 "all local arrays are less than %d bytes long",
+		 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
     }

   /* Set up parameters and prepare for return, for the function.  */
   expand_function_start (current_function_decl);

+  /* If we emitted any instructions for setting up the variables,
+     emit them before the FUNCTION_START note.  */
+  if (var_seq)
+    {
+      emit_insn_before (var_seq, parm_birth_insn);
+
+      /* In expand_function_end we'll insert the alloca save/restore
+	 before parm_birth_insn.  We've just inserted an alloca call.
+	 Adjust the pointer to match.  */
+      parm_birth_insn = var_seq;
+    }
+
   /* Now that we also have the parameter RTXs, copy them over to our
      partitions.  */
   for (i = 0; i < SA.map->num_partitions; i++)
@@ -3613,7 +4110,13 @@
       expand_debug_locations ();

   execute_free_datastructures ();
+  timevar_push (TV_OUT_OF_SSA);
   finish_out_of_ssa (&SA);
+  timevar_pop (TV_OUT_OF_SSA);
+
+  timevar_push (TV_POST_EXPAND);
+  /* We are no longer in SSA form.  */
+  cfun->gimple_df->in_ssa_p = false;

   /* Expansion is used by optimization passes too, set maybe_hot_insn_p
      conservatively to true until they are all profile aware.  */
@@ -3634,12 +4137,24 @@
       edge e;
       edge_iterator ei;
       for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
-	{
-	  if (e->insns.r)
-	    commit_one_edge_insertion (e);
-	  else
-	    ei_next (&ei);
-	}
+	{
+	  if (e->insns.r)
+	    {
+	      /* Avoid putting insns before parm_birth_insn.  */
+	      if (e->src == ENTRY_BLOCK_PTR
+		  && single_succ_p (ENTRY_BLOCK_PTR)
+		  && parm_birth_insn)
+		{
+		  rtx insns = e->insns.r;
+		  e->insns.r = NULL_RTX;
+		  emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
+		}
+	      else
+		commit_one_edge_insertion (e);
+	    }
+	  else
+	    ei_next (&ei);
+	}
     }

   /* We're done expanding trees to RTL.  */
@@ -3721,6 +4236,7 @@
      the common parent easily.  */
   set_block_levels (DECL_INITIAL (cfun->decl), 0);
   default_rtl_profile ();
+  timevar_pop (TV_POST_EXPAND);

   return 0;
 }
@@ -3734,8 +4250,9 @@
   NULL,                                 /* sub */
   NULL,                                 /* next */
   0,                                    /* static_pass_number */
-  TV_EXPAND,				/* tv_id */
-  PROP_ssa | PROP_gimple_leh | PROP_cfg,/* properties_required */
+  TV_EXPAND,                            /* tv_id */
+  PROP_ssa | PROP_gimple_leh | PROP_cfg
+    | PROP_gimple_lcx,                  /* properties_required */
   PROP_rtl,                             /* properties_provided */
   PROP_ssa | PROP_trees,                /* properties_destroyed */
   TODO_verify_ssa | TODO_verify_flow