diff gcc/cfgexpand.c @ 146:351920fa3827
merge
author    anatofuz <anatofuz@cr.ie.u-ryukyu.ac.jp>
date      Sun, 01 Mar 2020 16:13:28 +0900
parents   ce508c72660f 1830386684a0
children  (none)
--- a/gcc/cfgexpand.c	Sun Dec 23 21:23:56 2018 +0900
+++ b/gcc/cfgexpand.c	Sun Mar 01 16:13:28 2020 +0900
@@ -1,5 +1,5 @@
 /* A pass for lowering trees to RTL.
-   Copyright (C) 2004-2018 Free Software Foundation, Inc.
+   Copyright (C) 2004-2020 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -61,7 +61,6 @@
 #include "gimple-pretty-print.h"
 #include "toplev.h"
 #include "debug.h"
-#include "params.h"
 #include "tree-inline.h"
 #include "value-prof.h"
 #include "tree-ssa-live.h"
@@ -107,38 +106,38 @@
 gimple_assign_rhs_to_tree (gimple *stmt)
 {
   tree t;
-  enum gimple_rhs_class grhs_class;
-
-  grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
-
-  if (grhs_class == GIMPLE_TERNARY_RHS)
-    t = build3 (gimple_assign_rhs_code (stmt),
-                TREE_TYPE (gimple_assign_lhs (stmt)),
-                gimple_assign_rhs1 (stmt),
-                gimple_assign_rhs2 (stmt),
-                gimple_assign_rhs3 (stmt));
-  else if (grhs_class == GIMPLE_BINARY_RHS)
-    t = build2 (gimple_assign_rhs_code (stmt),
-                TREE_TYPE (gimple_assign_lhs (stmt)),
-                gimple_assign_rhs1 (stmt),
-                gimple_assign_rhs2 (stmt));
-  else if (grhs_class == GIMPLE_UNARY_RHS)
-    t = build1 (gimple_assign_rhs_code (stmt),
-                TREE_TYPE (gimple_assign_lhs (stmt)),
-                gimple_assign_rhs1 (stmt));
-  else if (grhs_class == GIMPLE_SINGLE_RHS)
+  switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
     {
-      t = gimple_assign_rhs1 (stmt);
-      /* Avoid modifying this tree in place below.  */
-      if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
-           && gimple_location (stmt) != EXPR_LOCATION (t))
-          || (gimple_block (stmt)
-              && currently_expanding_to_rtl
-              && EXPR_P (t)))
-        t = copy_node (t);
+    case GIMPLE_TERNARY_RHS:
+      t = build3 (gimple_assign_rhs_code (stmt),
+                  TREE_TYPE (gimple_assign_lhs (stmt)),
+                  gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt),
+                  gimple_assign_rhs3 (stmt));
+      break;
+    case GIMPLE_BINARY_RHS:
+      t = build2 (gimple_assign_rhs_code (stmt),
+                  TREE_TYPE (gimple_assign_lhs (stmt)),
+                  gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt));
+      break;
+    case GIMPLE_UNARY_RHS:
+      t = build1 (gimple_assign_rhs_code (stmt),
+                  TREE_TYPE (gimple_assign_lhs (stmt)),
+                  gimple_assign_rhs1 (stmt));
+      break;
+    case GIMPLE_SINGLE_RHS:
+      {
+        t = gimple_assign_rhs1 (stmt);
+        /* Avoid modifying this tree in place below.  */
+        if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
+             && gimple_location (stmt) != EXPR_LOCATION (t))
+            || (gimple_block (stmt) && currently_expanding_to_rtl
+                && EXPR_P (t)))
+          t = copy_node (t);
+        break;
+      }
+    default:
+      gcc_unreachable ();
     }
-  else
-    gcc_unreachable ();
 
   if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
     SET_EXPR_LOCATION (t, gimple_location (stmt));
@@ -308,8 +307,9 @@
 
 /* This structure holds data relevant to one variable that will be
    placed in a stack slot.  */
-struct stack_var
+class stack_var
 {
+public:
   /* The Variable.  */
   tree decl;
 
@@ -334,7 +334,7 @@
 #define EOC  ((size_t)-1)
 
 /* We have an array of such objects while deciding allocation.  */
-static struct stack_var *stack_vars;
+static class stack_var *stack_vars;
 static size_t stack_vars_alloc;
 static size_t stack_vars_num;
 static hash_map<tree, size_t> *decl_to_stack_part;
@@ -364,7 +364,7 @@
    we can't do with expected alignment of the stack boundary.  */
 
 static unsigned int
-align_local_variable (tree decl)
+align_local_variable (tree decl, bool really_expand)
 {
   unsigned int align;
 
@@ -373,7 +373,12 @@
   else
     {
      align = LOCAL_DECL_ALIGNMENT (decl);
-      SET_DECL_ALIGN (decl, align);
+      /* Don't change DECL_ALIGN when called from estimated_stack_frame_size.
+         That is done before IPA and could bump alignment based on host
+         backend even for offloaded code which wants different
+         LOCAL_DECL_ALIGNMENT.  */
+      if (really_expand)
+        SET_DECL_ALIGN (decl, align);
     }
   return align / BITS_PER_UNIT;
 }
@@ -421,9 +426,9 @@
 /* Accumulate DECL into STACK_VARS.  */
 
 static void
-add_stack_var (tree decl)
+add_stack_var (tree decl, bool really_expand)
 {
-  struct stack_var *v;
+  class stack_var *v;
 
   if (stack_vars_num >= stack_vars_alloc)
     {
@@ -432,7 +437,7 @@
       else
         stack_vars_alloc = 32;
       stack_vars
-        = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
+        = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
     }
   if (!decl_to_stack_part)
     decl_to_stack_part = new hash_map<tree, size_t>;
@@ -449,7 +454,7 @@
      variables that are simultaneously live.  */
   if (known_eq (v->size, 0U))
     v->size = 1;
-  v->alignb = align_local_variable (decl);
+  v->alignb = align_local_variable (decl, really_expand);
   /* An alignment of zero can mightily confuse us later.  */
   gcc_assert (v->alignb != 0);
 
@@ -471,8 +476,10 @@
 static void
 add_stack_var_conflict (size_t x, size_t y)
 {
-  struct stack_var *a = &stack_vars[x];
-  struct stack_var *b = &stack_vars[y];
+  class stack_var *a = &stack_vars[x];
+  class stack_var *b = &stack_vars[y];
+  if (x == y)
+    return;
   if (!a->conflicts)
     a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
   if (!b->conflicts)
@@ -486,8 +493,8 @@
 static bool
 stack_var_conflict_p (size_t x, size_t y)
 {
-  struct stack_var *a = &stack_vars[x];
-  struct stack_var *b = &stack_vars[y];
+  class stack_var *a = &stack_vars[x];
+  class stack_var *b = &stack_vars[y];
   if (x == y)
    return false;
   /* Partitions containing an SSA name result from gimple registers
@@ -602,7 +609,7 @@
       unsigned i;
       EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
         {
-          struct stack_var *a = &stack_vars[i];
+          class stack_var *a = &stack_vars[i];
          if (!a->conflicts)
            a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
          bitmap_ior_into (a->conflicts, work);
@@ -848,7 +855,7 @@
 static void
 union_stack_vars (size_t a, size_t b)
 {
-  struct stack_var *vb = &stack_vars[b];
+  class stack_var *vb = &stack_vars[b];
   bitmap_iterator bi;
   unsigned u;
 
@@ -858,6 +865,9 @@
   stack_vars[b].representative = a;
   stack_vars[a].next = b;
 
+  /* Make sure A is big enough to hold B.  */
+  stack_vars[a].size = upper_bound (stack_vars[a].size, stack_vars[b].size);
+
   /* Update the required alignment of partition A to account for B.  */
   if (stack_vars[a].alignb < stack_vars[b].alignb)
     stack_vars[a].alignb = stack_vars[b].alignb;
@@ -1017,8 +1027,9 @@
   set_rtl (decl, x);
 }
 
-struct stack_vars_data
+class stack_vars_data
 {
+public:
   /* Vector of offset pairs, always end of some padding followed
      by start of the padding that needs Address Sanitizer protection.
     The vector is in reversed, highest offset pairs come first.  */
@@ -1039,7 +1050,7 @@
    with that location.  */
 
 static void
-expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
+expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
 {
   size_t si, i, j, n = stack_vars_num;
   poly_uint64 large_size = 0, large_alloc = 0;
@@ -1127,14 +1138,23 @@
          && frame_offset.is_constant (&prev_offset)
          && stack_vars[i].size.is_constant ())
        {
+         if (data->asan_vec.is_empty ())
+           {
+             alloc_stack_frame_space (0, ASAN_RED_ZONE_SIZE);
+             prev_offset = frame_offset.to_constant ();
+           }
          prev_offset = align_base (prev_offset,
-                                   MAX (alignb, ASAN_RED_ZONE_SIZE),
+                                   ASAN_MIN_RED_ZONE_SIZE,
                                    !FRAME_GROWS_DOWNWARD);
          tree repr_decl = NULL_TREE;
-         offset
-           = alloc_stack_frame_space (stack_vars[i].size
-                                      + ASAN_RED_ZONE_SIZE,
-                                      MAX (alignb, ASAN_RED_ZONE_SIZE));
+         unsigned HOST_WIDE_INT size
+           = asan_var_and_redzone_size (stack_vars[i].size.to_constant ());
+         if (data->asan_vec.is_empty ())
+           size = MAX (size, ASAN_RED_ZONE_SIZE);
+
+         unsigned HOST_WIDE_INT alignment = MAX (alignb,
+                                                 ASAN_MIN_RED_ZONE_SIZE);
+         offset = alloc_stack_frame_space (size, alignment);
 
          data->asan_vec.safe_push (prev_offset);
          /* Allocating a constant amount of space from a constant
@@ -1315,7 +1335,7 @@
   else
     {
       size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var));
-      byte_align = align_local_variable (var);
+      byte_align = align_local_variable (var, true);
     }
 
   /* We handle highly aligned variables in expand_stack_vars.  */
@@ -1405,7 +1425,7 @@
   if (!use_register_for_decl (var))
     {
       if (defer_stack_allocation (var, true))
-       add_stack_var (var);
+       add_stack_var (var, true);
      else
        expand_one_stack_var_1 (var);
      return;
@@ -1533,7 +1553,7 @@
  bool smallish
    = (poly_int_tree_p (size_unit, &size)
       && (estimated_poly_value (size)
-          < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
+          < param_min_size_for_stack_sharing));
 
  /* If stack protection is enabled, *all* stack variables must be deferred,
     so that we can re-order the strings to the top of the frame.
@@ -1687,14 +1707,14 @@
            }
        }
       else if (defer_stack_allocation (var, toplevel))
-       add_stack_var (origvar);
+       add_stack_var (origvar, really_expand);
      else
        {
          if (really_expand)
            {
              if (lookup_attribute ("naked",
                                    DECL_ATTRIBUTES (current_function_decl)))
-               error ("cannot allocate stack for variable %q+D, naked function.",
+               error ("cannot allocate stack for variable %q+D, naked function",
                       var);
 
              expand_one_stack_var (origvar);
@@ -1773,7 +1793,7 @@
      || t == signed_char_type_node
      || t == unsigned_char_type_node)
    {
-      unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+      unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
      unsigned HOST_WIDE_INT len;
 
      if (!TYPE_SIZE_UNIT (type)
@@ -1874,17 +1894,23 @@
 }
 
 /* Ensure that variables in different stack protection phases conflict
-   so that they are not merged and share the same stack slot.  */
-
-static void
+   so that they are not merged and share the same stack slot.
+   Return true if there are any address taken variables.  */
+
+static bool
 add_stack_protection_conflicts (void)
 {
   size_t i, j, n = stack_vars_num;
   unsigned char *phase;
+  bool ret = false;
 
   phase = XNEWVEC (unsigned char, n);
   for (i = 0; i < n; ++i)
-    phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+    {
+      phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+      if (TREE_ADDRESSABLE (stack_vars[i].decl))
+        ret = true;
+    }
 
   for (i = 0; i < n; ++i)
     {
@@ -1895,6 +1921,7 @@
     }
 
   XDELETEVEC (phase);
+  return ret;
 }
 
 /* Create a decl for the guard at the top of the stack frame.  */
@@ -1979,50 +2006,6 @@
   return estimated_poly_value (size);
 }
 
-/* Helper routine to check if a record or union contains an array field.  */
-
-static int
-record_or_union_type_has_array_p (const_tree tree_type)
-{
-  tree fields = TYPE_FIELDS (tree_type);
-  tree f;
-
-  for (f = fields; f; f = DECL_CHAIN (f))
-    if (TREE_CODE (f) == FIELD_DECL)
-      {
-        tree field_type = TREE_TYPE (f);
-        if (RECORD_OR_UNION_TYPE_P (field_type)
-            && record_or_union_type_has_array_p (field_type))
-          return 1;
-        if (TREE_CODE (field_type) == ARRAY_TYPE)
-          return 1;
-      }
-  return 0;
-}
-
-/* Check if the current function has local referenced variables that
-   have their addresses taken, contain an array, or are arrays.  */
-
-static bool
-stack_protect_decl_p ()
-{
-  unsigned i;
-  tree var;
-
-  FOR_EACH_LOCAL_DECL (cfun, i, var)
-    if (!is_global_var (var))
-      {
-        tree var_type = TREE_TYPE (var);
-        if (VAR_P (var)
-            && (TREE_CODE (var_type) == ARRAY_TYPE
-                || TREE_ADDRESSABLE (var)
-                || (RECORD_OR_UNION_TYPE_P (var_type)
-                    && record_or_union_type_has_array_p (var_type))))
-          return true;
-      }
-  return false;
-}
-
 /* Check if the current function has calls that use a return slot.  */
 
 static bool
@@ -2089,8 +2072,7 @@
     }
 
   if (flag_stack_protect == SPCT_FLAG_STRONG)
-    gen_stack_protect_signal
-      = stack_protect_decl_p () || stack_protect_return_slot_p ();
+    gen_stack_protect_signal = stack_protect_return_slot_p ();
 
   /* At this point all variables on the local_decls with TREE_USED
      set are not associated with any block scope.  Lay them out.  */
@@ -2166,6 +2148,8 @@
 
   if (stack_vars_num > 0)
     {
+      bool has_addressable_vars = false;
+
       add_scope_conflicts ();
 
       /* If stack protection is enabled, we don't share space between
@@ -2175,7 +2159,10 @@
              || (flag_stack_protect == SPCT_FLAG_EXPLICIT
                  && lookup_attribute ("stack_protect",
                                       DECL_ATTRIBUTES (current_function_decl)))))
-        add_stack_protection_conflicts ();
+        has_addressable_vars = add_stack_protection_conflicts ();
+
+      if (flag_stack_protect == SPCT_FLAG_STRONG && has_addressable_vars)
+        gen_stack_protect_signal = true;
 
       /* Now that we have collected all stack variables, and have computed a
          minimal interference graph, attempt to save some stack space.  */
@@ -2192,14 +2179,16 @@
 
     case SPCT_FLAG_STRONG:
       if (gen_stack_protect_signal
-          || cfun->calls_alloca || has_protected_decls
+          || cfun->calls_alloca
+          || has_protected_decls
          || lookup_attribute ("stack_protect",
                               DECL_ATTRIBUTES (current_function_decl)))
        create_stack_guard ();
      break;
 
    case SPCT_FLAG_DEFAULT:
-      if (cfun->calls_alloca || has_protected_decls
+      if (cfun->calls_alloca
+          || has_protected_decls
          || lookup_attribute ("stack_protect",
                               DECL_ATTRIBUTES (current_function_decl)))
        create_stack_guard ();
@@ -2210,14 +2199,15 @@
                               DECL_ATTRIBUTES (current_function_decl)))
        create_stack_guard ();
      break;
+
    default:
-      ;
+      break;
    }
 
  /* Assign rtl to each variable based on these partitions.  */
  if (stack_vars_num > 0)
    {
-      struct stack_vars_data data;
+      class stack_vars_data data;
 
      data.asan_base = NULL_RTX;
      data.asan_alignb = 0;
@@ -2835,7 +2825,8 @@
 
   if (overlap)
     {
-      error ("asm-specifier for variable %qE conflicts with asm clobber list",
+      error ("%<asm%> specifier for variable %qE conflicts with "
+             "%<asm%> clobber list",
             DECL_NAME (overlap));
 
      /* Reset registerness to stop multiple errors emitted for a single
@@ -2847,6 +2838,51 @@
   return false;
 }
 
+/* Check that the given REGNO spanning NREGS is a valid
+   asm clobber operand.  Some HW registers cannot be
+   saved/restored, hence they should not be clobbered by
+   asm statements.  */
+static bool
+asm_clobber_reg_is_valid (int regno, int nregs, const char *regname)
+{
+  bool is_valid = true;
+  HARD_REG_SET regset;
+
+  CLEAR_HARD_REG_SET (regset);
+
+  add_range_to_hard_reg_set (&regset, regno, nregs);
+
+  /* Clobbering the PIC register is an error.  */
+  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+      && overlaps_hard_reg_set_p (regset, Pmode, PIC_OFFSET_TABLE_REGNUM))
+    {
+      /* ??? Diagnose during gimplification?  */
+      error ("PIC register clobbered by %qs in %<asm%>", regname);
+      is_valid = false;
+    }
+  else if (!in_hard_reg_set_p
+           (accessible_reg_set, reg_raw_mode[regno], regno))
+    {
+      /* ??? Diagnose during gimplification?  */
+      error ("the register %qs cannot be clobbered in %<asm%>"
+             " for the current target", regname);
+      is_valid = false;
+    }
+
+  /* Clobbering the stack pointer register is deprecated.  GCC expects
+     the value of the stack pointer after an asm statement to be the same
+     as it was before, so no asm can validly clobber the stack pointer in
+     the usual sense.  Adding the stack pointer to the clobber list has
+     traditionally had some undocumented and somewhat obscure side-effects.  */
+  if (overlaps_hard_reg_set_p (regset, Pmode, STACK_POINTER_REGNUM)
+      && warning (OPT_Wdeprecated, "listing the stack pointer register"
+                  " %qs in a clobber list is deprecated", regname))
+    inform (input_location, "the value of the stack pointer after an %<asm%>"
+            " statement must be the same as it was before the statement");
+
+  return is_valid;
+}
+
 /* Generate RTL for an asm statement with arguments.
    STRING is the instruction template.
    OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
@@ -2979,14 +3015,8 @@
          else
            for (int reg = j; reg < j + nregs; reg++)
              {
-               /* Clobbering the PIC register is an error.  */
-               if (reg == (int) PIC_OFFSET_TABLE_REGNUM)
-                 {
-                   /* ??? Diagnose during gimplification?  */
-                   error ("PIC register clobbered by %qs in %<asm%>",
-                          regname);
-                   return;
-                 }
+               if (!asm_clobber_reg_is_valid (reg, nregs, regname))
+                 return;
 
                SET_HARD_REG_BIT (clobbered_regs, reg);
                rtx x = gen_rtx_REG (reg_raw_mode[reg], reg);
@@ -2994,7 +3024,6 @@
              }
        }
     }
-  unsigned nclobbers = clobber_rvec.length();
 
   /* First pass over inputs and outputs checks validity and sets
      mark_addressable if needed.  */
@@ -3016,6 +3045,55 @@
                                    &allows_mem, &allows_reg, &is_inout))
        return;
 
+      /* If the output is a hard register, verify it doesn't conflict with
+         any other operand's possible hard register use.  */
+      if (DECL_P (val)
+          && REG_P (DECL_RTL (val))
+          && HARD_REGISTER_P (DECL_RTL (val)))
+        {
+          unsigned j, output_hregno = REGNO (DECL_RTL (val));
+          bool early_clobber_p = strchr (constraints[i], '&') != NULL;
+          unsigned long match;
+
+          /* Verify the other outputs do not use the same hard register.  */
+          for (j = i + 1; j < noutputs; ++j)
+            if (DECL_P (output_tvec[j])
+                && REG_P (DECL_RTL (output_tvec[j]))
+                && HARD_REGISTER_P (DECL_RTL (output_tvec[j]))
+                && output_hregno == REGNO (DECL_RTL (output_tvec[j])))
+              error ("invalid hard register usage between output operands");
+
+          /* Verify matching constraint operands use the same hard register
+             and that the non-matching constraint operands do not use the same
+             hard register if the output is an early clobber operand.  */
+          for (j = 0; j < ninputs; ++j)
+            if (DECL_P (input_tvec[j])
+                && REG_P (DECL_RTL (input_tvec[j]))
+                && HARD_REGISTER_P (DECL_RTL (input_tvec[j])))
+              {
+                unsigned input_hregno = REGNO (DECL_RTL (input_tvec[j]));
+                switch (*constraints[j + noutputs])
+                  {
+                  case '0':  case '1':  case '2':  case '3':  case '4':
+                  case '5':  case '6':  case '7':  case '8':  case '9':
+                    match = strtoul (constraints[j + noutputs], NULL, 10);
+                    break;
+                  default:
+                    match = ULONG_MAX;
+                    break;
+                  }
+                if (i == match
+                    && output_hregno != input_hregno)
+                  error ("invalid hard register usage between output operand "
+                         "and matching constraint operand");
+                else if (early_clobber_p
+                         && i != match
+                         && output_hregno == input_hregno)
+                  error ("invalid hard register usage between earlyclobber "
+                         "operand and input operand");
+              }
+        }
+
       if (! allows_reg
          && (allows_mem
              || is_inout
@@ -3169,7 +3247,8 @@
          if (allows_reg && TYPE_MODE (type) != BLKmode)
            op = force_reg (TYPE_MODE (type), op);
          else if (!allows_mem)
-           warning (0, "asm operand %d probably doesn%'t match constraints",
+           warning (0, "%<asm%> operand %d probably does not match "
+                    "constraints",
                    i + noutputs);
          else if (MEM_P (op))
            {
@@ -3217,7 +3296,7 @@
   gcc_assert (constraints.length() == noutputs + ninputs);
 
   /* But it certainly can adjust the clobbers.  */
-  nclobbers = clobber_rvec.length();
+  unsigned nclobbers = clobber_rvec.length ();
 
   /* Third pass checks for easy conflicts.  */
   /* ??? Why are we doing this on trees instead of rtx.  */
@@ -3352,11 +3431,13 @@
             tripping over the under-construction body.  */
          for (unsigned k = 0; k < noutputs; ++k)
            if (reg_overlap_mentioned_p (clobbered_reg, output_rvec[k]))
-             internal_error ("asm clobber conflict with output operand");
+             internal_error ("%<asm%> clobber conflict with "
+                             "output operand");
 
          for (unsigned k = 0; k < ninputs - ninout; ++k)
            if (reg_overlap_mentioned_p (clobbered_reg, input_rvec[k]))
-             internal_error ("asm clobber conflict with input operand");
+             internal_error ("%<asm%> clobber conflict with "
+                             "input operand");
        }
 
       XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg);
@@ -3611,6 +3692,12 @@
 {
   op0 = gimple_return_retval (as_a <greturn *> (stmt));
 
+  /* If a return doesn't have a location, it very likely represents
+     multiple user returns so we cannot let it inherit the location
+     of the last statement of the previous basic block in RTL.  */
+  if (!gimple_has_location (stmt))
+    set_curr_insn_location (cfun->function_end_locus);
+
   if (op0 && op0 != error_mark_node)
     {
       tree result = DECL_RESULT (current_function_decl);
@@ -3782,7 +3869,6 @@
            /* If we want exceptions for non-call insns, any
               may_trap_p instruction may throw.  */
            && GET_CODE (PATTERN (insn)) != CLOBBER
-           && GET_CODE (PATTERN (insn)) != CLOBBER_HIGH
            && GET_CODE (PATTERN (insn)) != USE
            && insn_could_throw_p (insn))
          make_reg_eh_region_note (insn, 0, lp_nr);
@@ -4285,7 +4371,11 @@
        op0 = DECL_RTL_IF_SET (exp);
 
       /* This decl was probably optimized away.  */
-      if (!op0)
+      if (!op0
+          /* At least label RTXen are sometimes replaced by
+             NOTE_INSN_DELETED_LABEL.  Any notes here are not
+             handled by copy_rtx.  */
+          || NOTE_P (op0))
        {
          if (!VAR_P (exp)
              || DECL_EXTERNAL (exp)
@@ -5079,6 +5169,7 @@
     case VEC_PERM_EXPR:
     case VEC_DUPLICATE_EXPR:
     case VEC_SERIES_EXPR:
+    case SAD_EXPR:
       return NULL;
 
     /* Misc codes.  */
@@ -5891,11 +5982,11 @@
     {
       first_block = e->dest;
       redirect_edge_succ (e, init_block);
-      e = make_single_succ_edge (init_block, first_block, flags);
+      make_single_succ_edge (init_block, first_block, flags);
     }
   else
-    e = make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
-                               EDGE_FALLTHRU);
+    make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
+                           EDGE_FALLTHRU);
 
   update_bb_for_insn (init_block);
   return init_block;
@@ -6022,6 +6113,21 @@
 
       *walk_subtrees = 0;
     }
+  /* References of size POLY_INT_CST to a fixed-size object must go
+     through memory.  It's more efficient to force that here than
+     to create temporary slots on the fly.  */
+  else if ((TREE_CODE (t) == MEM_REF || TREE_CODE (t) == TARGET_MEM_REF)
+           && TYPE_SIZE (TREE_TYPE (t))
+           && POLY_INT_CST_P (TYPE_SIZE (TREE_TYPE (t))))
+    {
+      tree base = get_base_address (t);
+      if (base
+          && DECL_P (base)
+          && DECL_MODE (base) != BLKmode
+          && GET_MODE_SIZE (DECL_MODE (base)).is_constant ())
+        TREE_ADDRESSABLE (base) = 1;
+      *walk_subtrees = 0;
+    }
 
   return NULL_TREE;
 }
@@ -6042,7 +6148,24 @@
     {
       gimple *stmt = gsi_stmt (gsi);
       if (!is_gimple_debug (stmt))
-        walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+        {
+          walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+          gcall *call = dyn_cast <gcall *> (stmt);
+          if (call && gimple_call_internal_p (call))
+            switch (gimple_call_internal_fn (call))
+              {
+              case IFN_LOAD_LANES:
+                /* The source must be a MEM.  */
+                mark_addressable (gimple_call_arg (call, 0));
+                break;
+              case IFN_STORE_LANES:
+                /* The destination must be a MEM.  */
+                mark_addressable (gimple_call_lhs (call));
+                break;
+              default:
+                break;
+              }
+        }
     }
 }
 
@@ -6141,7 +6264,25 @@
   tree guard_decl = targetm.stack_protect_guard ();
   rtx x, y;
 
+  crtl->stack_protect_guard_decl = guard_decl;
   x = expand_normal (crtl->stack_protect_guard);
+
+  if (targetm.have_stack_protect_combined_set () && guard_decl)
+    {
+      gcc_assert (DECL_P (guard_decl));
+      y = DECL_RTL (guard_decl);
+
+      /* Allow the target to compute address of Y and copy it to X without
+         leaking Y into a register.  This combined address + copy pattern
+         allows the target to prevent spilling of any intermediate results by
+         splitting it after register allocator.  */
+      if (rtx_insn *insn = targetm.gen_stack_protect_combined_set (x, y))
+        {
+          emit_insn (insn);
+          return;
+        }
+    }
+
   if (guard_decl)
     y = expand_normal (guard_decl);
   else
@@ -6222,6 +6363,9 @@
          avoid_deep_ter_for_debug (gsi_stmt (gsi), 0);
     }
 
+  /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE.  */
+  discover_nonconstant_array_refs ();
+
   /* Make sure all values used by the optimization passes have sane
      defaults.  */
   reg_renumber = 0;
@@ -6256,9 +6400,6 @@
      Also, final expects a note to appear there.  */
   emit_note (NOTE_INSN_DELETED);
 
-  /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE.  */
-  discover_nonconstant_array_refs ();
-
   targetm.expand_to_rtl_hook ();
   crtl->init_stack_alignment ();
   fun->cfg->max_jumptable_ents = 0;
@@ -6288,7 +6429,7 @@
        warning (OPT_Wstack_protector,
                 "stack protector not protecting function: "
                 "all local arrays are less than %d bytes long",
-                (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+                (int) param_ssp_buffer_size);
     }
 
   /* Set up parameters and prepare for return, for the function.  */
@@ -6398,7 +6539,7 @@
 
   /* If the function has too many markers, drop them while expanding.  */
   if (cfun->debug_marker_count
-      >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+      >= param_max_debug_marker_count)
     cfun->debug_nonbind_markers = false;
 
   lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
@@ -6453,37 +6594,27 @@
      split edges which edge insertions might do.  */
   rebuild_jump_labels (get_insns ());
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun),
-                  EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
+  /* If we have a single successor to the entry block, put the pending insns
+     after parm birth, but before NOTE_INSNS_FUNCTION_BEG.  */
+  if (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
     {
-      edge e;
-      edge_iterator ei;
-      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+      edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fun));
+      if (e->insns.r)
        {
-         if (e->insns.r)
-           {
-             rebuild_jump_labels_chain (e->insns.r);
-             /* Put insns after parm birth, but before
-                NOTE_INSNS_FUNCTION_BEG.  */
-             if (e->src == ENTRY_BLOCK_PTR_FOR_FN (fun)
-                 && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
-               {
-                 rtx_insn *insns = e->insns.r;
-                 e->insns.r = NULL;
-                 if (NOTE_P (parm_birth_insn)
-                     && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
-                   emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
-                 else
-                   emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
-               }
-             else
-               commit_one_edge_insertion (e);
-           }
+         rtx_insn *insns = e->insns.r;
+         e->insns.r = NULL;
+         rebuild_jump_labels_chain (insns);
+         if (NOTE_P (parm_birth_insn)
+             && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
+           emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
          else
-           ei_next (&ei);
+           emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
        }
     }
 
+  /* Otherwise, as well as for other edges, take the usual way.  */
+  commit_edge_insertions ();
+
   /* We're done expanding trees to RTL.  */
   currently_expanding_to_rtl = 0;
 
@@ -6516,6 +6647,14 @@
   find_many_sub_basic_blocks (blocks);
   purge_all_dead_edges ();
 
+  /* After initial rtl generation, call back to finish generating
+     exception support code.  We need to do this before cleaning up
+     the CFG as the code does not expect dead landing pads.  */
+  if (fun->eh->region_tree != NULL)
+    finish_eh_generation ();
+
+  /* Call expand_stack_alignment after finishing all
+     updates to crtl->preferred_stack_boundary.  */
   expand_stack_alignment ();
 
   /* Fixup REG_EQUIV notes in the prologue if there are tailcalls in this
@@ -6523,12 +6662,6 @@
   if (crtl->tail_call_emit)
     fixup_tail_calls ();
 
-  /* After initial rtl generation, call back to finish generating
-     exception support code.  We need to do this before cleaning up
-     the CFG as the code does not expect dead landing pads.  */
-  if (fun->eh->region_tree != NULL)
-    finish_eh_generation ();
-
   /* BB subdivision may have created basic blocks that are are only reachable
      from unlikely bbs but not marked as such in the profile.  */
   if (optimize)
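
Note on the @@ -1127,14 +1138,23 @@ hunk: ASan stack frames now budget each variable together with its red zone via asan_var_and_redzone_size and align to ASAN_MIN_RED_ZONE_SIZE (16 bytes) instead of reserving a full ASAN_RED_ZONE_SIZE (32 bytes) per variable. The helper's body lives in gcc/asan.c and is not part of this diff; the following is only a sketch of the size mapping it is believed to implement at this revision, written with plain C types so it compiles standalone:

    /* Sketch (assumption, not shown in this diff) of the gcc/asan.c helper
       asan_var_and_redzone_size: the variable and its trailing red zone are
       sized together, so e.g. a 4-byte local costs 16 bytes total rather
       than the old 4 + 32-byte red zone rounded up to 32-byte alignment.  */
    static unsigned long long
    var_and_redzone_size_sketch (unsigned long long size)
    {
      if (size <= 4)
        return 16;
      if (size <= 16)
        return 32;
      if (size <= 128)
        return size + 32;
      if (size <= 512)
        return size + 64;
      if (size <= 4096)
        return size + 128;
      return size + 256;
    }

The extra alloc_stack_frame_space (0, ASAN_RED_ZONE_SIZE) call for the first protected variable keeps the frame itself 32-byte aligned even though individual red zones shrink.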
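Note on the stack-protector hunks (@@ -1874 through @@ -2175): the separate stack_protect_decl_p walk over local_decls is removed, and -fstack-protector-strong instead learns about address-taken locals from add_stack_protection_conflicts, which already iterates every stack variable and now returns whether any is TREE_ADDRESSABLE. A minimal, target-independent example of a function whose guard now comes from that path:

    #include <stdio.h>

    /* Compiled with -fstack-protector-strong, this function receives a
       canary: scanf takes &x, so x is TREE_ADDRESSABLE, and
       add_stack_protection_conflicts reports that back to its caller.  */
    int
    read_int (void)
    {
      int x = 0;
      if (scanf ("%d", &x) != 1)
        return -1;
      return x;
    }

Inspecting the assembly (gcc -S -fstack-protector-strong) for a __stack_chk_fail reference confirms the guard is still emitted after the refactoring.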
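Note on the new asm_clobber_reg_is_valid helper (@@ -2847): clobber validation is centralized so that PIC-register clobbers remain an error, registers outside accessible_reg_set are rejected for the current target, and stack-pointer clobbers draw a -Wdeprecated warning with an explanatory note. A hypothetical translation unit that triggers the new warning; x86-64 and the register name "rsp" are illustrative choices, not part of the patch:

    /* On x86-64 this now produces:
       warning: listing the stack pointer register 'rsp' in a clobber
       list is deprecated [-Wdeprecated]
       note: the value of the stack pointer after an 'asm' statement
       must be the same as it was before the statement  */
    void
    clobbers_sp (void)
    {
      __asm__ volatile ("" : : : "rsp");
    }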
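Note on the @@ -3016,6 +3045,55 @@ hunk: expand-time verification is added for operands pinned to hard registers via register asm variables. Two outputs may not share a hard register; an input tied by a matching constraint ("0"-style) must use the output's register; and an earlyclobber output must not share a register with an unrelated input. Two hypothetical examples that now draw the new errors; x86-64 register names are illustrative:

    void
    bad_outputs (void)
    {
      register int a __asm__ ("eax");
      register int b __asm__ ("eax");
      /* error: invalid hard register usage between output operands  */
      __asm__ ("" : "=r" (a), "=r" (b));
      (void) a; (void) b;
    }

    void
    bad_earlyclobber (int x)
    {
      register int in __asm__ ("eax") = x;
      register int out __asm__ ("eax");
      /* error: invalid hard register usage between earlyclobber operand
         and input operand  */
      __asm__ ("" : "=&r" (out) : "r" (in));
      (void) out;
    }

Previously such statements were accepted silently and could expand to wrong code; the checks run in the first pass over operands, before any RTL is generated.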