comparison gcc/tree-ssa-threadupdate.c @ 145:1830386684a0

description  gcc-9.2.0
author       anatofuz
date         Thu, 13 Feb 2020 11:34:05 +0900
parents      84e7813d76e9
children     (none)
diff -r 84e7813d76e9 -r 1830386684a0 gcc/tree-ssa-threadupdate.c
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1,7 +1,7 @@
 /* Thread edges through blocks and update the control flow and SSA graphs.
-   Copyright (C) 2004-2018 Free Software Foundation, Inc.
+   Copyright (C) 2004-2020 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -233,10 +233,16 @@
 
      The second duplicate block in a path is specific to that path.  Creating
      and sharing a template for that block is considerably more difficult.  */
   basic_block template_block;
 
+  /* If we append debug stmts to the template block after creating it,
+     this iterator won't be the last one in the block, and further
+     copies of the template block shouldn't get debug stmts after
+     it.  */
+  gimple_stmt_iterator template_last_to_copy;
+
   /* Blocks duplicated for the thread.  */
   bitmap duplicate_blocks;
 
   /* TRUE if we thread one or more jumps, FALSE otherwise.  */
   bool jumps_threaded;
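The new template_last_to_copy field records a high-water mark: the last statement that was present when the template block was created. Later copies of the template must stop at that mark, so that debug statements appended afterwards are not duplicated into every copy. A minimal standalone sketch of the idea, with std::list standing in for a gimple statement sequence (the names are illustrative, not GCC API):

  #include <iostream>
  #include <iterator>
  #include <list>
  #include <string>

  int main ()
  {
    /* The template block at creation time; the mark points at its
       last statement.  */
    std::list<std::string> template_block = {"stmt1", "stmt2"};
    auto last_to_copy = std::prev (template_block.end ());

    /* A debug statement appended after the template was created.  */
    template_block.push_back ("debug_bind");

    /* Further copies stop just past the mark, excluding the append.  */
    std::list<std::string> copy (template_block.begin (),
                                 std::next (last_to_copy));

    for (const std::string &s : copy)
      std::cout << s << '\n';               /* prints stmt1, stmt2 only */
  }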
@@ -334,11 +340,21 @@
   /* We can use the generic block duplication code and simply remove
      the stuff we do not need.  */
   rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
 
   FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
-    e->aux = NULL;
+    {
+      e->aux = NULL;
+
+      /* If we duplicate a block with an outgoing edge marked as
+         EDGE_IGNORE, we must clear EDGE_IGNORE so that it doesn't
+         leak out of the current pass.
+
+         It would be better to simplify switch statements and remove
+         the edges before we get here, but the sequencing is nontrivial.  */
+      e->flags &= ~EDGE_IGNORE;
+    }
 
   /* Zero out the profile, since the block is unreachable for now.  */
   rd->dup_blocks[count]->count = profile_count::uninitialized ();
   if (duplicate_blocks)
     bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
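The loop body above now scrubs EDGE_IGNORE from every outgoing edge of the fresh duplicate, so the pass-local marker cannot leak into later passes. A standalone model of the mask-clear pattern (the flag value and types below are stand-ins, not the real GCC definitions):

  #include <cstdio>
  #include <vector>

  /* A stand-in bit for the pass-local edge marker.  */
  const unsigned EDGE_IGNORE_BIT = 1u << 5;

  struct edge_model { unsigned flags; };

  /* After duplicating a block, scrub the marker from every outgoing
     edge of the copy so it cannot leak out of the current pass.  */
  static void
  scrub_duplicated_succs (std::vector<edge_model> &succs)
  {
    for (edge_model &e : succs)
      e.flags &= ~EDGE_IGNORE_BIT;
  }

  int main ()
  {
    std::vector<edge_model> succs = {{EDGE_IGNORE_BIT | 1u}, {2u}};
    scrub_duplicated_succs (succs);
    std::printf ("%u %u\n", succs[0].flags, succs[1].flags);  /* 1 2 */
  }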
@@ -429,11 +445,11 @@
        gsi_next (&gsi), gsi_next (&gsi2))
     {
       gphi *src_phi = gsi.phi ();
       gphi *dest_phi = gsi2.phi ();
       tree val = gimple_phi_arg_def (src_phi, src_idx);
-      source_location locus = gimple_phi_arg_location (src_phi, src_idx);
+      location_t locus = gimple_phi_arg_location (src_phi, src_idx);
 
       SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
       gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
     }
 }
@@ -443,11 +459,11 @@
    LOCUS to location of the constant phi arg and return the value.
    Return DEF directly if either PATH or idx is ZERO.  */
 
 static tree
 get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
-                         basic_block bb, int idx, source_location *locus)
+                         basic_block bb, int idx, location_t *locus)
 {
   tree arg;
   gphi *def_phi;
   basic_block def_bb;
 
@@ -497,11 +513,11 @@
 
   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
     {
       gphi *phi = gsi.phi ();
       tree def = gimple_phi_arg_def (phi, src_indx);
-      source_location locus = gimple_phi_arg_location (phi, src_indx);
+      location_t locus = gimple_phi_arg_location (phi, src_indx);
 
       if (TREE_CODE (def) == SSA_NAME
          && !virtual_operand_p (gimple_phi_result (phi)))
        def = get_value_locus_in_path (def, path, bb, idx, &locus);
 
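The three hunks above are a mechanical rename: source_location becomes location_t, the spelling GCC standardized on for the libcpp location index. A compilable sketch of why the rename needs no call-site changes (the typedefs here are illustrative; the real definitions live in libcpp/include/line-map.h):

  #include <cstdio>

  /* Illustrative aliases only, not the actual libcpp definitions.  */
  typedef unsigned int location_t;
  typedef location_t source_location;   /* legacy spelling */

  static void
  warn_at (location_t loc)
  {
    std::printf ("warning at location %u\n", loc);
  }

  int main ()
  {
    source_location old_style = 42;     /* same underlying type ...  */
    warn_at (old_style);                /* ... so calls need no change */
  }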
@@ -1112,23 +1128,98 @@
   if (local_info->template_block == NULL)
     {
       create_block_for_threading ((*path)[1]->e->src, rd, 0,
                                   &local_info->duplicate_blocks);
       local_info->template_block = rd->dup_blocks[0];
+      local_info->template_last_to_copy
+        = gsi_last_bb (local_info->template_block);
 
       /* We do not create any outgoing edges for the template.  We will
          take care of that in a later traversal.  That way we do not
          create edges that are going to just be deleted.  */
     }
   else
     {
+      gimple_seq seq = NULL;
+      if (gsi_stmt (local_info->template_last_to_copy)
+          != gsi_stmt (gsi_last_bb (local_info->template_block)))
+        {
+          if (gsi_end_p (local_info->template_last_to_copy))
+            {
+              seq = bb_seq (local_info->template_block);
+              set_bb_seq (local_info->template_block, NULL);
+            }
+          else
+            seq = gsi_split_seq_after (local_info->template_last_to_copy);
+        }
       create_block_for_threading (local_info->template_block, rd, 0,
                                   &local_info->duplicate_blocks);
+      if (seq)
+        {
+          if (gsi_end_p (local_info->template_last_to_copy))
+            set_bb_seq (local_info->template_block, seq);
+          else
+            gsi_insert_seq_after (&local_info->template_last_to_copy,
+                                  seq, GSI_SAME_STMT);
+        }
 
       /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
          block.  */
       ssa_fix_duplicate_block_edges (rd, local_info);
+    }
+
+  if (MAY_HAVE_DEBUG_STMTS)
+    {
+      /* Copy debug stmts from each NO_COPY src block to the block
+         that would have been its predecessor, if we can append to it
+         (we can't add stmts after a block-ending stmt), or prepending
+         to the duplicate of the successor, if there is one.  If
+         there's no duplicate successor, we'll mostly drop the blocks
+         on the floor; propagate_threaded_block_debug_into, called
+         elsewhere, will consolidate and preserve the effects of the
+         binds, but none of the markers.  */
+      gimple_stmt_iterator copy_to = gsi_last_bb (rd->dup_blocks[0]);
+      if (!gsi_end_p (copy_to))
+        {
+          if (stmt_ends_bb_p (gsi_stmt (copy_to)))
+            {
+              if (rd->dup_blocks[1])
+                copy_to = gsi_after_labels (rd->dup_blocks[1]);
+              else
+                copy_to = gsi_none ();
+            }
+          else
+            gsi_next (&copy_to);
+        }
+      for (unsigned int i = 2, j = 0; i < path->length (); i++)
+        if ((*path)[i]->type == EDGE_NO_COPY_SRC_BLOCK
+            && gsi_bb (copy_to))
+          {
+            for (gimple_stmt_iterator gsi = gsi_start_bb ((*path)[i]->e->src);
+                 !gsi_end_p (gsi); gsi_next (&gsi))
+              {
+                if (!is_gimple_debug (gsi_stmt (gsi)))
+                  continue;
+                gimple *stmt = gsi_stmt (gsi);
+                gimple *copy = gimple_copy (stmt);
+                gsi_insert_before (&copy_to, copy, GSI_SAME_STMT);
+              }
+          }
+        else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
+                 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
+          {
+            j++;
+            gcc_assert (j < 2);
+            copy_to = gsi_last_bb (rd->dup_blocks[j]);
+            if (!gsi_end_p (copy_to))
+              {
+                if (stmt_ends_bb_p (gsi_stmt (copy_to)))
+                  copy_to = gsi_none ();
+                else
+                  gsi_next (&copy_to);
+              }
+          }
     }
 
   /* Keep walking the hash table.  */
   return 1;
 }
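In the else branch above, everything after template_last_to_copy is temporarily detached (via gsi_split_seq_after, or by taking the whole sequence when the saved iterator is the end iterator, i.e. the template was empty at creation), the shortened template is duplicated, and the tail is spliced back. The template itself is left unchanged while appended debug statements stay out of the copy. A standalone model of that split, copy, reattach sequence, again with std::list in place of gimple_seq (illustrative only):

  #include <iostream>
  #include <iterator>
  #include <list>
  #include <string>

  int main ()
  {
    std::list<std::string> tmpl = {"stmt1", "stmt2", "debug1", "debug2"};
    auto last_to_copy = std::next (tmpl.begin ());      /* at "stmt2" */

    /* Detach everything after the mark before copying.  */
    std::list<std::string> tail;
    tail.splice (tail.begin (), tmpl, std::next (last_to_copy), tmpl.end ());

    /* The copy sees only the prefix that existed at creation time.  */
    std::list<std::string> copy = tmpl;

    /* Reattach the tail so the template itself is left unchanged.  */
    tmpl.splice (tmpl.end (), tail);

    for (const std::string &s : copy)
      std::cout << s << '\n';                /* prints stmt1, stmt2 */
  }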
@@ -1463,11 +1554,11 @@
 
 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
    returns the state.  */
 
 enum bb_dom_status
-determine_bb_domination_status (struct loop *loop, basic_block bb)
+determine_bb_domination_status (class loop *loop, basic_block bb)
 {
   basic_block *bblocks;
   unsigned nblocks, i;
   bool bb_reachable = false;
   edge_iterator ei;
@@ -1521,11 +1612,11 @@
 /* Thread jumps through the header of LOOP.  Returns true if cfg changes.
    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
    to the inside of the loop.  */
 
 static bool
-thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
+thread_through_loop_header (class loop *loop, bool may_peel_loop_headers)
 {
   basic_block header = loop->header;
   edge e, tgt_edge, latch = loop_latch_edge (loop);
   edge_iterator ei;
   basic_block tgt_bb, atgt_bb;
@@ -2224,11 +2315,11 @@
 static bool
 duplicate_thread_path (edge entry, edge exit, basic_block *region,
                        unsigned n_region, unsigned current_path_no)
 {
   unsigned i;
-  struct loop *loop = entry->dest->loop_father;
+  class loop *loop = entry->dest->loop_father;
   edge exit_copy;
   edge redirected;
   profile_count curr_count;
 
   if (!can_copy_bbs_p (region, n_region))
@@ -2424,11 +2515,11 @@
 bool
 thread_through_all_blocks (bool may_peel_loop_headers)
 {
   bool retval = false;
   unsigned int i;
-  struct loop *loop;
+  class loop *loop;
   auto_bitmap threaded_blocks;
   hash_set<edge> visited_starting_edges;
 
   if (!paths.exists ())
     {
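The hunks above change struct loop to class loop, following the class-key used by the type's definition in newer GCC headers. In C++ the two tags name the same type, but keeping references consistent with the definition avoids -Wmismatched-tags style warnings. A small sketch (loop here is a dummy type, not GCC's):

  /* A dummy loop type; GCC's real one lives in cfgloop.h.  */
  class loop;                 /* forward declaration with the new tag */

  struct loop { int num; };   /* either class-key is legal in C++, but
                                 mixed tags can trigger -Wmismatched-tags
                                 on some compilers, hence the rename */

  static int
  loop_num (class loop *l)
  {
    return l->num;
  }

  int main ()
  {
    loop l = {7};
    return loop_num (&l) == 7 ? 0 : 1;
  }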
@@ -2538,11 +2629,11 @@
   /* The order in which we process jump threads can be important.
 
      Consider if we have two jump threading paths A and B.  If the
      target edge of A is the starting edge of B and we thread path A
      first, then we create an additional incoming edge into B->dest that
-     we can not discover as a jump threading path on this iteration.
+     we cannot discover as a jump threading path on this iteration.
 
      If we instead thread B first, then the edge into B->dest will have
      already been redirected before we process path A and path A will
      naturally, with no further work, target the redirected path for B.
 