Mercurial > hg > CbC > CbC_gcc
comparison gcc/cfgloopanal.c @ 145:1830386684a0
gcc-9.2.0
| author   | anatofuz                        |
|----------|---------------------------------|
| date     | Thu, 13 Feb 2020 11:34:05 +0900 |
| parents  | 84e7813d76e9                    |
| children |                                 |
comparison
equal
deleted
inserted
replaced
| 131:84e7813d76e9 | 145:1830386684a0 |
|------------------|------------------|
1 /* Natural loop analysis code for GNU compiler. | 1 /* Natural loop analysis code for GNU compiler. |
2 Copyright (C) 2002-2018 Free Software Foundation, Inc. | 2 Copyright (C) 2002-2020 Free Software Foundation, Inc. |
3 | 3 |
4 This file is part of GCC. | 4 This file is part of GCC. |
5 | 5 |
6 GCC is free software; you can redistribute it and/or modify it under | 6 GCC is free software; you can redistribute it and/or modify it under |
7 the terms of the GNU General Public License as published by the Free | 7 the terms of the GNU General Public License as published by the Free |
28 #include "emit-rtl.h" | 28 #include "emit-rtl.h" |
29 #include "cfgloop.h" | 29 #include "cfgloop.h" |
30 #include "explow.h" | 30 #include "explow.h" |
31 #include "expr.h" | 31 #include "expr.h" |
32 #include "graphds.h" | 32 #include "graphds.h" |
33 #include "params.h" | |
34 #include "sreal.h" | 33 #include "sreal.h" |
34 #include "regs.h" | |
35 #include "function-abi.h" | |
35 | 36 |
36 struct target_cfgloop default_target_cfgloop; | 37 struct target_cfgloop default_target_cfgloop; |
37 #if SWITCHABLE_TARGET | 38 #if SWITCHABLE_TARGET |
38 struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop; | 39 struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop; |
39 #endif | 40 #endif |
40 | 41 |
41 /* Checks whether BB is executed exactly once in each LOOP iteration. */ | 42 /* Checks whether BB is executed exactly once in each LOOP iteration. */ |
42 | 43 |
43 bool | 44 bool |
44 just_once_each_iteration_p (const struct loop *loop, const_basic_block bb) | 45 just_once_each_iteration_p (const class loop *loop, const_basic_block bb) |
45 { | 46 { |
46 /* It must be executed at least once each iteration. */ | 47 /* It must be executed at least once each iteration. */ |
47 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) | 48 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) |
48 return false; | 49 return false; |
49 | 50 |
79 edge_iterator ei; | 80 edge_iterator ei; |
80 int src, dest; | 81 int src, dest; |
81 unsigned depth; | 82 unsigned depth; |
82 struct graph *g; | 83 struct graph *g; |
83 int num = number_of_loops (cfun); | 84 int num = number_of_loops (cfun); |
84 struct loop *cloop; | 85 class loop *cloop; |
85 bool irred_loop_found = false; | 86 bool irred_loop_found = false; |
86 int i; | 87 int i; |
87 | 88 |
88 gcc_assert (current_loops != NULL); | 89 gcc_assert (current_loops != NULL); |
89 | 90 |
171 return irred_loop_found; | 172 return irred_loop_found; |
172 } | 173 } |
173 | 174 |
174 /* Counts number of insns inside LOOP. */ | 175 /* Counts number of insns inside LOOP. */ |
175 int | 176 int |
176 num_loop_insns (const struct loop *loop) | 177 num_loop_insns (const class loop *loop) |
177 { | 178 { |
178 basic_block *bbs, bb; | 179 basic_block *bbs, bb; |
179 unsigned i, ninsns = 0; | 180 unsigned i, ninsns = 0; |
180 rtx_insn *insn; | 181 rtx_insn *insn; |
181 | 182 |
195 return ninsns; | 196 return ninsns; |
196 } | 197 } |
197 | 198 |
198 /* Counts number of insns executed on average per iteration LOOP. */ | 199 /* Counts number of insns executed on average per iteration LOOP. */ |
199 int | 200 int |
200 average_num_loop_insns (const struct loop *loop) | 201 average_num_loop_insns (const class loop *loop) |
201 { | 202 { |
202 basic_block *bbs, bb; | 203 basic_block *bbs, bb; |
203 unsigned i, binsns; | 204 unsigned i, binsns; |
204 sreal ninsns; | 205 sreal ninsns; |
205 rtx_insn *insn; | 206 rtx_insn *insn; |
216 binsns++; | 217 binsns++; |
217 | 218 |
218 ninsns += (sreal)binsns * bb->count.to_sreal_scale (loop->header->count); | 219 ninsns += (sreal)binsns * bb->count.to_sreal_scale (loop->header->count); |
219 /* Avoid overflows. */ | 220 /* Avoid overflows. */ |
220 if (ninsns > 1000000) | 221 if (ninsns > 1000000) |
221 return 100000; | 222 { |
223 free (bbs); | |
224 return 1000000; | |
225 } | |
222 } | 226 } |
223 free (bbs); | 227 free (bbs); |
224 | 228 |
225 int64_t ret = ninsns.to_int (); | 229 int64_t ret = ninsns.to_int (); |
226 if (!ret) | 230 if (!ret) |
236 information is not good enough to derive osmething. | 240 information is not good enough to derive osmething. |
237 If BY_PROFILE_ONLY is set, this logic is bypassed and function | 241 If BY_PROFILE_ONLY is set, this logic is bypassed and function |
238 return -1 in those scenarios. */ | 242 return -1 in those scenarios. */ |
239 | 243 |
240 gcov_type | 244 gcov_type |
241 expected_loop_iterations_unbounded (const struct loop *loop, | 245 expected_loop_iterations_unbounded (const class loop *loop, |
242 bool *read_profile_p, | 246 bool *read_profile_p, |
243 bool by_profile_only) | 247 bool by_profile_only) |
244 { | 248 { |
245 edge e; | 249 edge e; |
246 edge_iterator ei; | 250 edge_iterator ei; |
252 /* If we have no profile at all, use AVG_LOOP_NITER. */ | 256 /* If we have no profile at all, use AVG_LOOP_NITER. */ |
253 if (profile_status_for_fn (cfun) == PROFILE_ABSENT) | 257 if (profile_status_for_fn (cfun) == PROFILE_ABSENT) |
254 { | 258 { |
255 if (by_profile_only) | 259 if (by_profile_only) |
256 return -1; | 260 return -1; |
257 expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER); | 261 expected = param_avg_loop_niter; |
258 } | 262 } |
259 else if (loop->latch && (loop->latch->count.initialized_p () | 263 else if (loop->latch && (loop->latch->count.initialized_p () |
260 || loop->header->count.initialized_p ())) | 264 || loop->header->count.initialized_p ())) |
261 { | 265 { |
262 profile_count count_in = profile_count::zero (), | 266 profile_count count_in = profile_count::zero (), |
270 | 274 |
271 if (!count_latch.initialized_p ()) | 275 if (!count_latch.initialized_p ()) |
272 { | 276 { |
273 if (by_profile_only) | 277 if (by_profile_only) |
274 return -1; | 278 return -1; |
275 expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER); | 279 expected = param_avg_loop_niter; |
276 } | 280 } |
277 else if (!count_in.nonzero_p ()) | 281 else if (!count_in.nonzero_p ()) |
278 { | 282 { |
279 if (by_profile_only) | 283 if (by_profile_only) |
280 return -1; | 284 return -1; |
291 } | 295 } |
292 else | 296 else |
293 { | 297 { |
294 if (by_profile_only) | 298 if (by_profile_only) |
295 return -1; | 299 return -1; |
296 expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER); | 300 expected = param_avg_loop_niter; |
297 } | 301 } |
298 | 302 |
299 if (!by_profile_only) | 303 if (!by_profile_only) |
300 { | 304 { |
301 HOST_WIDE_INT max = get_max_loop_iterations_int (loop); | 305 HOST_WIDE_INT max = get_max_loop_iterations_int (loop); |
308 | 312 |
309 /* Returns expected number of LOOP iterations. The returned value is bounded | 313 /* Returns expected number of LOOP iterations. The returned value is bounded |
310 by REG_BR_PROB_BASE. */ | 314 by REG_BR_PROB_BASE. */ |
311 | 315 |
312 unsigned | 316 unsigned |
313 expected_loop_iterations (struct loop *loop) | 317 expected_loop_iterations (class loop *loop) |
314 { | 318 { |
315 gcov_type expected = expected_loop_iterations_unbounded (loop); | 319 gcov_type expected = expected_loop_iterations_unbounded (loop); |
316 return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected); | 320 return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected); |
317 } | 321 } |
318 | 322 |
319 /* Returns the maximum level of nesting of subloops of LOOP. */ | 323 /* Returns the maximum level of nesting of subloops of LOOP. */ |
320 | 324 |
321 unsigned | 325 unsigned |
322 get_loop_level (const struct loop *loop) | 326 get_loop_level (const class loop *loop) |
323 { | 327 { |
324 const struct loop *ploop; | 328 const class loop *ploop; |
325 unsigned mx = 0, l; | 329 unsigned mx = 0, l; |
326 | 330 |
327 for (ploop = loop->inner; ploop; ploop = ploop->next) | 331 for (ploop = loop->inner; ploop; ploop = ploop->next) |
328 { | 332 { |
329 l = get_loop_level (ploop); | 333 l = get_loop_level (ploop); |
351 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | 355 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
352 if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i) | 356 if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i) |
353 && !fixed_regs[i]) | 357 && !fixed_regs[i]) |
354 { | 358 { |
355 target_avail_regs++; | 359 target_avail_regs++; |
356 if (call_used_regs[i]) | 360 /* ??? This is only a rough heuristic. It doesn't cope well |
361 with alternative ABIs, but that's an optimization rather than | |
362 correctness issue. */ | |
363 if (default_function_abi.clobbers_full_reg_p (i)) | |
357 target_clobbered_regs++; | 364 target_clobbered_regs++; |
358 } | 365 } |
359 | 366 |
360 target_res_regs = 3; | 367 target_res_regs = 3; |
361 | 368 |
420 one. */ | 427 one. */ |
421 cost = target_spill_cost [speed] * n_new; | 428 cost = target_spill_cost [speed] * n_new; |
422 | 429 |
423 if (optimize && (flag_ira_region == IRA_REGION_ALL | 430 if (optimize && (flag_ira_region == IRA_REGION_ALL |
424 || flag_ira_region == IRA_REGION_MIXED) | 431 || flag_ira_region == IRA_REGION_MIXED) |
425 && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM) | 432 && number_of_loops (cfun) <= (unsigned) param_ira_max_loops_num) |
426 /* IRA regional allocation deals with high register pressure | 433 /* IRA regional allocation deals with high register pressure |
427 better. So decrease the cost (to do more accurate the cost | 434 better. So decrease the cost (to do more accurate the cost |
428 calculation for IRA, we need to know how many registers lives | 435 calculation for IRA, we need to know how many registers lives |
429 through the loop transparently). */ | 436 through the loop transparently). */ |
430 cost /= 2; | 437 cost /= 2; |
461 /* Return exit edge if loop has only one exit that is likely | 468 /* Return exit edge if loop has only one exit that is likely |
462 to be executed on runtime (i.e. it is not EH or leading | 469 to be executed on runtime (i.e. it is not EH or leading |
463 to noreturn call. */ | 470 to noreturn call. */ |
464 | 471 |
465 edge | 472 edge |
466 single_likely_exit (struct loop *loop) | 473 single_likely_exit (class loop *loop, vec<edge> exits) |
467 { | 474 { |
468 edge found = single_exit (loop); | 475 edge found = single_exit (loop); |
469 vec<edge> exits; | |
470 unsigned i; | 476 unsigned i; |
471 edge ex; | 477 edge ex; |
472 | 478 |
473 if (found) | 479 if (found) |
474 return found; | 480 return found; |
475 exits = get_loop_exit_edges (loop); | |
476 FOR_EACH_VEC_ELT (exits, i, ex) | 481 FOR_EACH_VEC_ELT (exits, i, ex) |
477 { | 482 { |
478 if (probably_never_executed_edge_p (cfun, ex) | 483 if (probably_never_executed_edge_p (cfun, ex) |
479 /* We want to rule out paths to noreturns but not low probabilities | 484 /* We want to rule out paths to noreturns but not low probabilities |
480 resulting from adjustments or combining. | 485 resulting from adjustments or combining. |
483 || ex->probability <= profile_probability::very_unlikely ()) | 488 || ex->probability <= profile_probability::very_unlikely ()) |
484 continue; | 489 continue; |
485 if (!found) | 490 if (!found) |
486 found = ex; | 491 found = ex; |
487 else | 492 else |
488 { | 493 return NULL; |
489 exits.release (); | 494 } |
490 return NULL; | |
491 } | |
492 } | |
493 exits.release (); | |
494 return found; | 495 return found; |
495 } | 496 } |
496 | 497 |
497 | 498 |
498 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs | 499 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs |
499 order against direction of edges from latch. Specially, if | 500 order against direction of edges from latch. Specially, if |
500 header != latch, latch is the 1-st block. */ | 501 header != latch, latch is the 1-st block. */ |
501 | 502 |
502 vec<basic_block> | 503 vec<basic_block> |
503 get_loop_hot_path (const struct loop *loop) | 504 get_loop_hot_path (const class loop *loop) |
504 { | 505 { |
505 basic_block bb = loop->header; | 506 basic_block bb = loop->header; |
506 vec<basic_block> path = vNULL; | 507 vec<basic_block> path = vNULL; |
507 bitmap visited = BITMAP_ALLOC (NULL); | 508 bitmap visited = BITMAP_ALLOC (NULL); |
508 | 509 |