annotate gcc/tree-predcom.c @ 16:04ced10e8804
gcc 7
author | kono |
---|---|
date | Fri, 27 Oct 2017 22:46:09 +0900 |
parents | f6334be47118 |
children | 84e7813d76e9 |
rev | line source |
---|---|
0 | 1 /* Predictive commoning. |
16 | 2 Copyright (C) 2005-2017 Free Software Foundation, Inc. |
9 | 3 |
0 | 4 This file is part of GCC. |
9 | 5 |
0 | 6 GCC is free software; you can redistribute it and/or modify it |
7 under the terms of the GNU General Public License as published by the | |
8 Free Software Foundation; either version 3, or (at your option) any | |
9 later version. | |
9 | 10 |
0 | 11 GCC is distributed in the hope that it will be useful, but WITHOUT |
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
9 | 15 |
0 | 16 You should have received a copy of the GNU General Public License |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 /* This file implements the predictive commoning optimization. Predictive | |
21 commoning can be viewed as CSE around a loop, and with some improvements, | |
22 as generalized strength reduction-- i.e., reusing values computed in | |
23 earlier iterations of a loop in the later ones. So far, the pass only | |
24 handles the most useful case, that is, reusing values of memory references. | |
25 If you think this is all just a special case of PRE, you are sort of right; | |
26 however, concentrating on loops is simpler, and makes it possible to | |
27 incorporate data dependence analysis to detect the opportunities, perform | |
28 loop unrolling to avoid copies together with renaming immediately, | |
29 and if needed, we could also take register pressure into account. | |
30 | |
31 Let us demonstrate what is done on an example: | |
9 | 32 |
0 | 33 for (i = 0; i < 100; i++) |
34 { | |
35 a[i+2] = a[i] + a[i+1]; | |
36 b[10] = b[10] + i; | |
37 c[i] = c[99 - i]; | |
38 d[i] = d[i + 1]; | |
39 } | |
40 | |
41 1) We find data references in the loop, and split them to mutually | |
42 independent groups (i.e., we find components of a data dependence | |
43 graph). We ignore read-read dependences whose distance is not constant. | |
44 (TODO -- we could also ignore antidependences). In this example, we | |
45 find the following groups: | |
46 | |
47 a[i]{read}, a[i+1]{read}, a[i+2]{write} | |
48 b[10]{read}, b[10]{write} | |
49 c[99 - i]{read}, c[i]{write} | |
50 d[i + 1]{read}, d[i]{write} | |
51 | |
52 2) Inside each of the groups, we verify several conditions: | |
53 a) all the references must differ in indices only, and the indices | |
54 must all have the same step | |
55 b) the references must dominate loop latch (and thus, they must be | |
56 ordered by dominance relation). | |
57 c) the distance of the indices must be a small multiple of the step | |
58 We are then able to compute the difference of the references (# of | |
59 iterations before they point to the same place as the first of them). | |
60 Also, in case there are writes in the loop, we split the groups into | |
61 chains whose head is the write whose values are used by the reads in | |
62 the same chain. The chains are then processed independently, | |
63 making the further transformations simpler. Also, the shorter chains | |
64 need the same number of registers, but may require a lower unrolling | |
65 factor in order to get rid of the copies on the loop latch. | |
9 | 66 |
0 | 67 In our example, we get the following chains (the chain for c is invalid). |
68 | |
69 a[i]{read,+0}, a[i+1]{read,-1}, a[i+2]{write,-2} | |
70 b[10]{read,+0}, b[10]{write,+0} | |
71 d[i + 1]{read,+0}, d[i]{write,+1} | |
72 | |
73 3) For each read, we determine the read or write whose value it reuses, | |
74 together with the distance of this reuse. I.e. we take the last | |
75 reference before it with distance 0, or the last of the references | |
76 with the smallest positive distance to the read. Then, we remove | |
77 the references that are not used in any of these chains, discard the | |
78 empty groups, and propagate all the links so that they point to the | |
9 | 79 single root reference of the chain (adjusting their distance |
0 | 80 appropriately). Some extra care needs to be taken for references with |
81 step 0. In our example (the numbers indicate the distance of the | |
82 reuse), | |
83 | |
84 a[i] --> (*) 2, a[i+1] --> (*) 1, a[i+2] (*) | |
85 b[10] --> (*) 1, b[10] (*) | |
86 | |
87 4) The chains are combined together if possible. If the corresponding | |
88 elements of two chains are always combined together with the same | |
89 operator, we remember just the result of this combination, instead | |
90 of remembering the values separately. We may need to perform | |
91 reassociation to enable combining, for example | |
92 | |
93 e[i] + f[i+1] + e[i+1] + f[i] | |
94 | |
95 can be reassociated as | |
96 | |
97 (e[i] + f[i]) + (e[i+1] + f[i+1]) | |
98 | |
99 and we can combine the chains for e and f into one chain. | |
100 | |
101 5) For each root reference (end of the chain) R, let N be maximum distance | |
16 | 102 of a reference reusing its value. Variables R0 up to RN are created, |
0 | 103 together with phi nodes that transfer values from R1 .. RN to |
104 R0 .. R(N-1). | |
105 Initial values are loaded to R0..R(N-1) (in case not all references | |
106 must necessarily be accessed and they may trap, we may fail here; | |
107 TODO sometimes, the loads could be guarded by a check for the number | |
108 of iterations). Values loaded/stored in roots are also copied to | |
109 RN. Other reads are replaced with the appropriate variable Ri. | |
110 Everything is put to SSA form. | |
111 | |
112 As a small improvement, if R0 is dead after the root (i.e., all uses of | |
113 the value with the maximum distance dominate the root), we can avoid | |
114 creating RN and use R0 instead of it. | |
115 | |
116 In our example, we get (only the parts concerning a and b are shown): | |
117 for (i = 0; i < 100; i++) | |
118 { | |
119 f = phi (a[0], s); | |
120 s = phi (a[1], f); | |
121 x = phi (b[10], x); | |
122 | |
123 f = f + s; | |
124 a[i+2] = f; | |
125 x = x + i; | |
126 b[10] = x; | |
127 } | |
128 | |
129 6) Factor F for unrolling is determined as the smallest common multiple of | |
130 (N + 1) for each root reference (N for references for which we avoided | |
131 creating RN). If F and the loop are small enough, the loop is unrolled F | |
132 times. The stores to RN (R0) in the copies of the loop body are | |
133 periodically replaced with R0, R1, ... (R1, R2, ...), so that they can | |
134 be coalesced and the copies can be eliminated. | |
9 | 135 |
0 | 136 TODO -- copy propagation and other optimizations may change the live |
137 ranges of the temporary registers and prevent them from being coalesced; | |
138 this may increase the register pressure. | |
139 | |
140 In our case, F = 2 and the (main loop of the) result is | |
141 | |
142 for (i = 0; i < ...; i += 2) | |
143 { | |
144 f = phi (a[0], f); | |
145 s = phi (a[1], s); | |
146 x = phi (b[10], x); | |
147 | |
148 f = f + s; | |
149 a[i+2] = f; | |
150 x = x + i; | |
151 b[10] = x; | |
152 | |
153 s = s + f; | |
154 a[i+3] = s; | |
155 x = x + i; | |
156 b[10] = x; | |
157 } | |
158 | |
16 | 159 Apart from predictive commoning on Load-Load and Store-Load chains, we |
160 also support Store-Store chains -- stores killed by another store can be | |
161 eliminated. Consider the example below: | |
162 | |
163 for (i = 0; i < n; i++) | |
164 { | |
165 a[i] = 1; | |
166 a[i+2] = 2; | |
167 } | |
168 | |
169 It can be replaced with: | |
170 | |
171 t0 = a[0]; | |
172 t1 = a[1]; | |
173 for (i = 0; i < n; i++) | |
174 { | |
175 a[i] = 1; | |
176 t2 = 2; | |
177 t0 = t1; | |
178 t1 = t2; | |
179 } | |
180 a[n] = t0; | |
181 a[n+1] = t1; | |
182 | |
183 If the loop runs more than 1 iteration, it can be further simplified into: | |
184 | |
185 for (i = 0; i < n; i++) | |
186 { | |
187 a[i] = 1; | |
188 } | |
189 a[n] = 2; | |
190 a[n+1] = 2; | |
191 | |
192 The interesting part is that this can be viewed either as general store motion | |
193 or as general dead store elimination, both within and across iterations. | |
194 | |
195 TODO: For now, we don't support store-store chains in multi-exit loops. We | |
196 force the loop not to be unrolled when there is a store-store chain, even if | |
197 other chains might ask for unrolling. | |
0 | 198 |
199 Predictive commoning can be generalized for arbitrary computations (not | |
200 just memory loads), and also nontrivial transfer functions (e.g., replacing | |
201 i * i with ii_last + 2 * i + 1), to generalize strength reduction. */ | |
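
/* Editor's illustrative sketch (not part of tree-predcom.c): a standalone,
   hand-written version of what steps 5) and 6) above do to the a[] recurrence
   from the example, assuming an unroll factor of 2.  The function names
   original () and predcom_by_hand () are invented for this illustration.  */

#include <assert.h>

#define N 102

static void
original (int *a)
{
  for (int i = 0; i < 100; i++)
    a[i + 2] = a[i] + a[i + 1];
}

static void
predcom_by_hand (int *a)
{
  /* f and s carry a[i] and a[i + 1] across iterations; unrolling twice
     removes the copies between them at the loop latch.  */
  int f = a[0], s = a[1];
  for (int i = 0; i < 100; i += 2)
    {
      f = f + s;        /* value stored to a[i + 2] */
      a[i + 2] = f;
      s = s + f;        /* value stored to a[i + 3] */
      a[i + 3] = s;
    }
}

int
main (void)
{
  int x[N], y[N];
  for (int i = 0; i < N; i++)
    x[i] = y[i] = (i < 2 ? 1 : 0);

  original (x);
  predcom_by_hand (y);

  for (int i = 0; i < N; i++)
    assert (x[i] == y[i]);
  return 0;
}
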
202 | |
203 #include "config.h" | |
204 #include "system.h" | |
205 #include "coretypes.h" | |
16 | 206 #include "backend.h" |
207 #include "rtl.h" | |
0 | 208 #include "tree.h" |
16 | 209 #include "gimple.h" |
210 #include "predict.h" | |
211 #include "tree-pass.h" | |
212 #include "ssa.h" | |
213 #include "gimple-pretty-print.h" | |
214 #include "alias.h" | |
215 #include "fold-const.h" | |
0 | 216 #include "cfgloop.h" |
16 | 217 #include "tree-eh.h" |
218 #include "gimplify.h" | |
219 #include "gimple-iterator.h" | |
220 #include "gimplify-me.h" | |
221 #include "tree-ssa-loop-ivopts.h" | |
222 #include "tree-ssa-loop-manip.h" | |
223 #include "tree-ssa-loop-niter.h" | |
224 #include "tree-ssa-loop.h" | |
225 #include "tree-into-ssa.h" | |
226 #include "tree-dfa.h" | |
227 #include "tree-ssa.h" | |
0 | 228 #include "tree-data-ref.h" |
229 #include "tree-scalar-evolution.h" | |
230 #include "params.h" | |
231 #include "tree-affine.h" | |
16 | 232 #include "builtins.h" |
0 | 233 |
234 /* The maximum number of iterations between the considered memory | |
235 references. */ | |
236 | |
237 #define MAX_DISTANCE (target_avail_regs < 16 ? 4 : 8) | |
9 | 238 |
0 | 239 /* Data references (or phi nodes that carry data reference values across |
240 loop iterations). */ | |
241 | |
9 | 242 typedef struct dref_d |
0 | 243 { |
244 /* The reference itself. */ | |
245 struct data_reference *ref; | |
246 | |
247 /* The statement in which the reference appears. */ | |
16 | 248 gimple *stmt; |
0 | 249 |
250 /* In case that STMT is a phi node, this field is set to the SSA name | |
251 defined by it in replace_phis_by_defined_names (in order to avoid | |
252 pointing to phi node that got reallocated in the meantime). */ | |
253 tree name_defined_by_phi; | |
254 | |
255 /* Distance of the reference from the root of the chain (in number of | |
256 iterations of the loop). */ | |
257 unsigned distance; | |
258 | |
259 /* Number of iterations offset from the first reference in the component. */ | |
16 | 260 widest_int offset; |
0 | 261 |
262 /* Number of the reference in a component, in dominance ordering. */ | |
263 unsigned pos; | |
264 | |
265 /* True if the memory reference is always accessed when the loop is | |
266 entered. */ | |
267 unsigned always_accessed : 1; | |
268 } *dref; | |
269 | |
270 | |
271 /* Type of the chain of the references. */ | |
272 | |
273 enum chain_type | |
274 { | |
275 /* The addresses of the references in the chain are constant. */ | |
276 CT_INVARIANT, | |
277 | |
278 /* There are only loads in the chain. */ | |
279 CT_LOAD, | |
280 | |
281 /* Root of the chain is store, the rest are loads. */ | |
282 CT_STORE_LOAD, | |
283 | |
16 | 284 /* There are only stores in the chain. */ |
285 CT_STORE_STORE, | |
286 | |
0 | 287 /* A combination of two chains. */ |
288 CT_COMBINATION | |
289 }; | |
290 | |
291 /* Chains of data references. */ | |
292 | |
293 typedef struct chain | |
294 { | |
295 /* Type of the chain. */ | |
296 enum chain_type type; | |
297 | |
298 /* For combination chains, the operator and the two chains that are | |
299 combined, and the type of the result. */ | |
300 enum tree_code op; | |
301 tree rslt_type; | |
302 struct chain *ch1, *ch2; | |
303 | |
304 /* The references in the chain. */ | |
16 | 305 vec<dref> refs; |
0 | 306 |
307 /* The maximum distance of the reference in the chain from the root. */ | |
308 unsigned length; | |
309 | |
310 /* The variables used to copy the value throughout iterations. */ | |
16 | 311 vec<tree> vars; |
0 | 312 |
313 /* Initializers for the variables. */ | |
16 | 314 vec<tree> inits; |
315 | |
316 /* Finalizers for the eliminated stores. */ | |
317 vec<tree> finis; | |
318 | |
319 /* gimple stmts initializing the initial variables of the chain. */ | |
320 gimple_seq init_seq; | |
321 | |
322 /* gimple stmts finalizing the eliminated stores of the chain. */ | |
323 gimple_seq fini_seq; | |
0 | 324 |
325 /* True if there is a use of a variable with the maximal distance | |
326 that comes after the root in the loop. */ | |
327 unsigned has_max_use_after : 1; | |
328 | |
329 /* True if all the memory references in the chain are always accessed. */ | |
330 unsigned all_always_accessed : 1; | |
331 | |
332 /* True if this chain was combined together with some other chain. */ | |
333 unsigned combined : 1; | |
16 | 334 |
335 /* True if this is a store elimination chain and the eliminated stores store | |
336 a loop-invariant value into memory. */ | |
337 unsigned inv_store_elimination : 1; | |
0 | 338 } *chain_p; |
339 | |
340 | |
341 /* Describes the knowledge about the step of the memory references in | |
342 the component. */ | |
343 | |
344 enum ref_step_type | |
345 { | |
346 /* The step is zero. */ | |
347 RS_INVARIANT, | |
348 | |
349 /* The step is nonzero. */ | |
350 RS_NONZERO, | |
351 | |
352 /* The step may or may not be nonzero. */ | |
353 RS_ANY | |
354 }; | |
355 | |
356 /* Components of the data dependence graph. */ | |
357 | |
358 struct component | |
359 { | |
360 /* The references in the component. */ | |
16 | 361 vec<dref> refs; |
0 | 362 |
363 /* What we know about the step of the references in the component. */ | |
364 enum ref_step_type comp_step; | |
365 | |
16 | 366 /* True if all references in component are stores and we try to do |
367 intra/inter loop iteration dead store elimination. */ | |
368 bool eliminate_store_p; | |
369 | |
0 | 370 /* Next component in the list. */ |
371 struct component *next; | |
372 }; | |
373 | |
374 /* Bitmap of ssa names defined by looparound phi nodes covered by chains. */ | |
375 | |
376 static bitmap looparound_phis; | |
377 | |
378 /* Cache used by tree_to_aff_combination_expand. */ | |
379 | |
16 | 380 static hash_map<tree, name_expansion *> *name_expansions; |
0 | 381 |
382 /* Dumps data reference REF to FILE. */ | |
383 | |
384 extern void dump_dref (FILE *, dref); | |
385 void | |
386 dump_dref (FILE *file, dref ref) | |
387 { | |
388 if (ref->ref) | |
389 { | |
390 fprintf (file, " "); | |
391 print_generic_expr (file, DR_REF (ref->ref), TDF_SLIM); | |
392 fprintf (file, " (id %u%s)\n", ref->pos, | |
393 DR_IS_READ (ref->ref) ? "" : ", write"); | |
394 | |
395 fprintf (file, " offset "); | |
16 | 396 print_decs (ref->offset, file); |
0 | 397 fprintf (file, "\n"); |
398 | |
399 fprintf (file, " distance %u\n", ref->distance); | |
400 } | |
401 else | |
402 { | |
403 if (gimple_code (ref->stmt) == GIMPLE_PHI) | |
404 fprintf (file, " looparound ref\n"); | |
405 else | |
406 fprintf (file, " combination ref\n"); | |
407 fprintf (file, " in statement "); | |
408 print_gimple_stmt (file, ref->stmt, 0, TDF_SLIM); | |
409 fprintf (file, "\n"); | |
410 fprintf (file, " distance %u\n", ref->distance); | |
411 } | |
412 | |
413 } | |
414 | |
415 /* Dumps CHAIN to FILE. */ | |
416 | |
417 extern void dump_chain (FILE *, chain_p); | |
418 void | |
419 dump_chain (FILE *file, chain_p chain) | |
420 { | |
421 dref a; | |
422 const char *chain_type; | |
423 unsigned i; | |
424 tree var; | |
425 | |
426 switch (chain->type) | |
427 { | |
428 case CT_INVARIANT: | |
429 chain_type = "Load motion"; | |
430 break; | |
431 | |
432 case CT_LOAD: | |
433 chain_type = "Loads-only"; | |
434 break; | |
435 | |
436 case CT_STORE_LOAD: | |
437 chain_type = "Store-loads"; | |
438 break; | |
439 | |
16 | 440 case CT_STORE_STORE: |
441 chain_type = "Store-stores"; | |
442 break; | |
443 | |
0 | 444 case CT_COMBINATION: |
445 chain_type = "Combination"; | |
446 break; | |
447 | |
448 default: | |
449 gcc_unreachable (); | |
450 } | |
451 | |
452 fprintf (file, "%s chain %p%s\n", chain_type, (void *) chain, | |
453 chain->combined ? " (combined)" : ""); | |
454 if (chain->type != CT_INVARIANT) | |
455 fprintf (file, " max distance %u%s\n", chain->length, | |
456 chain->has_max_use_after ? "" : ", may reuse first"); | |
457 | |
458 if (chain->type == CT_COMBINATION) | |
459 { | |
460 fprintf (file, " equal to %p %s %p in type ", | |
461 (void *) chain->ch1, op_symbol_code (chain->op), | |
462 (void *) chain->ch2); | |
463 print_generic_expr (file, chain->rslt_type, TDF_SLIM); | |
464 fprintf (file, "\n"); | |
465 } | |
466 | |
16 | 467 if (chain->vars.exists ()) |
0 | 468 { |
469 fprintf (file, " vars"); | |
16 | 470 FOR_EACH_VEC_ELT (chain->vars, i, var) |
0 | 471 { |
472 fprintf (file, " "); | |
473 print_generic_expr (file, var, TDF_SLIM); | |
474 } | |
475 fprintf (file, "\n"); | |
476 } | |
477 | |
16 | 478 if (chain->inits.exists ()) |
0 | 479 { |
480 fprintf (file, " inits"); | |
16 | 481 FOR_EACH_VEC_ELT (chain->inits, i, var) |
0 | 482 { |
483 fprintf (file, " "); | |
484 print_generic_expr (file, var, TDF_SLIM); | |
485 } | |
486 fprintf (file, "\n"); | |
487 } | |
488 | |
489 fprintf (file, " references:\n"); | |
16 | 490 FOR_EACH_VEC_ELT (chain->refs, i, a) |
0 | 491 dump_dref (file, a); |
492 | |
493 fprintf (file, "\n"); | |
494 } | |
495 | |
496 /* Dumps CHAINS to FILE. */ | |
497 | |
16 | 498 extern void dump_chains (FILE *, vec<chain_p> ); |
0 | 499 void |
16 | 500 dump_chains (FILE *file, vec<chain_p> chains) |
0 | 501 { |
502 chain_p chain; | |
503 unsigned i; | |
504 | |
16 | 505 FOR_EACH_VEC_ELT (chains, i, chain) |
0 | 506 dump_chain (file, chain); |
507 } | |
508 | |
509 /* Dumps COMP to FILE. */ | |
510 | |
511 extern void dump_component (FILE *, struct component *); | |
512 void | |
513 dump_component (FILE *file, struct component *comp) | |
514 { | |
515 dref a; | |
516 unsigned i; | |
517 | |
518 fprintf (file, "Component%s:\n", | |
519 comp->comp_step == RS_INVARIANT ? " (invariant)" : ""); | |
16 | 520 FOR_EACH_VEC_ELT (comp->refs, i, a) |
0 | 521 dump_dref (file, a); |
522 fprintf (file, "\n"); | |
523 } | |
524 | |
525 /* Dumps COMPS to FILE. */ | |
526 | |
527 extern void dump_components (FILE *, struct component *); | |
528 void | |
529 dump_components (FILE *file, struct component *comps) | |
530 { | |
531 struct component *comp; | |
532 | |
533 for (comp = comps; comp; comp = comp->next) | |
534 dump_component (file, comp); | |
535 } | |
536 | |
537 /* Frees a chain CHAIN. */ | |
538 | |
539 static void | |
540 release_chain (chain_p chain) | |
541 { | |
542 dref ref; | |
543 unsigned i; | |
544 | |
545 if (chain == NULL) | |
546 return; | |
547 | |
16 | 548 FOR_EACH_VEC_ELT (chain->refs, i, ref) |
0 | 549 free (ref); |
550 | |
16 | 551 chain->refs.release (); |
552 chain->vars.release (); | |
553 chain->inits.release (); | |
554 if (chain->init_seq) | |
555 gimple_seq_discard (chain->init_seq); | |
556 | |
557 chain->finis.release (); | |
558 if (chain->fini_seq) | |
559 gimple_seq_discard (chain->fini_seq); | |
0 | 560 |
561 free (chain); | |
562 } | |
563 | |
564 /* Frees CHAINS. */ | |
565 | |
566 static void | |
16 | 567 release_chains (vec<chain_p> chains) |
0 | 568 { |
569 unsigned i; | |
570 chain_p chain; | |
571 | |
16 | 572 FOR_EACH_VEC_ELT (chains, i, chain) |
0 | 573 release_chain (chain); |
16 | 574 chains.release (); |
0 | 575 } |
576 | |
577 /* Frees a component COMP. */ | |
578 | |
579 static void | |
580 release_component (struct component *comp) | |
581 { | |
16 | 582 comp->refs.release (); |
0 | 583 free (comp); |
584 } | |
585 | |
586 /* Frees list of components COMPS. */ | |
587 | |
588 static void | |
589 release_components (struct component *comps) | |
590 { | |
591 struct component *act, *next; | |
592 | |
593 for (act = comps; act; act = next) | |
594 { | |
595 next = act->next; | |
596 release_component (act); | |
597 } | |
598 } | |
599 | |
600 /* Finds a root of tree given by FATHERS containing A, and performs path | |
601 shortening. */ | |
602 | |
603 static unsigned | |
604 component_of (unsigned fathers[], unsigned a) | |
605 { | |
606 unsigned root, n; | |
607 | |
608 for (root = a; root != fathers[root]; root = fathers[root]) | |
609 continue; | |
610 | |
611 for (; a != root; a = n) | |
612 { | |
613 n = fathers[a]; | |
614 fathers[a] = root; | |
615 } | |
616 | |
617 return root; | |
618 } | |
619 | |
620 /* Join operation for DFU. FATHERS gives the tree, SIZES are sizes of the | |
621 components, A and B are components to merge. */ | |
622 | |
623 static void | |
624 merge_comps (unsigned fathers[], unsigned sizes[], unsigned a, unsigned b) | |
625 { | |
626 unsigned ca = component_of (fathers, a); | |
627 unsigned cb = component_of (fathers, b); | |
628 | |
629 if (ca == cb) | |
630 return; | |
631 | |
632 if (sizes[ca] < sizes[cb]) | |
633 { | |
634 sizes[cb] += sizes[ca]; | |
635 fathers[ca] = cb; | |
636 } | |
637 else | |
638 { | |
639 sizes[ca] += sizes[cb]; | |
640 fathers[cb] = ca; | |
641 } | |
642 } | |
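
/* Editor's illustrative sketch (not part of tree-predcom.c): the effect of
   the union-find structure maintained by component_of/merge_comps above, on
   plain arrays.  find_root is a simplified stand-in for component_of (no
   path shortening, no size-balanced merging); the indices play the role of
   data references, with index 3 acting as the extra "bad" slot.  */

#include <assert.h>

static unsigned
find_root (const unsigned fathers[], unsigned a)
{
  while (a != fathers[a])
    a = fathers[a];
  return a;
}

int
main (void)
{
  unsigned fathers[4] = { 0, 1, 2, 3 };

  /* A dependence between refs 0 and 1 and one between refs 1 and 2 merge
     all three into a single component; ref 3 stays on its own.  */
  fathers[find_root (fathers, 0)] = find_root (fathers, 1);
  fathers[find_root (fathers, 1)] = find_root (fathers, 2);

  assert (find_root (fathers, 0) == find_root (fathers, 2));
  assert (find_root (fathers, 3) != find_root (fathers, 0));
  return 0;
}
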
643 | |
644 /* Returns true if A is a reference that is suitable for predictive commoning | |
645 in the innermost loop that contains it. REF_STEP is set according to the | |
646 step of the reference A. */ | |
647 | |
648 static bool | |
649 suitable_reference_p (struct data_reference *a, enum ref_step_type *ref_step) | |
650 { | |
651 tree ref = DR_REF (a), step = DR_STEP (a); | |
652 | |
653 if (!step | |
16 | 654 || TREE_THIS_VOLATILE (ref) |
0 | 655 || !is_gimple_reg_type (TREE_TYPE (ref)) |
656 || tree_could_throw_p (ref)) | |
657 return false; | |
658 | |
659 if (integer_zerop (step)) | |
660 *ref_step = RS_INVARIANT; | |
661 else if (integer_nonzerop (step)) | |
662 *ref_step = RS_NONZERO; | |
663 else | |
664 *ref_step = RS_ANY; | |
665 | |
666 return true; | |
667 } | |
668 | |
669 /* Stores DR_OFFSET (DR) + DR_INIT (DR) to OFFSET. */ | |
670 | |
671 static void | |
672 aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset) | |
673 { | |
16 | 674 tree type = TREE_TYPE (DR_OFFSET (dr)); |
0 | 675 aff_tree delta; |
676 | |
16 | 677 tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset, |
0 | 678 &name_expansions); |
16 | 679 aff_combination_const (&delta, type, wi::to_widest (DR_INIT (dr))); |
0 | 680 aff_combination_add (offset, &delta); |
681 } | |
682 | |
683 /* Determines number of iterations of the innermost enclosing loop before B | |
684 refers to exactly the same location as A and stores it to OFF. If A and | |
685 B do not have the same step, they never meet, or anything else fails, | |
686 returns false, otherwise returns true. Both A and B are assumed to | |
687 satisfy suitable_reference_p. */ | |
688 | |
689 static bool | |
690 determine_offset (struct data_reference *a, struct data_reference *b, | |
16 | 691 widest_int *off) |
0 | 692 { |
693 aff_tree diff, baseb, step; | |
694 tree typea, typeb; | |
695 | |
696 /* Check that both the references access the location in the same type. */ | |
697 typea = TREE_TYPE (DR_REF (a)); | |
698 typeb = TREE_TYPE (DR_REF (b)); | |
699 if (!useless_type_conversion_p (typeb, typea)) | |
700 return false; | |
701 | |
702 /* Check whether the base address and the step of both references is the | |
703 same. */ | |
704 if (!operand_equal_p (DR_STEP (a), DR_STEP (b), 0) | |
705 || !operand_equal_p (DR_BASE_ADDRESS (a), DR_BASE_ADDRESS (b), 0)) | |
706 return false; | |
707 | |
708 if (integer_zerop (DR_STEP (a))) | |
709 { | |
710 /* If the references have loop invariant address, check that they access | |
711 exactly the same location. */ | |
16 | 712 *off = 0; |
0 | 713 return (operand_equal_p (DR_OFFSET (a), DR_OFFSET (b), 0) |
714 && operand_equal_p (DR_INIT (a), DR_INIT (b), 0)); | |
715 } | |
716 | |
717 /* Compare the offsets of the addresses, and check whether the difference | |
718 is a multiple of step. */ | |
719 aff_combination_dr_offset (a, &diff); | |
720 aff_combination_dr_offset (b, &baseb); | |
16 | 721 aff_combination_scale (&baseb, -1); |
0 | 722 aff_combination_add (&diff, &baseb); |
723 | |
16 | 724 tree_to_aff_combination_expand (DR_STEP (a), TREE_TYPE (DR_STEP (a)), |
0 | 725 &step, &name_expansions); |
726 return aff_combination_constant_multiple_p (&diff, &step, off); | |
727 } | |
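
/* Editor's illustrative sketch (not part of tree-predcom.c): the constant
   part of the arithmetic determine_offset performs for A = a[i] and
   B = a[i + 2] from the example at the top of the file, assuming 4-byte
   ints so that both references have DR_STEP 4 and DR_INIT 0 resp. 8.
   The result -2 matches the "a[i+2]{write,-2}" annotation in that example.  */

#include <assert.h>

int
main (void)
{
  long step = 4;        /* DR_STEP: both references advance 4 bytes per iteration.  */
  long init_a = 0;      /* constant offset (DR_INIT) of a[i]  */
  long init_b = 8;      /* constant offset (DR_INIT) of a[i + 2]  */

  /* diff = offset (A) - offset (B); OFF is diff expressed as a multiple
     of the common step.  */
  long off = (init_a - init_b) / step;
  assert (off == -2);
  return 0;
}
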
728 | |
729 /* Returns the last basic block in LOOP for which we are sure that | |
730 it is executed whenever the loop is entered. */ | |
731 | |
732 static basic_block | |
733 last_always_executed_block (struct loop *loop) | |
734 { | |
735 unsigned i; | |
16 | 736 vec<edge> exits = get_loop_exit_edges (loop); |
0 | 737 edge ex; |
738 basic_block last = loop->latch; | |
739 | |
16 | 740 FOR_EACH_VEC_ELT (exits, i, ex) |
0 | 741 last = nearest_common_dominator (CDI_DOMINATORS, last, ex->src); |
16 | 742 exits.release (); |
0 | 743 |
744 return last; | |
745 } | |
746 | |
747 /* Splits dependence graph on DATAREFS described by DEPENDS to components. */ | |
748 | |
749 static struct component * | |
750 split_data_refs_to_components (struct loop *loop, | |
16 | 751 vec<data_reference_p> datarefs, |
752 vec<ddr_p> depends) | |
0 | 753 { |
16 | 754 unsigned i, n = datarefs.length (); |
0 | 755 unsigned ca, ia, ib, bad; |
756 unsigned *comp_father = XNEWVEC (unsigned, n + 1); | |
757 unsigned *comp_size = XNEWVEC (unsigned, n + 1); | |
758 struct component **comps; | |
759 struct data_reference *dr, *dra, *drb; | |
760 struct data_dependence_relation *ddr; | |
761 struct component *comp_list = NULL, *comp; | |
762 dref dataref; | |
16 | 763 /* Don't do store elimination if loop has multiple exit edges. */ |
764 bool eliminate_store_p = single_exit (loop) != NULL; | |
0 | 765 basic_block last_always_executed = last_always_executed_block (loop); |
9 | 766 |
16 | 767 FOR_EACH_VEC_ELT (datarefs, i, dr) |
0 | 768 { |
769 if (!DR_REF (dr)) | |
770 { | |
771 /* A fake reference for call or asm_expr that may clobber memory; | |
772 just fail. */ | |
773 goto end; | |
774 } | |
16 | 775 /* predcom pass isn't prepared to handle calls with data references. */ |
776 if (is_gimple_call (DR_STMT (dr))) | |
777 goto end; | |
0 | 778 dr->aux = (void *) (size_t) i; |
779 comp_father[i] = i; | |
780 comp_size[i] = 1; | |
781 } | |
782 | |
783 /* A component reserved for the "bad" data references. */ | |
784 comp_father[n] = n; | |
785 comp_size[n] = 1; | |
786 | |
16 | 787 FOR_EACH_VEC_ELT (datarefs, i, dr) |
0 | 788 { |
789 enum ref_step_type dummy; | |
790 | |
791 if (!suitable_reference_p (dr, &dummy)) | |
792 { | |
793 ia = (unsigned) (size_t) dr->aux; | |
794 merge_comps (comp_father, comp_size, n, ia); | |
795 } | |
796 } | |
797 | |
16 | 798 FOR_EACH_VEC_ELT (depends, i, ddr) |
0 | 799 { |
16 | 800 widest_int dummy_off; |
0 | 801 |
802 if (DDR_ARE_DEPENDENT (ddr) == chrec_known) | |
803 continue; | |
804 | |
805 dra = DDR_A (ddr); | |
806 drb = DDR_B (ddr); | |
16 | 807 |
808 /* Don't do store elimination if there is any unknown dependence for | |
809 any store data reference. */ | |
810 if ((DR_IS_WRITE (dra) || DR_IS_WRITE (drb)) | |
811 && (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know | |
812 || DDR_NUM_DIST_VECTS (ddr) == 0)) | |
813 eliminate_store_p = false; | |
814 | |
0 | 815 ia = component_of (comp_father, (unsigned) (size_t) dra->aux); |
816 ib = component_of (comp_father, (unsigned) (size_t) drb->aux); | |
817 if (ia == ib) | |
818 continue; | |
819 | |
820 bad = component_of (comp_father, n); | |
821 | |
822 /* If both A and B are reads, we may ignore unsuitable dependences. */ | |
16 | 823 if (DR_IS_READ (dra) && DR_IS_READ (drb)) |
824 { | |
825 if (ia == bad || ib == bad | |
826 || !determine_offset (dra, drb, &dummy_off)) | |
827 continue; | |
828 } | |
829 /* If A is read and B write or vice versa and there is unsuitable | |
830 dependence, instead of merging both components into a component | |
831 that will certainly not pass suitable_component_p, just put the | |
832 read into bad component, perhaps at least the write together with | |
833 all the other data refs in its component will be optimizable. */ | |
834 else if (DR_IS_READ (dra) && ib != bad) | |
835 { | |
836 if (ia == bad) | |
837 continue; | |
838 else if (!determine_offset (dra, drb, &dummy_off)) | |
839 { | |
840 merge_comps (comp_father, comp_size, bad, ia); | |
841 continue; | |
842 } | |
843 } | |
844 else if (DR_IS_READ (drb) && ia != bad) | |
845 { | |
846 if (ib == bad) | |
847 continue; | |
848 else if (!determine_offset (dra, drb, &dummy_off)) | |
849 { | |
850 merge_comps (comp_father, comp_size, bad, ib); | |
851 continue; | |
852 } | |
853 } | |
854 else if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb) | |
855 && ia != bad && ib != bad | |
856 && !determine_offset (dra, drb, &dummy_off)) | |
857 { | |
858 merge_comps (comp_father, comp_size, bad, ia); | |
859 merge_comps (comp_father, comp_size, bad, ib); | |
860 continue; | |
861 } | |
9 | 862 |
0 | 863 merge_comps (comp_father, comp_size, ia, ib); |
864 } | |
865 | |
16 | 866 if (eliminate_store_p) |
867 { | |
868 tree niters = number_of_latch_executions (loop); | |
869 | |
870 /* Don't do store elimination if niters info is unknown because stores | |
871 in the last iteration can't be eliminated and we need to recover it | |
872 after loop. */ | |
873 eliminate_store_p = (niters != NULL_TREE && niters != chrec_dont_know); | |
874 } | |
875 | |
0 | 876 comps = XCNEWVEC (struct component *, n); |
877 bad = component_of (comp_father, n); | |
16 | 878 FOR_EACH_VEC_ELT (datarefs, i, dr) |
0 | 879 { |
880 ia = (unsigned) (size_t) dr->aux; | |
881 ca = component_of (comp_father, ia); | |
882 if (ca == bad) | |
883 continue; | |
884 | |
885 comp = comps[ca]; | |
886 if (!comp) | |
887 { | |
888 comp = XCNEW (struct component); | |
16 | 889 comp->refs.create (comp_size[ca]); |
890 comp->eliminate_store_p = eliminate_store_p; | |
0 | 891 comps[ca] = comp; |
892 } | |
893 | |
9 | 894 dataref = XCNEW (struct dref_d); |
0 | 895 dataref->ref = dr; |
896 dataref->stmt = DR_STMT (dr); | |
16 | 897 dataref->offset = 0; |
0 | 898 dataref->distance = 0; |
899 | |
900 dataref->always_accessed | |
901 = dominated_by_p (CDI_DOMINATORS, last_always_executed, | |
902 gimple_bb (dataref->stmt)); | |
16 | 903 dataref->pos = comp->refs.length (); |
904 comp->refs.quick_push (dataref); | |
905 if (DR_IS_READ (dr)) | |
906 comp->eliminate_store_p = false; | |
0 | 907 } |
908 | |
909 for (i = 0; i < n; i++) | |
910 { | |
911 comp = comps[i]; | |
912 if (comp) | |
913 { | |
914 comp->next = comp_list; | |
915 comp_list = comp; | |
916 } | |
917 } | |
918 free (comps); | |
919 | |
920 end: | |
921 free (comp_father); | |
922 free (comp_size); | |
923 return comp_list; | |
924 } | |
925 | |
926 /* Returns true if the component COMP satisfies the conditions | |
927 described in 2) at the beginning of this file. LOOP is the current | |
928 loop. */ | |
9 | 929 |
0 | 930 static bool |
931 suitable_component_p (struct loop *loop, struct component *comp) | |
932 { | |
933 unsigned i; | |
934 dref a, first; | |
935 basic_block ba, bp = loop->header; | |
936 bool ok, has_write = false; | |
937 | |
16 | 938 FOR_EACH_VEC_ELT (comp->refs, i, a) |
0 | 939 { |
940 ba = gimple_bb (a->stmt); | |
941 | |
942 if (!just_once_each_iteration_p (loop, ba)) | |
943 return false; | |
944 | |
945 gcc_assert (dominated_by_p (CDI_DOMINATORS, ba, bp)); | |
946 bp = ba; | |
947 | |
14 | 948 if (DR_IS_WRITE (a->ref)) |
0 | 949 has_write = true; |
950 } | |
951 | |
16 | 952 first = comp->refs[0]; |
0 | 953 ok = suitable_reference_p (first->ref, &comp->comp_step); |
954 gcc_assert (ok); | |
16 | 955 first->offset = 0; |
956 | |
957 for (i = 1; comp->refs.iterate (i, &a); i++) | |
0 | 958 { |
959 if (!determine_offset (first->ref, a->ref, &a->offset)) | |
960 return false; | |
961 | |
16 | 962 enum ref_step_type a_step; |
963 gcc_checking_assert (suitable_reference_p (a->ref, &a_step) | |
964 && a_step == comp->comp_step); | |
0 | 965 } |
966 | |
967 /* If there is a write inside the component, we must know whether the | |
968 step is nonzero or not -- we would not otherwise be able to recognize | |
969 whether the value accessed by reads comes from the OFFSET-th iteration | |
970 or the previous one. */ | |
971 if (has_write && comp->comp_step == RS_ANY) | |
972 return false; | |
973 | |
974 return true; | |
975 } | |
9 | 976 |
0 | 977 /* Check the conditions on references inside each of components COMPS, |
978 and remove the unsuitable components from the list. The new list | |
979 of components is returned. The conditions are described in 2) at | |
980 the beginning of this file. LOOP is the current loop. */ | |
981 | |
982 static struct component * | |
983 filter_suitable_components (struct loop *loop, struct component *comps) | |
984 { | |
985 struct component **comp, *act; | |
986 | |
987 for (comp = &comps; *comp; ) | |
988 { | |
989 act = *comp; | |
990 if (suitable_component_p (loop, act)) | |
991 comp = &act->next; | |
992 else | |
993 { | |
994 dref ref; | |
995 unsigned i; | |
996 | |
997 *comp = act->next; | |
16 | 998 FOR_EACH_VEC_ELT (act->refs, i, ref) |
0 | 999 free (ref); |
1000 release_component (act); | |
1001 } | |
1002 } | |
1003 | |
1004 return comps; | |
1005 } | |
1006 | |
1007 /* Compares two drefs A and B by their offset and position. Callback for | |
1008 qsort. */ | |
1009 | |
1010 static int | |
1011 order_drefs (const void *a, const void *b) | |
1012 { | |
1013 const dref *const da = (const dref *) a; | |
1014 const dref *const db = (const dref *) b; | |
16 | 1015 int offcmp = wi::cmps ((*da)->offset, (*db)->offset); |
0 | 1016 |
1017 if (offcmp != 0) | |
1018 return offcmp; | |
1019 | |
1020 return (*da)->pos - (*db)->pos; | |
1021 } | |
1022 | |
1023 /* Returns root of the CHAIN. */ | |
1024 | |
1025 static inline dref | |
1026 get_chain_root (chain_p chain) | |
1027 { | |
16 | 1028 return chain->refs[0]; |
1029 } | |
1030 | |
1031 /* Given CHAIN, returns the last ref at DISTANCE, or NULL if it doesn't | |
1032 exist. */ | |
1033 | |
1034 static inline dref | |
1035 get_chain_last_ref_at (chain_p chain, unsigned distance) | |
1036 { | |
1037 unsigned i; | |
1038 | |
1039 for (i = chain->refs.length (); i > 0; i--) | |
1040 if (distance == chain->refs[i - 1]->distance) | |
1041 break; | |
1042 | |
1043 return (i > 0) ? chain->refs[i - 1] : NULL; | |
0 | 1044 } |
1045 | |
1046 /* Adds REF to the chain CHAIN. */ | |
1047 | |
1048 static void | |
1049 add_ref_to_chain (chain_p chain, dref ref) | |
1050 { | |
1051 dref root = get_chain_root (chain); | |
16 | 1052 |
1053 gcc_assert (wi::les_p (root->offset, ref->offset)); | |
1054 widest_int dist = ref->offset - root->offset; | |
1055 if (wi::leu_p (MAX_DISTANCE, dist)) | |
0 | 1056 { |
1057 free (ref); | |
1058 return; | |
1059 } | |
16 | 1060 gcc_assert (wi::fits_uhwi_p (dist)); |
1061 | |
1062 chain->refs.safe_push (ref); | |
1063 | |
1064 ref->distance = dist.to_uhwi (); | |
0 | 1065 |
1066 if (ref->distance >= chain->length) | |
1067 { | |
1068 chain->length = ref->distance; | |
1069 chain->has_max_use_after = false; | |
1070 } | |
1071 | |
16 | 1072 /* Don't set the flag for store-store chain since there is no use. */ |
1073 if (chain->type != CT_STORE_STORE | |
1074 && ref->distance == chain->length | |
0 | 1075 && ref->pos > root->pos) |
1076 chain->has_max_use_after = true; | |
1077 | |
1078 chain->all_always_accessed &= ref->always_accessed; | |
1079 } | |
1080 | |
1081 /* Returns the chain for invariant component COMP. */ | |
1082 | |
1083 static chain_p | |
1084 make_invariant_chain (struct component *comp) | |
1085 { | |
1086 chain_p chain = XCNEW (struct chain); | |
1087 unsigned i; | |
1088 dref ref; | |
1089 | |
1090 chain->type = CT_INVARIANT; | |
1091 | |
1092 chain->all_always_accessed = true; | |
1093 | |
16 | 1094 FOR_EACH_VEC_ELT (comp->refs, i, ref) |
0 | 1095 { |
16 | 1096 chain->refs.safe_push (ref); |
0 | 1097 chain->all_always_accessed &= ref->always_accessed; |
1098 } | |
1099 | |
16 | 1100 chain->inits = vNULL; |
1101 chain->finis = vNULL; | |
1102 | |
0 | 1103 return chain; |
1104 } | |
1105 | |
16 | 1106 /* Make a new chain of type TYPE rooted at REF. */ |
0 | 1107 |
1108 static chain_p | |
16 | 1109 make_rooted_chain (dref ref, enum chain_type type) |
0 | 1110 { |
1111 chain_p chain = XCNEW (struct chain); | |
1112 | |
16 | 1113 chain->type = type; |
1114 chain->refs.safe_push (ref); | |
0 | 1115 chain->all_always_accessed = ref->always_accessed; |
1116 ref->distance = 0; | |
1117 | |
16 | 1118 chain->inits = vNULL; |
1119 chain->finis = vNULL; | |
1120 | |
0 | 1121 return chain; |
1122 } | |
1123 | |
1124 /* Returns true if CHAIN is not trivial. */ | |
1125 | |
1126 static bool | |
1127 nontrivial_chain_p (chain_p chain) | |
1128 { | |
16 | 1129 return chain != NULL && chain->refs.length () > 1; |
0 | 1130 } |
1131 | |
1132 /* Returns the ssa name that contains the value of REF, or NULL_TREE if there | |
1133 is no such name. */ | |
1134 | |
1135 static tree | |
1136 name_for_ref (dref ref) | |
1137 { | |
1138 tree name; | |
1139 | |
1140 if (is_gimple_assign (ref->stmt)) | |
1141 { | |
1142 if (!ref->ref || DR_IS_READ (ref->ref)) | |
1143 name = gimple_assign_lhs (ref->stmt); | |
1144 else | |
1145 name = gimple_assign_rhs1 (ref->stmt); | |
1146 } | |
1147 else | |
1148 name = PHI_RESULT (ref->stmt); | |
1149 | |
1150 return (TREE_CODE (name) == SSA_NAME ? name : NULL_TREE); | |
1151 } | |
1152 | |
1153 /* Returns true if REF is a valid initializer for ROOT with given DISTANCE (in | |
1154 iterations of the innermost enclosing loop). */ | |
1155 | |
1156 static bool | |
1157 valid_initializer_p (struct data_reference *ref, | |
1158 unsigned distance, struct data_reference *root) | |
1159 { | |
1160 aff_tree diff, base, step; | |
16 | 1161 widest_int off; |
0 | 1162 |
1163 /* Both REF and ROOT must be accessing the same object. */ | |
1164 if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0)) | |
1165 return false; | |
1166 | |
1167 /* The initializer is defined outside of loop, hence its address must be | |
1168 invariant inside the loop. */ | |
1169 gcc_assert (integer_zerop (DR_STEP (ref))); | |
1170 | |
1171 /* If the address of the reference is invariant, initializer must access | |
1172 exactly the same location. */ | |
1173 if (integer_zerop (DR_STEP (root))) | |
1174 return (operand_equal_p (DR_OFFSET (ref), DR_OFFSET (root), 0) | |
1175 && operand_equal_p (DR_INIT (ref), DR_INIT (root), 0)); | |
1176 | |
1177 /* Verify that this index of REF is equal to the root's index at | |
1178 -DISTANCE-th iteration. */ | |
1179 aff_combination_dr_offset (root, &diff); | |
1180 aff_combination_dr_offset (ref, &base); | |
16 | 1181 aff_combination_scale (&base, -1); |
0 | 1182 aff_combination_add (&diff, &base); |
1183 | |
16 | 1184 tree_to_aff_combination_expand (DR_STEP (root), TREE_TYPE (DR_STEP (root)), |
1185 &step, &name_expansions); | |
0 | 1186 if (!aff_combination_constant_multiple_p (&diff, &step, &off)) |
1187 return false; | |
1188 | |
16 | 1189 if (off != distance) |
0 | 1190 return false; |
1191 | |
1192 return true; | |
1193 } | |
1194 | |
1195 /* Finds looparound phi node of LOOP that copies the value of REF, and if its | |
1196 initial value is correct (equal to initial value of REF shifted by one | |
1197 iteration), returns the phi node. Otherwise, NULL_TREE is returned. ROOT | |
1198 is the root of the current chain. */ | |
1199 | |
16 | 1200 static gphi * |
0 | 1201 find_looparound_phi (struct loop *loop, dref ref, dref root) |
1202 { | |
1203 tree name, init, init_ref; | |
16 | 1204 gphi *phi = NULL; |
1205 gimple *init_stmt; | |
0 | 1206 edge latch = loop_latch_edge (loop); |
1207 struct data_reference init_dr; | |
16 | 1208 gphi_iterator psi; |
0 | 1209 |
1210 if (is_gimple_assign (ref->stmt)) | |
1211 { | |
1212 if (DR_IS_READ (ref->ref)) | |
1213 name = gimple_assign_lhs (ref->stmt); | |
1214 else | |
1215 name = gimple_assign_rhs1 (ref->stmt); | |
1216 } | |
1217 else | |
1218 name = PHI_RESULT (ref->stmt); | |
1219 if (!name) | |
1220 return NULL; | |
1221 | |
1222 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi)) | |
1223 { | |
16 | 1224 phi = psi.phi (); |
0 | 1225 if (PHI_ARG_DEF_FROM_EDGE (phi, latch) == name) |
1226 break; | |
1227 } | |
1228 | |
1229 if (gsi_end_p (psi)) | |
1230 return NULL; | |
1231 | |
1232 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop)); | |
1233 if (TREE_CODE (init) != SSA_NAME) | |
1234 return NULL; | |
1235 init_stmt = SSA_NAME_DEF_STMT (init); | |
1236 if (gimple_code (init_stmt) != GIMPLE_ASSIGN) | |
1237 return NULL; | |
1238 gcc_assert (gimple_assign_lhs (init_stmt) == init); | |
1239 | |
1240 init_ref = gimple_assign_rhs1 (init_stmt); | |
1241 if (!REFERENCE_CLASS_P (init_ref) | |
1242 && !DECL_P (init_ref)) | |
1243 return NULL; | |
1244 | |
1245 /* Analyze the behavior of INIT_REF with respect to LOOP (innermost | |
1246 loop enclosing PHI). */ | |
1247 memset (&init_dr, 0, sizeof (struct data_reference)); | |
1248 DR_REF (&init_dr) = init_ref; | |
1249 DR_STMT (&init_dr) = phi; | |
16 | 1250 if (!dr_analyze_innermost (&DR_INNERMOST (&init_dr), init_ref, loop)) |
0 | 1251 return NULL; |
1252 | |
1253 if (!valid_initializer_p (&init_dr, ref->distance + 1, root->ref)) | |
1254 return NULL; | |
1255 | |
1256 return phi; | |
1257 } | |
1258 | |
1259 /* Adds a reference for the looparound copy of REF in PHI to CHAIN. */ | |
1260 | |
1261 static void | |
16 | 1262 insert_looparound_copy (chain_p chain, dref ref, gphi *phi) |
0 | 1263 { |
9 | 1264 dref nw = XCNEW (struct dref_d), aref; |
0 | 1265 unsigned i; |
1266 | |
1267 nw->stmt = phi; | |
1268 nw->distance = ref->distance + 1; | |
1269 nw->always_accessed = 1; | |
1270 | |
16 | 1271 FOR_EACH_VEC_ELT (chain->refs, i, aref) |
0 | 1272 if (aref->distance >= nw->distance) |
1273 break; | |
16 | 1274 chain->refs.safe_insert (i, nw); |
0 | 1275 |
1276 if (nw->distance > chain->length) | |
1277 { | |
1278 chain->length = nw->distance; | |
1279 chain->has_max_use_after = false; | |
1280 } | |
1281 } | |
1282 | |
1283 /* For references in CHAIN that are copied around the LOOP (created previously | |
1284 by PRE, or by user), add the results of such copies to the chain. This | |
1285 enables us to remove the copies by unrolling, and may need fewer registers | |
1286 (also, it may allow us to combine chains together). */ | |
1287 | |
1288 static void | |
1289 add_looparound_copies (struct loop *loop, chain_p chain) | |
1290 { | |
1291 unsigned i; | |
1292 dref ref, root = get_chain_root (chain); | |
16 | 1293 gphi *phi; |
1294 | |
1295 if (chain->type == CT_STORE_STORE) | |
1296 return; | |
1297 | |
1298 FOR_EACH_VEC_ELT (chain->refs, i, ref) | |
0 | 1299 { |
1300 phi = find_looparound_phi (loop, ref, root); | |
1301 if (!phi) | |
1302 continue; | |
1303 | |
1304 bitmap_set_bit (looparound_phis, SSA_NAME_VERSION (PHI_RESULT (phi))); | |
1305 insert_looparound_copy (chain, ref, phi); | |
1306 } | |
1307 } | |
1308 | |
1309 /* Find roots of the values and determine distances in the component COMP. | |
1310 The references are redistributed into CHAINS. LOOP is the current | |
1311 loop. */ | |
1312 | |
1313 static void | |
1314 determine_roots_comp (struct loop *loop, | |
1315 struct component *comp, | |
16 | 1316 vec<chain_p> *chains) |
0 | 1317 { |
1318 unsigned i; | |
1319 dref a; | |
1320 chain_p chain = NULL; | |
16 | 1321 widest_int last_ofs = 0; |
1322 enum chain_type type; | |
0 | 1323 |
1324 /* Invariants are handled specially. */ | |
1325 if (comp->comp_step == RS_INVARIANT) | |
1326 { | |
1327 chain = make_invariant_chain (comp); | |
16 | 1328 chains->safe_push (chain); |
0 | 1329 return; |
1330 } | |
1331 | |
16 | 1332 /* Trivial component. */ |
1333 if (comp->refs.length () <= 1) | |
1334 return; | |
1335 | |
1336 comp->refs.qsort (order_drefs); | |
1337 FOR_EACH_VEC_ELT (comp->refs, i, a) | |
0 | 1338 { |
16 | 1339 if (!chain |
1340 || (!comp->eliminate_store_p && DR_IS_WRITE (a->ref)) | |
1341 || wi::leu_p (MAX_DISTANCE, a->offset - last_ofs)) | |
0 | 1342 { |
1343 if (nontrivial_chain_p (chain)) | |
11 | 1344 { |
11 | 1345 add_looparound_copies (loop, chain); |
16 | 1346 chains->safe_push (chain); |
11 | 1347 } |
0 | 1348 else |
1349 release_chain (chain); | |
16 | 1350 |
1351 if (DR_IS_READ (a->ref)) | |
1352 type = CT_LOAD; | |
1353 else | |
1354 type = comp->eliminate_store_p ? CT_STORE_STORE : CT_STORE_LOAD; | |
1355 | |
1356 chain = make_rooted_chain (a, type); | |
11 | 1357 last_ofs = a->offset; |
0 | 1358 continue; |
1359 } | |
1360 | |
1361 add_ref_to_chain (chain, a); | |
1362 } | |
1363 | |
1364 if (nontrivial_chain_p (chain)) | |
1365 { | |
1366 add_looparound_copies (loop, chain); | |
16 | 1367 chains->safe_push (chain); |
0 | 1368 } |
1369 else | |
1370 release_chain (chain); | |
1371 } | |
1372 | |
1373 /* Find roots of the values and determine distances in components COMPS, and | |
1374 separate the references into CHAINS. LOOP is the current loop. */ | |
1375 | |
1376 static void | |
1377 determine_roots (struct loop *loop, | |
16 | 1378 struct component *comps, vec<chain_p> *chains) |
0 | 1379 { |
1380 struct component *comp; | |
1381 | |
1382 for (comp = comps; comp; comp = comp->next) | |
1383 determine_roots_comp (loop, comp, chains); | |
1384 } | |
1385 | |
1386 /* Replace the reference in statement STMT with temporary variable | |
1387 NEW_TREE. If SET is true, NEW_TREE is instead initialized to the value of | |
1388 the reference in the statement. IN_LHS is true if the reference | |
1389 is in the lhs of STMT, false if it is in rhs. */ | |
1390 | |
1391 static void | |
16 | 1392 replace_ref_with (gimple *stmt, tree new_tree, bool set, bool in_lhs) |
0 | 1393 { |
1394 tree val; | |
16 | 1395 gassign *new_stmt; |
0 | 1396 gimple_stmt_iterator bsi, psi; |
1397 | |
1398 if (gimple_code (stmt) == GIMPLE_PHI) | |
1399 { | |
1400 gcc_assert (!in_lhs && !set); | |
1401 | |
1402 val = PHI_RESULT (stmt); | |
1403 bsi = gsi_after_labels (gimple_bb (stmt)); | |
1404 psi = gsi_for_stmt (stmt); | |
1405 remove_phi_node (&psi, false); | |
1406 | |
1407 /* Turn the phi node into GIMPLE_ASSIGN. */ | |
1408 new_stmt = gimple_build_assign (val, new_tree); | |
1409 gsi_insert_before (&bsi, new_stmt, GSI_NEW_STMT); | |
1410 return; | |
1411 } | |
9 | 1412 |
0 | 1413 /* Since the reference is of gimple_reg type, it should only |
1414 appear as lhs or rhs of modify statement. */ | |
1415 gcc_assert (is_gimple_assign (stmt)); | |
1416 | |
1417 bsi = gsi_for_stmt (stmt); | |
1418 | |
1419 /* If we do not need to initialize NEW_TREE, just replace the use of OLD. */ | |
1420 if (!set) | |
1421 { | |
1422 gcc_assert (!in_lhs); | |
1423 gimple_assign_set_rhs_from_tree (&bsi, new_tree); | |
1424 stmt = gsi_stmt (bsi); | |
1425 update_stmt (stmt); | |
1426 return; | |
1427 } | |
1428 | |
1429 if (in_lhs) | |
1430 { | |
1431 /* We have statement | |
9 | 1432 |
0 | 1433 OLD = VAL |
1434 | |
1435 If OLD is a memory reference, then VAL is gimple_val, and we transform | |
1436 this to | |
1437 | |
1438 OLD = VAL | |
1439 NEW = VAL | |
1440 | |
9 | 1441 Otherwise, we are replacing a combination chain, |
0 | 1442 VAL is the expression that performs the combination, and OLD is an |
1443 SSA name. In this case, we transform the assignment to | |
1444 | |
1445 OLD = VAL | |
1446 NEW = OLD | |
1447 | |
1448 */ | |
1449 | |
1450 val = gimple_assign_lhs (stmt); | |
1451 if (TREE_CODE (val) != SSA_NAME) | |
1452 { | |
1453 val = gimple_assign_rhs1 (stmt); | |
16 | 1454 gcc_assert (gimple_assign_single_p (stmt)); |
1455 if (TREE_CLOBBER_P (val)) | |
1456 val = get_or_create_ssa_default_def (cfun, SSA_NAME_VAR (new_tree)); | |
1457 else | |
1458 gcc_assert (gimple_assign_copy_p (stmt)); | |
0 | 1459 } |
1460 } | |
1461 else | |
1462 { | |
1463 /* VAL = OLD | |
1464 | |
1465 is transformed to | |
1466 | |
1467 VAL = OLD | |
1468 NEW = VAL */ | |
1469 | |
1470 val = gimple_assign_lhs (stmt); | |
1471 } | |
1472 | |
1473 new_stmt = gimple_build_assign (new_tree, unshare_expr (val)); | |
1474 gsi_insert_after (&bsi, new_stmt, GSI_NEW_STMT); | |
1475 } | |
1476 | |
16 | 1477 /* Returns a memory reference to DR in the (NITERS + ITER)-th iteration |
1478 of the loop it was analyzed in. Append init stmts to STMTS. */ | |
0 | 1479 |
1480 static tree | |
16 | 1481 ref_at_iteration (data_reference_p dr, int iter, |
1482 gimple_seq *stmts, tree niters = NULL_TREE) | |
0 | 1483 { |
16 | 1484 tree off = DR_OFFSET (dr); |
1485 tree coff = DR_INIT (dr); | |
1486 tree ref = DR_REF (dr); | |
1487 enum tree_code ref_code = ERROR_MARK; | |
1488 tree ref_type = NULL_TREE; | |
1489 tree ref_op1 = NULL_TREE; | |
1490 tree ref_op2 = NULL_TREE; | |
1491 tree new_offset; | |
1492 | |
1493 if (iter != 0) | |
0 | 1494 { |
16 | 1495 new_offset = size_binop (MULT_EXPR, DR_STEP (dr), ssize_int (iter)); |
1496 if (TREE_CODE (new_offset) == INTEGER_CST) | |
1497 coff = size_binop (PLUS_EXPR, coff, new_offset); | |
1498 else | |
1499 off = size_binop (PLUS_EXPR, off, new_offset); | |
0 | 1500 } |
16 | 1501 |
1502 if (niters != NULL_TREE) | |
0 | 1503 { |
16 | 1504 niters = fold_convert (ssizetype, niters); |
1505 new_offset = size_binop (MULT_EXPR, DR_STEP (dr), niters); | |
1506 if (TREE_CODE (niters) == INTEGER_CST) | |
1507 coff = size_binop (PLUS_EXPR, coff, new_offset); | |
1508 else | |
1509 off = size_binop (PLUS_EXPR, off, new_offset); | |
0 | 1510 } |
16 | 1511 |
1512 /* While data-ref analysis punts on bit offsets it still handles | |
1513 bitfield accesses at byte boundaries. Cope with that. Note that | |
1514 if the bitfield object also starts at a byte-boundary we can simply | |
1515 replicate the COMPONENT_REF, but we have to subtract the component's | |
1516 byte-offset from the MEM_REF address first. | |
1517 Otherwise we simply build a BIT_FIELD_REF knowing that the bits | |
1518 start at offset zero. */ | |
1519 if (TREE_CODE (ref) == COMPONENT_REF | |
1520 && DECL_BIT_FIELD (TREE_OPERAND (ref, 1))) | |
0 | 1521 { |
16 | 1522 unsigned HOST_WIDE_INT boff; |
1523 tree field = TREE_OPERAND (ref, 1); | |
1524 tree offset = component_ref_field_offset (ref); | |
1525 ref_type = TREE_TYPE (ref); | |
1526 boff = tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)); | |
1527 /* This can occur in Ada. See the comment in get_bit_range. */ | |
1528 if (boff % BITS_PER_UNIT != 0 | |
1529 || !tree_fits_uhwi_p (offset)) | |
0 | 1530 { |
16 | 1531 ref_code = BIT_FIELD_REF; |
1532 ref_op1 = DECL_SIZE (field); | |
1533 ref_op2 = bitsize_zero_node; | |
0 | 1534 } |
1535 else | |
1536 { | |
16 | 1537 boff >>= LOG2_BITS_PER_UNIT; |