comparison gcc/tree-ssa-loop-prefetch.c @ 0:a06113de4d67
first commit
author    kent <kent@cr.ie.u-ryukyu.ac.jp>
date      Fri, 17 Jul 2009 14:47:48 +0900
parents   (none)
children  77e2b8dfacca
1 /* Array prefetching. | |
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc. | |
3 | |
4 This file is part of GCC. | |
5 | |
6 GCC is free software; you can redistribute it and/or modify it | |
7 under the terms of the GNU General Public License as published by the | |
8 Free Software Foundation; either version 3, or (at your option) any | |
9 later version. | |
10 | |
11 GCC is distributed in the hope that it will be useful, but WITHOUT | |
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 #include "config.h" | |
21 #include "system.h" | |
22 #include "coretypes.h" | |
23 #include "tm.h" | |
24 #include "tree.h" | |
25 #include "rtl.h" | |
26 #include "tm_p.h" | |
27 #include "hard-reg-set.h" | |
28 #include "basic-block.h" | |
29 #include "output.h" | |
30 #include "diagnostic.h" | |
31 #include "tree-flow.h" | |
32 #include "tree-dump.h" | |
33 #include "timevar.h" | |
34 #include "cfgloop.h" | |
35 #include "varray.h" | |
36 #include "expr.h" | |
37 #include "tree-pass.h" | |
38 #include "ggc.h" | |
39 #include "insn-config.h" | |
40 #include "recog.h" | |
41 #include "hashtab.h" | |
42 #include "tree-chrec.h" | |
43 #include "tree-scalar-evolution.h" | |
44 #include "toplev.h" | |
45 #include "params.h" | |
46 #include "langhooks.h" | |
47 #include "tree-inline.h" | |
48 #include "tree-data-ref.h" | |
49 #include "optabs.h" | |
50 | |
51 /* This pass inserts prefetch instructions to optimize cache usage during | |
52 accesses to arrays in loops. It processes loops sequentially and: | |
53 | |
54 1) Gathers all memory references in the single loop. | |
55 2) For each of the references it decides when it is profitable to prefetch | |
56 it. To do this, we evaluate the reuse among the accesses and determine | |
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do | |
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and | |
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the | |
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example | |
61 (assuming cache line size is 64 bytes, char has size 1 byte and there | |
62 is no hardware sequential prefetch): | |
63 | |
64 char *a; | |
65 for (i = 0; i < max; i++) | |
66 { | |
67 a[255] = ...; (0) | |
68 a[i] = ...; (1) | |
69 a[i + 64] = ...; (2) | |
70 a[16*i] = ...; (3) | |
71 a[187*i] = ...; (4) | |
72 a[187*i + 50] = ...; (5) | |
73 } | |
74 | |
75 (0) obviously has PREFETCH_BEFORE 1 | |
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory | |
77 location 64 iterations before it, and PREFETCH_MOD 64 (since | |
78 it hits the same cache line otherwise). | |
79 (2) has PREFETCH_MOD 64 | |
80 (3) has PREFETCH_MOD 4 | |
81 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since | |
82 the cache line accessed by (4) is the same with probability only | |
83 7/32. | |
84 (5) has PREFETCH_MOD 1 as well. | |
85 | |
86 Additionally, we use data dependence analysis to determine for each | |
87 reference the distance till the first reuse; this information is used | |
88 to determine the temporality of the issued prefetch instruction. | |
89 | |
90 3) We determine how much ahead we need to prefetch. The number of | |
91 iterations needed is time to fetch / time spent in one iteration of | |
92 the loop. The problem is that we do not know either of these values, | |
93 so we just make a heuristic guess based on a magic (possibly) | |
94 target-specific constant and size of the loop. | |
95 | |
96 4) Determine which of the references we prefetch. We take into account | |
97 that there is a maximum number of simultaneous prefetches (provided | |
98 by the machine description). We issue as many prefetches as possible | |
99 while still within this bound (starting with those with lowest | |
100 prefetch_mod, since they are responsible for most of the cache | |
101 misses). | |
102 | |
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD | |
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid | |
105 prefetching nonaccessed memory. | |
106 TODO -- actually implement peeling. | |
107 | |
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the | |
109 prefetch instructions with guards in cases where 5) was not sufficient | |
110 to satisfy the constraints? | |
111 | |
112 Some other TODO: | |
113 -- write and use more general reuse analysis (that could be also used | |
114 in other cache aimed loop optimizations) | |
115 -- make it behave sanely together with the prefetches given by the user | |
116 (now we just ignore them; at the very least we should avoid | |
117 optimizing loops in which the user put his own prefetches) | |
118 -- we assume cache line size alignment of arrays; this could be | |
119 improved. */ | |
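As a concrete illustration of the transformation sketched above (this example is not part of the original file; it assumes char *a as in the example loop, the default 64-byte PREFETCH_BLOCK, a prefetch distance AHEAD of 8 iterations and no unrolling), the pass conceptually turns a loop whose reference advances by one full cache line per iteration into the second form below:

    /* Before the pass.  */
    for (i = 0; i < max; i++)
      sum += a[64 * i];

    /* Conceptual result.  The pass works on GIMPLE, but the inserted call is
       equivalent to this source-level __builtin_prefetch: the second argument
       marks the access as a read, the third requests full temporal locality.  */
    for (i = 0; i < max; i++)
      {
        __builtin_prefetch (&a[64 * (i + 8)], 0, 3);
        sum += a[64 * i];
      }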
120 | |
121 /* Magic constants follow. These should be replaced by machine specific | |
122 numbers. */ | |
123 | |
124 /* True if write can be prefetched by a read prefetch. */ | |
125 | |
126 #ifndef WRITE_CAN_USE_READ_PREFETCH | |
127 #define WRITE_CAN_USE_READ_PREFETCH 1 | |
128 #endif | |
129 | |
130 /* True if read can be prefetched by a write prefetch. */ | |
131 | |
132 #ifndef READ_CAN_USE_WRITE_PREFETCH | |
133 #define READ_CAN_USE_WRITE_PREFETCH 0 | |
134 #endif | |
135 | |
136 /* The size of the block loaded by a single prefetch. Usually, this is | |
137 the same as cache line size (at the moment, we only consider one level | |
138 of cache hierarchy). */ | |
139 | |
140 #ifndef PREFETCH_BLOCK | |
141 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE | |
142 #endif | |
143 | |
144 /* Do we have forward hardware sequential prefetching? */ | |
145 | |
146 #ifndef HAVE_FORWARD_PREFETCH | |
147 #define HAVE_FORWARD_PREFETCH 0 | |
148 #endif | |
149 | |
150 /* Do we have backward hardware sequential prefetching? */ | |
151 | |
152 #ifndef HAVE_BACKWARD_PREFETCH | |
153 #define HAVE_BACKWARD_PREFETCH 0 | |
154 #endif | |
155 | |
156 /* In some cases we are only able to determine that there is a certain | |
157 probability that the two accesses hit the same cache line. In this | |
158 case, we issue the prefetches for both of them if this probability | |
159 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */ | |
160 | |
161 #ifndef ACCEPTABLE_MISS_RATE | |
162 #define ACCEPTABLE_MISS_RATE 50 | |
163 #endif | |
164 | |
165 #ifndef HAVE_prefetch | |
166 #define HAVE_prefetch 0 | |
167 #endif | |
168 | |
169 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024)) | |
170 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024)) | |
171 | |
172 /* We consider a memory access nontemporal if it is not reused sooner than | |
173 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore | |
174 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION, | |
175 so that we use nontemporal prefetches e.g. if a single memory location | |
176 is accessed several times in a single iteration of the loop. */ | |
177 #define NONTEMPORAL_FRACTION 16 | |
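For instance, with an L1 cache of 32 kB (an assumed example figure; the real value comes from the target's L1_CACHE_SIZE parameter), reuses within 32768 / 16 = 2048 bytes of accessed data are ignored, so such closely reused references can still be classified as nontemporal.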
178 | |
179 /* In case we have to emit a memory fence instruction after the loop that | |
180 uses nontemporal stores, this defines the builtin to use. */ | |
181 | |
182 #ifndef FENCE_FOLLOWING_MOVNT | |
183 #define FENCE_FOLLOWING_MOVNT NULL_TREE | |
184 #endif | |
185 | |
186 /* The group of references between which reuse may occur. */ | |
187 | |
188 struct mem_ref_group | |
189 { | |
190 tree base; /* Base of the reference. */ | |
191 HOST_WIDE_INT step; /* Step of the reference. */ | |
192 struct mem_ref *refs; /* References in the group. */ | |
193 struct mem_ref_group *next; /* Next group of references. */ | |
194 }; | |
195 | |
196 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */ | |
197 | |
198 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0) | |
199 | |
200 /* The memory reference. */ | |
201 | |
202 struct mem_ref | |
203 { | |
204 gimple stmt; /* Statement in which the reference appears. */ | |
205 tree mem; /* The reference. */ | |
206 HOST_WIDE_INT delta; /* Constant offset of the reference. */ | |
207 struct mem_ref_group *group; /* The group of references it belongs to. */ | |
208 unsigned HOST_WIDE_INT prefetch_mod; | |
209 /* Prefetch only each PREFETCH_MOD-th | |
210 iteration. */ | |
211 unsigned HOST_WIDE_INT prefetch_before; | |
212 /* Prefetch only first PREFETCH_BEFORE | |
213 iterations. */ | |
214 unsigned reuse_distance; /* The amount of data accessed before the first | |
215 reuse of this value. */ | |
216 struct mem_ref *next; /* The next reference in the group. */ | |
217 unsigned write_p : 1; /* Is it a write? */ | |
218 unsigned independent_p : 1; /* True if the reference is independent of | |
219 all other references inside the loop. */ | |
220 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */ | |
221 unsigned storent_p : 1; /* True if we changed the store to a | |
222 nontemporal one. */ | |
223 }; | |
224 | |
225 /* Dumps information about reference REF to FILE. */ | |
226 | |
227 static void | |
228 dump_mem_ref (FILE *file, struct mem_ref *ref) | |
229 { | |
230 fprintf (file, "Reference %p:\n", (void *) ref); | |
231 | |
232 fprintf (file, " group %p (base ", (void *) ref->group); | |
233 print_generic_expr (file, ref->group->base, TDF_SLIM); | |
234 fprintf (file, ", step "); | |
235 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step); | |
236 fprintf (file, ")\n"); | |
237 | |
238 fprintf (file, " delta "); | |
239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta); | |
240 fprintf (file, "\n"); | |
241 | |
242 fprintf (file, " %s\n", ref->write_p ? "write" : "read"); | |
243 | |
244 fprintf (file, "\n"); | |
245 } | |
246 | |
247 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not | |
248 exist. */ | |
249 | |
250 static struct mem_ref_group * | |
251 find_or_create_group (struct mem_ref_group **groups, tree base, | |
252 HOST_WIDE_INT step) | |
253 { | |
254 struct mem_ref_group *group; | |
255 | |
256 for (; *groups; groups = &(*groups)->next) | |
257 { | |
258 if ((*groups)->step == step | |
259 && operand_equal_p ((*groups)->base, base, 0)) | |
260 return *groups; | |
261 | |
262 /* Keep the list of groups sorted by decreasing step. */ | |
263 if ((*groups)->step < step) | |
264 break; | |
265 } | |
266 | |
267 group = XNEW (struct mem_ref_group); | |
268 group->base = base; | |
269 group->step = step; | |
270 group->refs = NULL; | |
271 group->next = *groups; | |
272 *groups = group; | |
273 | |
274 return group; | |
275 } | |
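Relating this to the example in the header comment: a[i] and a[i + 64] share the same base and step, so they end up in a single mem_ref_group and differ only in the per-reference delta (0 versus 64), whereas a[16 * i] has a different step and is placed in a group of its own.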
276 | |
277 /* Records a memory reference MEM in GROUP with offset DELTA and write status | |
278 WRITE_P. The reference occurs in statement STMT. */ | |
279 | |
280 static void | |
281 record_ref (struct mem_ref_group *group, gimple stmt, tree mem, | |
282 HOST_WIDE_INT delta, bool write_p) | |
283 { | |
284 struct mem_ref **aref; | |
285 | |
286 /* Do not record the same address twice. */ | |
287 for (aref = &group->refs; *aref; aref = &(*aref)->next) | |
288 { | |
289 /* It need not be possible for a write reference to reuse a read | |
290 prefetch, or vice versa. */ | |
291 if (!WRITE_CAN_USE_READ_PREFETCH | |
292 && write_p | |
293 && !(*aref)->write_p) | |
294 continue; | |
295 if (!READ_CAN_USE_WRITE_PREFETCH | |
296 && !write_p | |
297 && (*aref)->write_p) | |
298 continue; | |
299 | |
300 if ((*aref)->delta == delta) | |
301 return; | |
302 } | |
303 | |
304 (*aref) = XNEW (struct mem_ref); | |
305 (*aref)->stmt = stmt; | |
306 (*aref)->mem = mem; | |
307 (*aref)->delta = delta; | |
308 (*aref)->write_p = write_p; | |
309 (*aref)->prefetch_before = PREFETCH_ALL; | |
310 (*aref)->prefetch_mod = 1; | |
311 (*aref)->reuse_distance = 0; | |
312 (*aref)->issue_prefetch_p = false; | |
313 (*aref)->group = group; | |
314 (*aref)->next = NULL; | |
315 (*aref)->independent_p = false; | |
316 (*aref)->storent_p = false; | |
317 | |
318 if (dump_file && (dump_flags & TDF_DETAILS)) | |
319 dump_mem_ref (dump_file, *aref); | |
320 } | |
321 | |
322 /* Release memory references in GROUPS. */ | |
323 | |
324 static void | |
325 release_mem_refs (struct mem_ref_group *groups) | |
326 { | |
327 struct mem_ref_group *next_g; | |
328 struct mem_ref *ref, *next_r; | |
329 | |
330 for (; groups; groups = next_g) | |
331 { | |
332 next_g = groups->next; | |
333 for (ref = groups->refs; ref; ref = next_r) | |
334 { | |
335 next_r = ref->next; | |
336 free (ref); | |
337 } | |
338 free (groups); | |
339 } | |
340 } | |
341 | |
342 /* A structure used to pass arguments to idx_analyze_ref. */ | |
343 | |
344 struct ar_data | |
345 { | |
346 struct loop *loop; /* Loop of the reference. */ | |
347 gimple stmt; /* Statement of the reference. */ | |
348 HOST_WIDE_INT *step; /* Step of the memory reference. */ | |
349 HOST_WIDE_INT *delta; /* Offset of the memory reference. */ | |
350 }; | |
351 | |
352 /* Analyzes a single INDEX of a memory reference to obtain information | |
353 described at analyze_ref. Callback for for_each_index. */ | |
354 | |
355 static bool | |
356 idx_analyze_ref (tree base, tree *index, void *data) | |
357 { | |
358 struct ar_data *ar_data = (struct ar_data *) data; | |
359 tree ibase, step, stepsize; | |
360 HOST_WIDE_INT istep, idelta = 0, imult = 1; | |
361 affine_iv iv; | |
362 | |
363 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF | |
364 || TREE_CODE (base) == ALIGN_INDIRECT_REF) | |
365 return false; | |
366 | |
367 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt), | |
368 *index, &iv, false)) | |
369 return false; | |
370 ibase = iv.base; | |
371 step = iv.step; | |
372 | |
373 if (!cst_and_fits_in_hwi (step)) | |
374 return false; | |
375 istep = int_cst_value (step); | |
376 | |
377 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR | |
378 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1))) | |
379 { | |
380 idelta = int_cst_value (TREE_OPERAND (ibase, 1)); | |
381 ibase = TREE_OPERAND (ibase, 0); | |
382 } | |
383 if (cst_and_fits_in_hwi (ibase)) | |
384 { | |
385 idelta += int_cst_value (ibase); | |
386 ibase = build_int_cst (TREE_TYPE (ibase), 0); | |
387 } | |
388 | |
389 if (TREE_CODE (base) == ARRAY_REF) | |
390 { | |
391 stepsize = array_ref_element_size (base); | |
392 if (!cst_and_fits_in_hwi (stepsize)) | |
393 return false; | |
394 imult = int_cst_value (stepsize); | |
395 | |
396 istep *= imult; | |
397 idelta *= imult; | |
398 } | |
399 | |
400 *ar_data->step += istep; | |
401 *ar_data->delta += idelta; | |
402 *index = ibase; | |
403 | |
404 return true; | |
405 } | |
406 | |
407 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and | |
408 STEP are integer constants and iter is number of iterations of LOOP. The | |
409 reference occurs in statement STMT. Strips nonaddressable component | |
410 references from REF_P. */ | |
411 | |
412 static bool | |
413 analyze_ref (struct loop *loop, tree *ref_p, tree *base, | |
414 HOST_WIDE_INT *step, HOST_WIDE_INT *delta, | |
415 gimple stmt) | |
416 { | |
417 struct ar_data ar_data; | |
418 tree off; | |
419 HOST_WIDE_INT bit_offset; | |
420 tree ref = *ref_p; | |
421 | |
422 *step = 0; | |
423 *delta = 0; | |
424 | |
425 /* First strip off the component references. Ignore bitfields. */ | |
426 if (TREE_CODE (ref) == COMPONENT_REF | |
427 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))) | |
428 ref = TREE_OPERAND (ref, 0); | |
429 | |
430 *ref_p = ref; | |
431 | |
432 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0)) | |
433 { | |
434 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); | |
435 bit_offset = TREE_INT_CST_LOW (off); | |
436 gcc_assert (bit_offset % BITS_PER_UNIT == 0); | |
437 | |
438 *delta += bit_offset / BITS_PER_UNIT; | |
439 } | |
440 | |
441 *base = unshare_expr (ref); | |
442 ar_data.loop = loop; | |
443 ar_data.stmt = stmt; | |
444 ar_data.step = step; | |
445 ar_data.delta = delta; | |
446 return for_each_index (base, idx_analyze_ref, &ar_data); | |
447 } | |
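A worked example (the reference itself is only illustrative): for an ARRAY_REF such as a[i + 3], where a has 4-byte elements and i is an induction variable of LOOP running from 0 with step 1, idx_analyze_ref finds the affine form with base 3 and step 1, scales both by the element size, and analyze_ref therefore reports STEP = 4 and DELTA = 12, with the index in the rebuilt BASE replaced by zero.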
448 | |
449 /* Record a memory reference REF to the list REFS. The reference occurs in | |
450 LOOP in statement STMT and it is a write if WRITE_P. Returns true if the | |
451 reference was recorded, false otherwise. */ | |
452 | |
453 static bool | |
454 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs, | |
455 tree ref, bool write_p, gimple stmt) | |
456 { | |
457 tree base; | |
458 HOST_WIDE_INT step, delta; | |
459 struct mem_ref_group *agrp; | |
460 | |
461 if (get_base_address (ref) == NULL) | |
462 return false; | |
463 | |
464 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt)) | |
465 return false; | |
466 | |
467 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP | |
468 are integer constants. */ | |
469 agrp = find_or_create_group (refs, base, step); | |
470 record_ref (agrp, stmt, ref, delta, write_p); | |
471 | |
472 return true; | |
473 } | |
474 | |
475 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to | |
476 true if there are no other memory references inside the loop. */ | |
477 | |
478 static struct mem_ref_group * | |
479 gather_memory_references (struct loop *loop, bool *no_other_refs) | |
480 { | |
481 basic_block *body = get_loop_body_in_dom_order (loop); | |
482 basic_block bb; | |
483 unsigned i; | |
484 gimple_stmt_iterator bsi; | |
485 gimple stmt; | |
486 tree lhs, rhs; | |
487 struct mem_ref_group *refs = NULL; | |
488 | |
489 *no_other_refs = true; | |
490 | |
491 /* Scan the loop body in order, so that the earlier references precede the | |
492 later ones. */ | |
493 for (i = 0; i < loop->num_nodes; i++) | |
494 { | |
495 bb = body[i]; | |
496 if (bb->loop_father != loop) | |
497 continue; | |
498 | |
499 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) | |
500 { | |
501 stmt = gsi_stmt (bsi); | |
502 | |
503 if (gimple_code (stmt) != GIMPLE_ASSIGN) | |
504 { | |
505 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS) | |
506 || (is_gimple_call (stmt) | |
507 && !(gimple_call_flags (stmt) & ECF_CONST))) | |
508 *no_other_refs = false; | |
509 continue; | |
510 } | |
511 | |
512 lhs = gimple_assign_lhs (stmt); | |
513 rhs = gimple_assign_rhs1 (stmt); | |
514 | |
515 if (REFERENCE_CLASS_P (rhs)) | |
516 *no_other_refs &= gather_memory_references_ref (loop, &refs, | |
517 rhs, false, stmt); | |
518 if (REFERENCE_CLASS_P (lhs)) | |
519 *no_other_refs &= gather_memory_references_ref (loop, &refs, | |
520 lhs, true, stmt); | |
521 } | |
522 } | |
523 free (body); | |
524 | |
525 return refs; | |
526 } | |
527 | |
528 /* Prune the prefetch candidate REF using the self-reuse. */ | |
529 | |
530 static void | |
531 prune_ref_by_self_reuse (struct mem_ref *ref) | |
532 { | |
533 HOST_WIDE_INT step = ref->group->step; | |
534 bool backward = step < 0; | |
535 | |
536 if (step == 0) | |
537 { | |
538 /* Prefetch references to invariant address just once. */ | |
539 ref->prefetch_before = 1; | |
540 return; | |
541 } | |
542 | |
543 if (backward) | |
544 step = -step; | |
545 | |
546 if (step > PREFETCH_BLOCK) | |
547 return; | |
548 | |
549 if ((backward && HAVE_BACKWARD_PREFETCH) | |
550 || (!backward && HAVE_FORWARD_PREFETCH)) | |
551 { | |
552 ref->prefetch_before = 1; | |
553 return; | |
554 } | |
555 | |
556 ref->prefetch_mod = PREFETCH_BLOCK / step; | |
557 } | |
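Tying this back to the header comment: with the default 64-byte PREFETCH_BLOCK, the reference a[16 * i] over a char array advances 16 bytes per iteration, so the final assignment yields prefetch_mod = 64 / 16 = 4, matching case (3) above; a step larger than the block leaves the reference unrestricted here, and a zero step (invariant address) caps prefetch_before at 1.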
558 | |
559 /* Divides X by BY, rounding down. */ | |
560 | |
561 static HOST_WIDE_INT | |
562 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by) | |
563 { | |
564 gcc_assert (by > 0); | |
565 | |
566 if (x >= 0) | |
567 return x / by; | |
568 else | |
569 return (x + by - 1) / by; | |
570 } | |
571 | |
572 /* Prune the prefetch candidate REF using the reuse with BY. | |
573 If BY_IS_BEFORE is true, BY is before REF in the loop. */ | |
574 | |
575 static void | |
576 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by, | |
577 bool by_is_before) | |
578 { | |
579 HOST_WIDE_INT step = ref->group->step; | |
580 bool backward = step < 0; | |
581 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta; | |
582 HOST_WIDE_INT delta = delta_b - delta_r; | |
583 HOST_WIDE_INT hit_from; | |
584 unsigned HOST_WIDE_INT prefetch_before, prefetch_block; | |
585 | |
586 if (delta == 0) | |
587 { | |
588 /* If the references have the same address, only prefetch the | |
589 former. */ | |
590 if (by_is_before) | |
591 ref->prefetch_before = 0; | |
592 | |
593 return; | |
594 } | |
595 | |
596 if (!step) | |
597 { | |
598 /* If the reference addresses are invariant and fall into the | |
599 same cache line, prefetch just the first one. */ | |
600 if (!by_is_before) | |
601 return; | |
602 | |
603 if (ddown (ref->delta, PREFETCH_BLOCK) | |
604 != ddown (by->delta, PREFETCH_BLOCK)) | |
605 return; | |
606 | |
607 ref->prefetch_before = 0; | |
608 return; | |
609 } | |
610 | |
611 /* Only prune the reference that is behind in the array. */ | |
612 if (backward) | |
613 { | |
614 if (delta > 0) | |
615 return; | |
616 | |
617 /* Transform the data so that we may assume that the accesses | |
618 are forward. */ | |
619 delta = - delta; | |
620 step = -step; | |
621 delta_r = PREFETCH_BLOCK - 1 - delta_r; | |
622 delta_b = PREFETCH_BLOCK - 1 - delta_b; | |
623 } | |
624 else | |
625 { | |
626 if (delta < 0) | |
627 return; | |
628 } | |
629 | |
630 /* Check whether the two references are likely to hit the same cache | |
631 line, and how far apart the iterations in which this occurs are from | |
632 each other. */ | |
633 | |
634 if (step <= PREFETCH_BLOCK) | |
635 { | |
636 /* The accesses are sure to meet. Let us check when. */ | |
637 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK; | |
638 prefetch_before = (hit_from - delta_r + step - 1) / step; | |
639 | |
640 if (prefetch_before < ref->prefetch_before) | |
641 ref->prefetch_before = prefetch_before; | |
642 | |
643 return; | |
644 } | |
645 | |
646 /* A more complicated case. First let us ensure that the cache line size | |
647 and step are coprime (here we assume that PREFETCH_BLOCK is a power | |
648 of two). */ | |
649 prefetch_block = PREFETCH_BLOCK; | |
650 while ((step & 1) == 0 | |
651 && prefetch_block > 1) | |
652 { | |
653 step >>= 1; | |
654 prefetch_block >>= 1; | |
655 delta >>= 1; | |
656 } | |
657 | |
658 /* Now step > prefetch_block, and step and prefetch_block are coprime. | |
659 Determine the probability that the accesses hit the same cache line. */ | |
660 | |
661 prefetch_before = delta / step; | |
662 delta %= step; | |
663 if ((unsigned HOST_WIDE_INT) delta | |
664 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000)) | |
665 { | |
666 if (prefetch_before < ref->prefetch_before) | |
667 ref->prefetch_before = prefetch_before; | |
668 | |
669 return; | |
670 } | |
671 | |
672 /* Try also the following iteration. */ | |
673 prefetch_before++; | |
674 delta = step - delta; | |
675 if ((unsigned HOST_WIDE_INT) delta | |
676 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000)) | |
677 { | |
678 if (prefetch_before < ref->prefetch_before) | |
679 ref->prefetch_before = prefetch_before; | |
680 | |
681 return; | |
682 } | |
683 | |
684 /* REF probably does not reuse BY. */ | |
685 return; | |
686 } | |
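Again using the header example: when a[i] (delta 0) is pruned against the later a[i + 64] (delta 64), the step of 1 is at most PREFETCH_BLOCK, hit_from evaluates to 64 and prefetch_before becomes (64 - 0 + 1 - 1) / 1 = 64, exactly the PREFETCH_BEFORE 64 claimed for reference (1) at the top of the file. When the step exceeds the block, the reduction loop above first makes step and prefetch_block coprime, and the reuse is accepted only if the remaining delta is at most prefetch_block * ACCEPTABLE_MISS_RATE / 1000 bytes.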
687 | |
688 /* Prune the prefetch candidate REF using the reuses with other references | |
689 in REFS. */ | |
690 | |
691 static void | |
692 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs) | |
693 { | |
694 struct mem_ref *prune_by; | |
695 bool before = true; | |
696 | |
697 prune_ref_by_self_reuse (ref); | |
698 | |
699 for (prune_by = refs; prune_by; prune_by = prune_by->next) | |
700 { | |
701 if (prune_by == ref) | |
702 { | |
703 before = false; | |
704 continue; | |
705 } | |
706 | |
707 if (!WRITE_CAN_USE_READ_PREFETCH | |
708 && ref->write_p | |
709 && !prune_by->write_p) | |
710 continue; | |
711 if (!READ_CAN_USE_WRITE_PREFETCH | |
712 && !ref->write_p | |
713 && prune_by->write_p) | |
714 continue; | |
715 | |
716 prune_ref_by_group_reuse (ref, prune_by, before); | |
717 } | |
718 } | |
719 | |
720 /* Prune the prefetch candidates in GROUP using the reuse analysis. */ | |
721 | |
722 static void | |
723 prune_group_by_reuse (struct mem_ref_group *group) | |
724 { | |
725 struct mem_ref *ref_pruned; | |
726 | |
727 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next) | |
728 { | |
729 prune_ref_by_reuse (ref_pruned, group->refs); | |
730 | |
731 if (dump_file && (dump_flags & TDF_DETAILS)) | |
732 { | |
733 fprintf (dump_file, "Reference %p:", (void *) ref_pruned); | |
734 | |
735 if (ref_pruned->prefetch_before == PREFETCH_ALL | |
736 && ref_pruned->prefetch_mod == 1) | |
737 fprintf (dump_file, " no restrictions"); | |
738 else if (ref_pruned->prefetch_before == 0) | |
739 fprintf (dump_file, " do not prefetch"); | |
740 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod) | |
741 fprintf (dump_file, " prefetch once"); | |
742 else | |
743 { | |
744 if (ref_pruned->prefetch_before != PREFETCH_ALL) | |
745 { | |
746 fprintf (dump_file, " prefetch before "); | |
747 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, | |
748 ref_pruned->prefetch_before); | |
749 } | |
750 if (ref_pruned->prefetch_mod != 1) | |
751 { | |
752 fprintf (dump_file, " prefetch mod "); | |
753 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, | |
754 ref_pruned->prefetch_mod); | |
755 } | |
756 } | |
757 fprintf (dump_file, "\n"); | |
758 } | |
759 } | |
760 } | |
761 | |
762 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */ | |
763 | |
764 static void | |
765 prune_by_reuse (struct mem_ref_group *groups) | |
766 { | |
767 for (; groups; groups = groups->next) | |
768 prune_group_by_reuse (groups); | |
769 } | |
770 | |
771 /* Returns true if we should issue prefetch for REF. */ | |
772 | |
773 static bool | |
774 should_issue_prefetch_p (struct mem_ref *ref) | |
775 { | |
776 /* For now do not issue prefetches for references that are useful only | |
777 in the first few iterations. */ | |
778 if (ref->prefetch_before != PREFETCH_ALL) | |
779 return false; | |
780 | |
781 /* Do not prefetch nontemporal stores. */ | |
782 if (ref->storent_p) | |
783 return false; | |
784 | |
785 return true; | |
786 } | |
787 | |
788 /* Decide which of the prefetch candidates in GROUPS to prefetch. | |
789 AHEAD is the number of iterations to prefetch ahead (which corresponds | |
790 to the number of simultaneous instances of one prefetch running at a | |
791 time). UNROLL_FACTOR is the factor by which the loop is going to be | |
792 unrolled. Returns true if there is anything to prefetch. */ | |
793 | |
794 static bool | |
795 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor, | |
796 unsigned ahead) | |
797 { | |
798 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots; | |
799 unsigned slots_per_prefetch; | |
800 struct mem_ref *ref; | |
801 bool any = false; | |
802 | |
803 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */ | |
804 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES; | |
805 | |
806 /* The prefetch will run for AHEAD iterations of the original loop, i.e., | |
807 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration, | |
808 it will need a prefetch slot. */ | |
809 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor; | |
810 if (dump_file && (dump_flags & TDF_DETAILS)) | |
811 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n", | |
812 slots_per_prefetch); | |
813 | |
814 /* For now we just take memory references one by one and issue | |
815 prefetches for as many as possible. The groups are sorted | |
816 starting with the largest step, since the references with | |
817 large step are more likely to cause many cache misses. */ | |
818 | |
819 for (; groups; groups = groups->next) | |
820 for (ref = groups->refs; ref; ref = ref->next) | |
821 { | |
822 if (!should_issue_prefetch_p (ref)) | |
823 continue; | |
824 | |
825 /* If we need to prefetch the reference each PREFETCH_MOD iterations, | |
826 and we unroll the loop UNROLL_FACTOR times, we need to insert | |
827 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each | |
828 iteration. */ | |
829 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1) | |
830 / ref->prefetch_mod); | |
831 prefetch_slots = n_prefetches * slots_per_prefetch; | |
832 | |
833 /* If more than half of the prefetches would be lost anyway, do not | |
834 issue the prefetch. */ | |
835 if (2 * remaining_prefetch_slots < prefetch_slots) | |
836 continue; | |
837 | |
838 ref->issue_prefetch_p = true; | |
839 | |
840 if (remaining_prefetch_slots <= prefetch_slots) | |
841 return true; | |
842 remaining_prefetch_slots -= prefetch_slots; | |
843 any = true; | |
844 } | |
845 | |
846 return any; | |
847 } | |
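A small worked example (the SIMULTANEOUS_PREFETCHES value of 6 is assumed only for illustration): with ahead = 4 and unroll_factor = 1, slots_per_prefetch = (4 + 0) / 1 = 4. A first candidate with prefetch_mod = 1 needs 1 * 4 = 4 slots; since 2 * 6 >= 4 it is issued, leaving 2 slots. A second identical candidate is still issued, because 2 * 2 is not less than 4, but the function then returns since the remaining slots are exhausted.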
848 | |
849 /* Determine whether there is any reference suitable for prefetching | |
850 in GROUPS. */ | |
851 | |
852 static bool | |
853 anything_to_prefetch_p (struct mem_ref_group *groups) | |
854 { | |
855 struct mem_ref *ref; | |
856 | |
857 for (; groups; groups = groups->next) | |
858 for (ref = groups->refs; ref; ref = ref->next) | |
859 if (should_issue_prefetch_p (ref)) | |
860 return true; | |
861 | |
862 return false; | |
863 } | |
864 | |
865 /* Issue prefetches for the reference REF into loop as decided before. | |
866 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR | |
867 is the factor by which LOOP was unrolled. */ | |
868 | |
869 static void | |
870 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) | |
871 { | |
872 HOST_WIDE_INT delta; | |
873 tree addr, addr_base, write_p, local; | |
874 gimple prefetch; | |
875 gimple_stmt_iterator bsi; | |
876 unsigned n_prefetches, ap; | |
877 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES; | |
878 | |
879 if (dump_file && (dump_flags & TDF_DETAILS)) | |
880 fprintf (dump_file, "Issued%s prefetch for %p.\n", | |
881 nontemporal ? " nontemporal" : "", | |
882 (void *) ref); | |
883 | |
884 bsi = gsi_for_stmt (ref->stmt); | |
885 | |
886 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1) | |
887 / ref->prefetch_mod); | |
888 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node); | |
889 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base), | |
890 true, NULL, true, GSI_SAME_STMT); | |
891 write_p = ref->write_p ? integer_one_node : integer_zero_node; | |
892 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3); | |
893 | |
894 for (ap = 0; ap < n_prefetches; ap++) | |
895 { | |
896 /* Determine the address to prefetch. */ | |
897 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step; | |
898 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, | |
899 addr_base, size_int (delta)); | |
900 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL, | |
901 true, GSI_SAME_STMT); | |
902 | |
903 /* Create the prefetch instruction. */ | |
904 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH], | |
905 3, addr, write_p, local); | |
906 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT); | |
907 } | |
908 } | |
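Continuing the illustrative sketch given after the header comment: for the read of a[64 * i] with ahead = 8, prefetch_mod = 1, and hence n_prefetches = 1 when the loop is not unrolled, the single statement built here corresponds to

    __builtin_prefetch (&a[64 * i] + 512, 0, 3);

since delta = (8 + 0 * 1) * 64 = 512; the last argument would be 0 rather than 3 if the reuse distance had marked the reference as nontemporal.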
909 | |
910 /* Issue prefetches for the references in GROUPS into loop as decided before. | |
911 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the | |
912 factor by which LOOP was unrolled. */ | |
913 | |
914 static void | |
915 issue_prefetches (struct mem_ref_group *groups, | |
916 unsigned unroll_factor, unsigned ahead) | |
917 { | |
918 struct mem_ref *ref; | |
919 | |
920 for (; groups; groups = groups->next) | |
921 for (ref = groups->refs; ref; ref = ref->next) | |
922 if (ref->issue_prefetch_p) | |
923 issue_prefetch_ref (ref, unroll_factor, ahead); | |
924 } | |
925 | |
926 /* Returns true if REF is a memory write for which a nontemporal store insn | |
927 can be used. */ | |
928 | |
929 static bool | |
930 nontemporal_store_p (struct mem_ref *ref) | |
931 { | |
932 enum machine_mode mode; | |
933 enum insn_code code; | |
934 | |
935 /* REF must be a write that is not reused. We require it to be independent | |
936 of all other memory references in the loop, as the nontemporal stores may | |
937 be reordered with respect to other memory references. */ | |
938 if (!ref->write_p | |
939 || !ref->independent_p | |
940 || ref->reuse_distance < L2_CACHE_SIZE_BYTES) | |
941 return false; | |
942 | |
943 /* Check that we have the storent instruction for the mode. */ | |
944 mode = TYPE_MODE (TREE_TYPE (ref->mem)); | |
945 if (mode == BLKmode) | |
946 return false; | |
947 | |
948 code = optab_handler (storent_optab, mode)->insn_code; | |
949 return code != CODE_FOR_nothing; | |
950 } | |
951 | |
952 /* If REF is a nontemporal store, we mark the corresponding modify statement | |
953 and return true. Otherwise, we return false. */ | |
954 | |
955 static bool | |
956 mark_nontemporal_store (struct mem_ref *ref) | |
957 { | |
958 if (!nontemporal_store_p (ref)) | |
959 return false; | |
960 | |
961 if (dump_file && (dump_flags & TDF_DETAILS)) | |
962 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n", | |
963 (void *) ref); | |
964 | |
965 gimple_assign_set_nontemporal_move (ref->stmt, true); | |
966 ref->storent_p = true; | |
967 | |
968 return true; | |
969 } | |
970 | |
971 /* Issue a memory fence instruction after LOOP. */ | |
972 | |
973 static void | |
974 emit_mfence_after_loop (struct loop *loop) | |
975 { | |
976 VEC (edge, heap) *exits = get_loop_exit_edges (loop); | |
977 edge exit; | |
978 gimple call; | |
979 gimple_stmt_iterator bsi; | |
980 unsigned i; | |
981 | |
982 for (i = 0; VEC_iterate (edge, exits, i, exit); i++) | |
983 { | |
984 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0); | |
985 | |
986 if (!single_pred_p (exit->dest) | |
987 /* If possible, we prefer not to insert the fence on other paths | |
988 in the CFG. */ | |
989 && !(exit->flags & EDGE_ABNORMAL)) | |
990 split_loop_exit_edge (exit); | |
991 bsi = gsi_after_labels (exit->dest); | |
992 | |
993 gsi_insert_before (&bsi, call, GSI_NEW_STMT); | |
994 mark_virtual_ops_for_renaming (call); | |
995 } | |
996 | |
997 VEC_free (edge, heap, exits); | |
998 update_ssa (TODO_update_ssa_only_virtuals); | |
999 } | |
1000 | |
1001 /* Returns true if we can use storent in loop, false otherwise. */ | |
1002 | |
1003 static bool | |
1004 may_use_storent_in_loop_p (struct loop *loop) | |
1005 { | |
1006 bool ret = true; | |
1007 | |
1008 if (loop->inner != NULL) | |
1009 return false; | |
1010 | |
1011 /* If we must issue a mfence insn after using storent, check that there | |
1012 is a suitable place for it at each of the loop exits. */ | |
1013 if (FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1014 { | |
1015 VEC (edge, heap) *exits = get_loop_exit_edges (loop); | |
1016 unsigned i; | |
1017 edge exit; | |
1018 | |
1019 for (i = 0; VEC_iterate (edge, exits, i, exit); i++) | |
1020 if ((exit->flags & EDGE_ABNORMAL) | |
1021 && exit->dest == EXIT_BLOCK_PTR) | |
1022 ret = false; | |
1023 | |
1024 VEC_free (edge, heap, exits); | |
1025 } | |
1026 | |
1027 return ret; | |
1028 } | |
1029 | |
1030 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory | |
1031 references in the loop. */ | |
1032 | |
1033 static void | |
1034 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups) | |
1035 { | |
1036 struct mem_ref *ref; | |
1037 bool any = false; | |
1038 | |
1039 if (!may_use_storent_in_loop_p (loop)) | |
1040 return; | |
1041 | |
1042 for (; groups; groups = groups->next) | |
1043 for (ref = groups->refs; ref; ref = ref->next) | |
1044 any |= mark_nontemporal_store (ref); | |
1045 | |
1046 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1047 emit_mfence_after_loop (loop); | |
1048 } | |
1049 | |
1050 /* Determines whether we can profitably unroll LOOP FACTOR times, and if | |
1051 this is the case, fill in DESC with the description of the number of | |
1052 iterations. */ | |
1053 | |
1054 static bool | |
1055 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc, | |
1056 unsigned factor) | |
1057 { | |
1058 if (!can_unroll_loop_p (loop, factor, desc)) | |
1059 return false; | |
1060 | |
1061 /* We only consider loops without control flow for unrolling. This is not | |
1062 a hard restriction -- tree_unroll_loop works with arbitrary loops | |
1063 as well; but the unrolling/prefetching is usually more profitable for | |
1064 loops consisting of a single basic block, and we want to limit the | |
1065 code growth. */ | |
1066 if (loop->num_nodes > 2) | |
1067 return false; | |
1068 | |
1069 return true; | |
1070 } | |
1071 | |
1072 /* Determine the factor by which to unroll LOOP, from the information | |
1073 contained in the list of memory references REFS. The description of the | |
1074 number of iterations of LOOP is stored to DESC. NINSNS is the number of | |
1075 insns of the LOOP. EST_NITER is the estimated number of iterations of | |
1076 the loop, or -1 if no estimate is available. */ | |
1077 | |
1078 static unsigned | |
1079 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs, | |
1080 unsigned ninsns, struct tree_niter_desc *desc, | |
1081 HOST_WIDE_INT est_niter) | |
1082 { | |
1083 unsigned upper_bound; | |
1084 unsigned nfactor, factor, mod_constraint; | |
1085 struct mem_ref_group *agp; | |
1086 struct mem_ref *ref; | |
1087 | |
1088 /* First check whether the loop is not too large to unroll. We ignore | |
1089 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us | |
1090 from unrolling them enough to make exactly one cache line covered by each | |
1091 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent | |
1092 us from unrolling the loops too many times in cases where we only expect | |
1093 gains from better scheduling and decreasing loop overhead, which is not | |
1094 the case here. */ | |
1095 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns; | |
1096 | |
1097 /* If we unrolled the loop more times than it iterates, the unrolled version | |
1098 of the loop would never be entered. */ | |
1099 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound) | |
1100 upper_bound = est_niter; | |
1101 | |
1102 if (upper_bound <= 1) | |
1103 return 1; | |
1104 | |
1105 /* Choose the factor so that we may prefetch each cache line just once, | |
1106 but bound the unrolling by UPPER_BOUND. */ | |
1107 factor = 1; | |
1108 for (agp = refs; agp; agp = agp->next) | |
1109 for (ref = agp->refs; ref; ref = ref->next) | |
1110 if (should_issue_prefetch_p (ref)) | |
1111 { | |
1112 mod_constraint = ref->prefetch_mod; | |
1113 nfactor = least_common_multiple (mod_constraint, factor); | |
1114 if (nfactor <= upper_bound) | |
1115 factor = nfactor; | |
1116 } | |
1117 | |
1118 if (!should_unroll_loop_p (loop, desc, factor)) | |
1119 return 1; | |
1120 | |
1121 return factor; | |
1122 } | |
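For example, if the prefetch candidates have prefetch_mod values of 4 and 16 and upper_bound works out to 8, the factor first grows to lcm(4, 1) = 4, while lcm(16, 4) = 16 exceeds the bound and is rejected, so the loop is unrolled 4 times provided should_unroll_loop_p accepts that factor.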
1123 | |
1124 /* Returns the total volume of the memory references REFS, taking into account | |
1125 reuses in the innermost loop and cache line size. TODO -- we should also | |
1126 take into account reuses across the iterations of the loops in the loop | |
1127 nest. */ | |
1128 | |
1129 static unsigned | |
1130 volume_of_references (struct mem_ref_group *refs) | |
1131 { | |
1132 unsigned volume = 0; | |
1133 struct mem_ref_group *gr; | |
1134 struct mem_ref *ref; | |
1135 | |
1136 for (gr = refs; gr; gr = gr->next) | |
1137 for (ref = gr->refs; ref; ref = ref->next) | |
1138 { | |
1139 /* Almost always reuses another value? */ | |
1140 if (ref->prefetch_before != PREFETCH_ALL) | |
1141 continue; | |
1142 | |
1143 /* If several iterations access the same cache line, use the size of | |
1144 the line divided by this number. Otherwise, a cache line is | |
1145 accessed in each iteration. TODO -- in the latter case, we should | |
1146 take the size of the reference into account, rounding it up to a | |
1147 multiple of the cache line size. */ | |
1148 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod; | |
1149 } | |
1150 return volume; | |
1151 } | |
1152 | |
1153 /* Returns the volume of memory references accessed across VEC iterations of | |
1154 loops, whose sizes are described in the LOOP_SIZES array. N is the number | |
1155 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */ | |
1156 | |
1157 static unsigned | |
1158 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n) | |
1159 { | |
1160 unsigned i; | |
1161 | |
1162 for (i = 0; i < n; i++) | |
1163 if (vec[i] != 0) | |
1164 break; | |
1165 | |
1166 if (i == n) | |
1167 return 0; | |
1168 | |
1169 gcc_assert (vec[i] > 0); | |
1170 | |
1171 /* We ignore the parts of the distance vector in subloops, since usually | |
1172 the numbers of iterations are much smaller. */ | |
1173 return loop_sizes[i] * vec[i]; | |
1174 } | |
1175 | |
1176 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES | |
1177 at the position corresponding to the loop of the step. N is the depth | |
1178 of the considered loop nest, and, LOOP is its innermost loop. */ | |
1179 | |
1180 static void | |
1181 add_subscript_strides (tree access_fn, unsigned stride, | |
1182 HOST_WIDE_INT *strides, unsigned n, struct loop *loop) | |
1183 { | |
1184 struct loop *aloop; | |
1185 tree step; | |
1186 HOST_WIDE_INT astep; | |
1187 unsigned min_depth = loop_depth (loop) - n; | |
1188 | |
1189 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC) | |
1190 { | |
1191 aloop = get_chrec_loop (access_fn); | |
1192 step = CHREC_RIGHT (access_fn); | |
1193 access_fn = CHREC_LEFT (access_fn); | |
1194 | |
1195 if ((unsigned) loop_depth (aloop) <= min_depth) | |
1196 continue; | |
1197 | |
1198 if (host_integerp (step, 0)) | |
1199 astep = tree_low_cst (step, 0); | |
1200 else | |
1201 astep = L1_CACHE_LINE_SIZE; | |
1202 | |
1203 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride; | |
1204 | |
1205 } | |
1206 } | |
1207 | |
1208 /* Returns the volume of memory references accessed between two consecutive | |
1209 self-reuses of the reference DR. We consider the subscripts of DR in N | |
1210 loops, and LOOP_SIZES contains the volumes of accesses in each of the | |
1211 loops. LOOP is the innermost loop of the current loop nest. */ | |
1212 | |
1213 static unsigned | |
1214 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n, | |
1215 struct loop *loop) | |
1216 { | |
1217 tree stride, access_fn; | |
1218 HOST_WIDE_INT *strides, astride; | |
1219 VEC (tree, heap) *access_fns; | |
1220 tree ref = DR_REF (dr); | |
1221 unsigned i, ret = ~0u; | |
1222 | |
1223 /* In the following example: | |
1224 | |
1225 for (i = 0; i < N; i++) | |
1226 for (j = 0; j < N; j++) | |
1227 use (a[j][i]); | |
1228 the same cache line is accessed each N steps (except if the change from | |
1229 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse, | |
1230 we cannot rely purely on the results of the data dependence analysis. | |
1231 | |
1232 Instead, we compute the stride of the reference in each loop, and consider | |
1233 the innermost loop in which the stride is less than the cache size. */ | |
1234 | |
1235 strides = XCNEWVEC (HOST_WIDE_INT, n); | |
1236 access_fns = DR_ACCESS_FNS (dr); | |
1237 | |
1238 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++) | |
1239 { | |
1240 /* Keep track of the reference corresponding to the subscript, so that we | |
1241 know its stride. */ | |
1242 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF) | |
1243 ref = TREE_OPERAND (ref, 0); | |
1244 | |
1245 if (TREE_CODE (ref) == ARRAY_REF) | |
1246 { | |
1247 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref)); | |
1248 if (host_integerp (stride, 1)) | |
1249 astride = tree_low_cst (stride, 1); | |
1250 else | |
1251 astride = L1_CACHE_LINE_SIZE; | |
1252 | |
1253 ref = TREE_OPERAND (ref, 0); | |
1254 } | |
1255 else | |
1256 astride = 1; | |
1257 | |
1258 add_subscript_strides (access_fn, astride, strides, n, loop); | |
1259 } | |
1260 | |
1261 for (i = n; i-- > 0; ) | |
1262 { | |
1263 unsigned HOST_WIDE_INT s; | |
1264 | |
1265 s = strides[i] < 0 ? -strides[i] : strides[i]; | |
1266 | |
1267 if (s < (unsigned) L1_CACHE_LINE_SIZE | |
1268 && (loop_sizes[i] | |
1269 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION))) | |
1270 { | |
1271 ret = loop_sizes[i]; | |
1272 break; | |
1273 } | |
1274 } | |
1275 | |
1276 free (strides); | |
1277 return ret; | |
1278 } | |
1279 | |
1280 /* Determines the distance till the first reuse of each reference in REFS | |
1281 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other | |
1282 memory references in the loop. */ | |
1283 | |
1284 static void | |
1285 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs, | |
1286 bool no_other_refs) | |
1287 { | |
1288 struct loop *nest, *aloop; | |
1289 VEC (data_reference_p, heap) *datarefs = NULL; | |
1290 VEC (ddr_p, heap) *dependences = NULL; | |
1291 struct mem_ref_group *gr; | |
1292 struct mem_ref *ref, *refb; | |
1293 VEC (loop_p, heap) *vloops = NULL; | |
1294 unsigned *loop_data_size; | |
1295 unsigned i, j, n; | |
1296 unsigned volume, dist, adist; | |
1297 HOST_WIDE_INT vol; | |
1298 data_reference_p dr; | |
1299 ddr_p dep; | |
1300 | |
1301 if (loop->inner) | |
1302 return; | |
1303 | |
1304 /* Find the outermost loop of the loop nest of loop (we require that | |
1305 there are no sibling loops inside the nest). */ | |
1306 nest = loop; | |
1307 while (1) | |
1308 { | |
1309 aloop = loop_outer (nest); | |
1310 | |
1311 if (aloop == current_loops->tree_root | |
1312 || aloop->inner->next) | |
1313 break; | |
1314 | |
1315 nest = aloop; | |
1316 } | |
1317 | |
1318 /* For each loop, determine the amount of data accessed in each iteration. | |
1319 We use this to estimate whether the reference is evicted from the | |
1320 cache before its reuse. */ | |
1321 find_loop_nest (nest, &vloops); | |
1322 n = VEC_length (loop_p, vloops); | |
1323 loop_data_size = XNEWVEC (unsigned, n); | |
1324 volume = volume_of_references (refs); | |
1325 i = n; | |
1326 while (i-- != 0) | |
1327 { | |
1328 loop_data_size[i] = volume; | |
1329 /* Bound the volume by the L2 cache size, since above this bound, | |
1330 all dependence distances are equivalent. */ | |
1331 if (volume > L2_CACHE_SIZE_BYTES) | |
1332 continue; | |
1333 | |
1334 aloop = VEC_index (loop_p, vloops, i); | |
1335 vol = estimated_loop_iterations_int (aloop, false); | |
1336 if (vol < 0) | |
1337 vol = expected_loop_iterations (aloop); | |
1338 volume *= vol; | |
1339 } | |
1340 | |
1341 /* Prepare the references in the form suitable for data dependence | |
1342 analysis. We ignore unanalyzable data references (the results | |
1343 are used just as a heuristic to estimate temporality of the | |
1344 references, hence we do not need to worry about correctness). */ | |
1345 for (gr = refs; gr; gr = gr->next) | |
1346 for (ref = gr->refs; ref; ref = ref->next) | |
1347 { | |
1348 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p); | |
1349 | |
1350 if (dr) | |
1351 { | |
1352 ref->reuse_distance = volume; | |
1353 dr->aux = ref; | |
1354 VEC_safe_push (data_reference_p, heap, datarefs, dr); | |
1355 } | |
1356 else | |
1357 no_other_refs = false; | |
1358 } | |
1359 | |
1360 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++) | |
1361 { | |
1362 dist = self_reuse_distance (dr, loop_data_size, n, loop); | |
1363 ref = (struct mem_ref *) dr->aux; | |
1364 if (ref->reuse_distance > dist) | |
1365 ref->reuse_distance = dist; | |
1366 | |
1367 if (no_other_refs) | |
1368 ref->independent_p = true; | |
1369 } | |
1370 | |
1371 compute_all_dependences (datarefs, &dependences, vloops, true); | |
1372 | |
1373 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++) | |
1374 { | |
1375 if (DDR_ARE_DEPENDENT (dep) == chrec_known) | |
1376 continue; | |
1377 | |
1378 ref = (struct mem_ref *) DDR_A (dep)->aux; | |
1379 refb = (struct mem_ref *) DDR_B (dep)->aux; | |
1380 | |
1381 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know | |
1382 || DDR_NUM_DIST_VECTS (dep) == 0) | |
1383 { | |
1384 /* If the dependence cannot be analyzed, assume that there might be | |
1385 a reuse. */ | |
1386 dist = 0; | |
1387 | |
1388 ref->independent_p = false; | |
1389 refb->independent_p = false; | |
1390 } | |
1391 else | |
1392 { | |
1393 /* The distance vectors are normalized to be always lexicographically | |
1394 positive, hence we cannot tell just from them whether DDR_A comes | |
1395 before DDR_B or vice versa. However, it is not important, | |
1396 anyway -- if DDR_A is close to DDR_B, then it is either reused in | |
1397 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B | |
1398 in cache (and marking it as nontemporal would not affect | |
1399 anything). */ | |
1400 | |
1401 dist = volume; | |
1402 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++) | |
1403 { | |
1404 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j), | |
1405 loop_data_size, n); | |
1406 | |
1407 /* If this is a dependence in the innermost loop (i.e., the | |
1408 distances in all superloops are zero) and it is not | |
1409 the trivial self-dependence with distance zero, record that | |
1410 the references are not completely independent. */ | |
1411 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1) | |
1412 && (ref != refb | |
1413 || DDR_DIST_VECT (dep, j)[n-1] != 0)) | |
1414 { | |
1415 ref->independent_p = false; | |
1416 refb->independent_p = false; | |
1417 } | |
1418 | |
1419 /* Ignore accesses closer than | |
1420 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION, | |
1421 so that we use nontemporal prefetches e.g. if a single memory | |
1422 location is accessed several times in a single iteration of | |
1423 the loop. */ | |
1424 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION) | |
1425 continue; | |
1426 | |
1427 if (adist < dist) | |
1428 dist = adist; | |
1429 } | |
1430 } | |
1431 | |
1432 if (ref->reuse_distance > dist) | |
1433 ref->reuse_distance = dist; | |
1434 if (refb->reuse_distance > dist) | |
1435 refb->reuse_distance = dist; | |
1436 } | |
1437 | |
1438 free_dependence_relations (dependences); | |
1439 free_data_refs (datarefs); | |
1440 free (loop_data_size); | |
1441 | |
1442 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1443 { | |
1444 fprintf (dump_file, "Reuse distances:\n"); | |
1445 for (gr = refs; gr; gr = gr->next) | |
1446 for (ref = gr->refs; ref; ref = ref->next) | |
1447 fprintf (dump_file, " ref %p distance %u\n", | |
1448 (void *) ref, ref->reuse_distance); | |
1449 } | |
1450 } | |
1451 | |
1452 /* Issue prefetch instructions for array references in LOOP. Returns | |
1453 true if the LOOP was unrolled. */ | |
1454 | |
1455 static bool | |
1456 loop_prefetch_arrays (struct loop *loop) | |
1457 { | |
1458 struct mem_ref_group *refs; | |
1459 unsigned ahead, ninsns, time, unroll_factor; | |
1460 HOST_WIDE_INT est_niter; | |
1461 struct tree_niter_desc desc; | |
1462 bool unrolled = false, no_other_refs; | |
1463 | |
1464 if (optimize_loop_nest_for_size_p (loop)) | |
1465 { | |
1466 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1467 fprintf (dump_file, " ignored (cold area)\n"); | |
1468 return false; | |
1469 } | |
1470 | |
1471 /* Step 1: gather the memory references. */ | |
1472 refs = gather_memory_references (loop, &no_other_refs); | |
1473 | |
1474 /* Step 2: estimate the reuse effects. */ | |
1475 prune_by_reuse (refs); | |
1476 | |
1477 if (!anything_to_prefetch_p (refs)) | |
1478 goto fail; | |
1479 | |
1480 determine_loop_nest_reuse (loop, refs, no_other_refs); | |
1481 | |
1482 /* Step 3: determine the ahead and unroll factor. */ | |
1483 | |
1484 /* FIXME: the time should be weighted by the probabilities of the blocks in | |
1485 the loop body. */ | |
1486 time = tree_num_loop_insns (loop, &eni_time_weights); | |
1487 ahead = (PREFETCH_LATENCY + time - 1) / time; | |
1488 est_niter = estimated_loop_iterations_int (loop, false); | |
1489 | |
1490 /* The prefetches will run for AHEAD iterations of the original loop. Unless | |
1491 the loop rolls at least AHEAD times, prefetching the references does not | |
1492 make sense. */ | |
1493 if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead) | |
1494 { | |
1495 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1496 fprintf (dump_file, | |
1497 "Not prefetching -- loop estimated to roll only %d times\n", | |
1498 (int) est_niter); | |
1499 goto fail; | |
1500 } | |
1501 | |
1502 mark_nontemporal_stores (loop, refs); | |
1503 | |
1504 ninsns = tree_num_loop_insns (loop, &eni_size_weights); | |
1505 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc, | |
1506 est_niter); | |
1507 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1508 fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor); | |
1509 | |
1510 /* Step 4: what to prefetch? */ | |
1511 if (!schedule_prefetches (refs, unroll_factor, ahead)) | |
1512 goto fail; | |
1513 | |
1514 /* Step 5: unroll the loop. TODO -- peeling of first and last few | |
1515 iterations so that we do not issue superfluous prefetches. */ | |
1516 if (unroll_factor != 1) | |
1517 { | |
1518 tree_unroll_loop (loop, unroll_factor, | |
1519 single_dom_exit (loop), &desc); | |
1520 unrolled = true; | |
1521 } | |
1522 | |
1523 /* Step 6: issue the prefetches. */ | |
1524 issue_prefetches (refs, unroll_factor, ahead); | |
1525 | |
1526 fail: | |
1527 release_mem_refs (refs); | |
1528 return unrolled; | |
1529 } | |
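To put assumed numbers on step 3 above: if PREFETCH_LATENCY corresponds to a cost of 200 in the eni_time_weights metric and the loop body costs 30, then ahead = (200 + 30 - 1) / 30 = 7, and prefetching is abandoned when the loop is estimated (where an estimate is available) to roll 7 times or fewer.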
1530 | |
1531 /* Issue prefetch instructions for array references in loops. */ | |
1532 | |
1533 unsigned int | |
1534 tree_ssa_prefetch_arrays (void) | |
1535 { | |
1536 loop_iterator li; | |
1537 struct loop *loop; | |
1538 bool unrolled = false; | |
1539 int todo_flags = 0; | |
1540 | |
1541 if (!HAVE_prefetch | |
1542 /* It is possible to ask compiler for say -mtune=i486 -march=pentium4. | |
1543 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part | |
1544 of processor costs and i486 does not have prefetch, but | |
1545 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */ | |
1546 || PREFETCH_BLOCK == 0) | |
1547 return 0; | |
1548 | |
1549 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1550 { | |
1551 fprintf (dump_file, "Prefetching parameters:\n"); | |
1552 fprintf (dump_file, " simultaneous prefetches: %d\n", | |
1553 SIMULTANEOUS_PREFETCHES); | |
1554 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY); | |
1555 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK); | |
1556 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n", | |
1557 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE); | |
1558 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE); | |
1559 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE); | |
1560 fprintf (dump_file, "\n"); | |
1561 } | |
1562 | |
1563 initialize_original_copy_tables (); | |
1564 | |
1565 if (!built_in_decls[BUILT_IN_PREFETCH]) | |
1566 { | |
1567 tree type = build_function_type (void_type_node, | |
1568 tree_cons (NULL_TREE, | |
1569 const_ptr_type_node, | |
1570 NULL_TREE)); | |
1571 tree decl = add_builtin_function ("__builtin_prefetch", type, | |
1572 BUILT_IN_PREFETCH, BUILT_IN_NORMAL, | |
1573 NULL, NULL_TREE); | |
1574 DECL_IS_NOVOPS (decl) = true; | |
1575 built_in_decls[BUILT_IN_PREFETCH] = decl; | |
1576 } | |
1577 | |
1578 /* We assume that the cache line size is a power of two, so verify this | |
1579 here. */ | |
1580 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0); | |
1581 | |
1582 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) | |
1583 { | |
1584 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1585 fprintf (dump_file, "Processing loop %d:\n", loop->num); | |
1586 | |
1587 unrolled |= loop_prefetch_arrays (loop); | |
1588 | |
1589 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1590 fprintf (dump_file, "\n\n"); | |
1591 } | |
1592 | |
1593 if (unrolled) | |
1594 { | |
1595 scev_reset (); | |
1596 todo_flags |= TODO_cleanup_cfg; | |
1597 } | |
1598 | |
1599 free_original_copy_tables (); | |
1600 return todo_flags; | |
1601 } |