annotate gcc/tree-ssa-loop-prefetch.c @ 145:1830386684a0 (gcc-9.2.0)
author   anatofuz
date     Thu, 13 Feb 2020 11:34:05 +0900
parents  84e7813d76e9
rev | line source
0 | 1 /* Array prefetching. |
145 | 2 Copyright (C) 2005-2020 Free Software Foundation, Inc. |
3 |
0 | 4 This file is part of GCC. |
5 |
0 | 6 GCC is free software; you can redistribute it and/or modify it |
7 under the terms of the GNU General Public License as published by the | |
8 Free Software Foundation; either version 3, or (at your option) any | |
9 later version. | |
10 |
0 | 11 GCC is distributed in the hope that it will be useful, but WITHOUT |
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 |
0 | 16 You should have received a copy of the GNU General Public License |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 #include "config.h" | |
21 #include "system.h" | |
22 #include "coretypes.h" | |
111 | 23 #include "backend.h" |
24 #include "target.h" | |
25 #include "rtl.h" | |
0 | 26 #include "tree.h" |
111 | 27 #include "gimple.h" |
28 #include "predict.h" | |
29 #include "tree-pass.h" | |
30 #include "gimple-ssa.h" | |
31 #include "optabs-query.h" | |
32 #include "tree-pretty-print.h" |
111 | 33 #include "fold-const.h" |
34 #include "stor-layout.h" | |
35 #include "gimplify.h" | |
36 #include "gimple-iterator.h" | |
37 #include "gimplify-me.h" | |
38 #include "tree-ssa-loop-ivopts.h" | |
39 #include "tree-ssa-loop-manip.h" | |
40 #include "tree-ssa-loop-niter.h" | |
41 #include "tree-ssa-loop.h" | |
42 #include "ssa.h" | |
43 #include "tree-into-ssa.h" | |
0 | 44 #include "cfgloop.h" |
45 #include "tree-scalar-evolution.h" | |
46 #include "langhooks.h" | |
47 #include "tree-inline.h" | |
48 #include "tree-data-ref.h" | |
111 | 49 #include "diagnostic-core.h" |
50 #include "dbgcnt.h" | |
0 | 51 |
52 /* This pass inserts prefetch instructions to optimize cache usage during | |
53 accesses to arrays in loops. It processes loops sequentially and: | |
54 | |
55 1) Gathers all memory references in the single loop. | |
56 2) For each of the references it decides when it is profitable to prefetch | |
57 it. To do it, we evaluate the reuse among the accesses, and determine | |
58 two values: PREFETCH_BEFORE (meaning that it only makes sense to do | |
59 prefetching in the first PREFETCH_BEFORE iterations of the loop) and | |
60 PREFETCH_MOD (meaning that it only makes sense to prefetch in the | |
61 iterations of the loop that are zero modulo PREFETCH_MOD). For example | |
62 (assuming cache line size is 64 bytes, char has size 1 byte and there | |
63 is no hardware sequential prefetch): | |
64 | |
65 char *a; | |
66 for (i = 0; i < max; i++) | |
67 { | |
68 a[255] = ...; (0) | |
69 a[i] = ...; (1) | |
70 a[i + 64] = ...; (2) | |
71 a[16*i] = ...; (3) | |
72 a[187*i] = ...; (4) | |
73 a[187*i + 50] = ...; (5) | |
74 } | |
75 | |
76 (0) obviously has PREFETCH_BEFORE 1 | |
77 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory | |
78 location 64 iterations before it, and PREFETCH_MOD 64 (since | |
79 it hits the same cache line otherwise). | |
80 (2) has PREFETCH_MOD 64 | |
81 (3) has PREFETCH_MOD 4 | |
82 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since | |
83 the cache line accessed by (5) is the same with probability only |
0 | 84 7/32. |
85 (5) has PREFETCH_MOD 1 as well. | |
86 | |
87 Additionally, we use data dependence analysis to determine for each | |
88 reference the distance till the first reuse; this information is used | |
89 to determine the temporality of the issued prefetch instruction. | |
90 | |
91 3) We determine how much ahead we need to prefetch. The number of | |
92 iterations needed is time to fetch / time spent in one iteration of | |
93 the loop. The problem is that we do not know either of these values, | |
94 so we just make a heuristic guess based on a magic (possibly) | |
95 target-specific constant and size of the loop. | |
96 | |
97 4) Determine which of the references we prefetch. We take into account | |
98 that there is a maximum number of simultaneous prefetches (provided | |
99 by machine description). We issue as many prefetches as possible | |
100 while still within this bound (starting with those with lowest | |
101 prefetch_mod, since they are responsible for most of the cache | |
102 misses). | |
103 |
0 | 104 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD |
105 and PREFETCH_BEFORE requirements (within some bounds), and to avoid | |
106 prefetching nonaccessed memory. | |
107 TODO -- actually implement peeling. | |
108 |
0 | 109 6) We actually emit the prefetch instructions. ??? Perhaps emit the |
110 prefetch instructions with guards in cases where 5) was not sufficient | |
111 to satisfy the constraints? | |
112 | |
113 A cost model is implemented to determine whether or not prefetching is |
114 profitable for a given loop. The cost model has three heuristics: |
115 |
116 1. Function trip_count_to_ahead_ratio_too_small_p implements a |
117 heuristic that determines whether or not the loop has too few |
118 iterations (compared to ahead). Prefetching is not likely to be |
119 beneficial if the trip count to ahead ratio is below a certain |
120 minimum. |
121 |
122 2. Function mem_ref_count_reasonable_p implements a heuristic that |
123 determines whether the given loop has enough CPU ops that can be |
124 overlapped with cache missing memory ops. If not, the loop |
125 won't benefit from prefetching. In the implementation, |
126 prefetching is not considered beneficial if the ratio between |
127 the instruction count and the mem ref count is below a certain |
128 minimum. |
129 |
130 3. Function insn_to_prefetch_ratio_too_small_p implements a |
131 heuristic that disables prefetching in a loop if the prefetching |
132 cost is above a certain limit. The relative prefetching cost is |
133 estimated by taking the ratio between the prefetch count and the |
134 total instruction count (this models the I-cache cost). |
135 |
136 The limits used in these heuristics are defined as parameters with |
137 reasonable default values. Machine-specific default values will be |
138 added later. |
139 |
0 | 140 Some other TODO: |
141 -- write and use more general reuse analysis (that could also be used | |
142 in other cache-aimed loop optimizations) | |
143 -- make it behave sanely together with the prefetches given by the user | |
144 (now we just ignore them; at the very least we should avoid | |
145 optimizing loops in which the user put his own prefetches) | |
146 -- we assume cache line size alignment of arrays; this could be | |
147 improved. */ | |
148 | |
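To make the steps above concrete, here is an editor's sketch, not output of this pass, of roughly the code shape the comment describes for reference (3) of the example loop, a[16*i]: it assumes a 64-byte cache line (so PREFETCH_MOD 4) and an ahead distance of 8 iterations standing in for the heuristic guess of step 3), and it uses an explicit modulo test where the pass itself would unroll the loop (step 5) and emit the equivalent of GCC's __builtin_prefetch.

void
example_after_prefetching (char *a, int max)
{
  const int ahead = 8;   /* assumed ahead distance, see step 3) */
  for (int i = 0; i < max; i++)
    {
      if (i % 4 == 0)    /* PREFETCH_MOD 4: one prefetch per cache line */
        __builtin_prefetch (&a[16 * (i + ahead)], 1, 3);  /* 1 = write, 3 = high locality */
      a[16 * i] = 0;     /* stands for the original a[16*i] = ...; */
    }
}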
149 /* Magic constants follow. These should be replaced by machine specific | |
150 numbers. */ | |
151 | |
152 /* True if write can be prefetched by a read prefetch. */ | |
153 | |
154 #ifndef WRITE_CAN_USE_READ_PREFETCH | |
155 #define WRITE_CAN_USE_READ_PREFETCH 1 | |
156 #endif | |
157 | |
158 /* True if read can be prefetched by a write prefetch. */ | |
159 | |
160 #ifndef READ_CAN_USE_WRITE_PREFETCH | |
161 #define READ_CAN_USE_WRITE_PREFETCH 0 | |
162 #endif | |
163 | |
164 /* The size of the block loaded by a single prefetch. Usually, this is | |
165 the same as cache line size (at the moment, we only consider one level | |
166 of cache hierarchy). */ | |
167 | |
168 #ifndef PREFETCH_BLOCK | |
145 | 169 #define PREFETCH_BLOCK param_l1_cache_line_size |
0 | 170 #endif |
171 | |
172 /* Do we have a forward hardware sequential prefetching? */ | |
173 | |
174 #ifndef HAVE_FORWARD_PREFETCH | |
175 #define HAVE_FORWARD_PREFETCH 0 | |
176 #endif | |
177 | |
178 /* Do we have a backward hardware sequential prefetching? */ | |
179 | |
180 #ifndef HAVE_BACKWARD_PREFETCH | |
181 #define HAVE_BACKWARD_PREFETCH 0 | |
182 #endif | |
183 | |
184 /* In some cases we are only able to determine that there is a certain | |
185 probability that the two accesses hit the same cache line. In this | |
186 case, we issue the prefetches for both of them if this probability | |
187 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */ | |
188 | |
189 #ifndef ACCEPTABLE_MISS_RATE | |
190 #define ACCEPTABLE_MISS_RATE 50 | |
191 #endif | |
192 | |
145 | 193 #define L1_CACHE_SIZE_BYTES ((unsigned) (param_l1_cache_size * 1024)) |
194 #define L2_CACHE_SIZE_BYTES ((unsigned) (param_l2_cache_size * 1024)) | |
0 | 195 |
196 /* We consider a memory access nontemporal if it is not reused sooner than | |
197 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore | |
198 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION, | |
199 so that we use nontemporal prefetches e.g. if a single memory location | |
200 is accessed several times in a single iteration of the loop. */ | |
201 #define NONTEMPORAL_FRACTION 16 | |
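A worked instance of this rule, using illustrative cache sizes rather than any target's defaults (editor's note):

/* Example with a 32 KB L1 and a 512 KB L2: accesses that fall within
   32768 / NONTEMPORAL_FRACTION = 2048 bytes of each other are ignored by
   this test, while a reference whose reuse distance exceeds
   L2_CACHE_SIZE_BYTES = 524288 bytes of accessed memory is issued as a
   nontemporal prefetch.  */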
202 | |
203 /* In case we have to emit a memory fence instruction after the loop that | |
204 uses nontemporal stores, this defines the builtin to use. */ | |
205 | |
206 #ifndef FENCE_FOLLOWING_MOVNT | |
207 #define FENCE_FOLLOWING_MOVNT NULL_TREE | |
208 #endif | |
209 | |
210 /* It is not profitable to prefetch when the trip count is not at |
211 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance. |
212 For example, in a loop with a prefetch ahead distance of 10, |
213 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is |
214 profitable to prefetch when the trip count is greater or equal to |
215 40. In that case, 30 out of the 40 iterations will benefit from |
216 prefetching. */ |
217 |
218 #ifndef TRIP_COUNT_TO_AHEAD_RATIO |
219 #define TRIP_COUNT_TO_AHEAD_RATIO 4 |
220 #endif |
221 |
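A minimal sketch of how this ratio is meant to be used (editor's illustration with a hypothetical helper; the actual check is in trip_count_to_ahead_ratio_too_small_p later in this file):

/* Hypothetical helper, for illustration only.  */
static bool
too_few_iterations_p (unsigned HOST_WIDE_INT est_niter, unsigned ahead)
{
  /* With ahead == 10 and TRIP_COUNT_TO_AHEAD_RATIO == 4, loops estimated
     at fewer than 40 iterations are not worth prefetching.  */
  return est_niter < (unsigned HOST_WIDE_INT) TRIP_COUNT_TO_AHEAD_RATIO * ahead;
}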
0 | 222 /* The group of references between which reuse may occur. */ |
223 | |
224 struct mem_ref_group | |
225 { | |
226 tree base; /* Base of the reference. */ | |
227 tree step; /* Step of the reference. */ |
0 | 228 struct mem_ref *refs; /* References in the group. */ |
229 struct mem_ref_group *next; /* Next group of references. */ | |
111 | 230 unsigned int uid; /* Group UID, used only for debugging. */ |
0 | 231 }; |
232 | |
233 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */ | |
234 | |
111 | 235 #define PREFETCH_ALL HOST_WIDE_INT_M1U |
0 | 236 |
237 /* Do not generate a prefetch if the unroll factor is significantly less |
238 than what is required by the prefetch. This is to avoid redundant |
239 prefetches. For example, when prefetch_mod is 16 and unroll_factor is |
240 2, prefetching requires unrolling the loop 16 times, but |
241 the loop is actually unrolled twice. In this case (ratio = 8), |
242 prefetching is not likely to be beneficial. */ |
243 |
244 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO |
245 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4 |
246 #endif |
247 |
248 /* Some of the prefetch computations have quadratic complexity. We want to |
249 avoid huge compile times and, therefore, want to limit the amount of |
250 memory references per loop where we consider prefetching. */ |
251 |
252 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP |
253 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200 |
254 #endif |
255 |
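The comment's example in numbers (editor's note):

/* prefetch_mod 16 with an actual unroll factor of 2 gives a ratio of
   16 / 2 = 8, above PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO (4), so such a
   prefetch is treated as redundant and not issued.  */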
0 | 256 /* The memory reference. */ |
257 | |
258 struct mem_ref | |
259 { | |
111 | 260 gimple *stmt; /* Statement in which the reference appears. */
0 | 261 tree mem; /* The reference. */ |
262 HOST_WIDE_INT delta; /* Constant offset of the reference. */ | |
263 struct mem_ref_group *group; /* The group of references it belongs to. */ | |
264 unsigned HOST_WIDE_INT prefetch_mod; | |
265 /* Prefetch only each PREFETCH_MOD-th | |
266 iteration. */ | |
267 unsigned HOST_WIDE_INT prefetch_before; | |
268 /* Prefetch only first PREFETCH_BEFORE | |
269 iterations. */ | |
270 unsigned reuse_distance; /* The amount of data accessed before the first | |
271 reuse of this value. */ | |
272 struct mem_ref *next; /* The next reference in the group. */ | |
111 | 273 unsigned int uid; /* Ref UID, used only for debugging. */ |
0 | 274 unsigned write_p : 1; /* Is it a write? */ |
275 unsigned independent_p : 1; /* True if the reference is independent of | |
276 all other references inside the loop. */ | |
277 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */ | |
278 unsigned storent_p : 1; /* True if we changed the store to a | |
279 nontemporal one. */ | |
280 }; | |
281 | |
111 | 282 /* Dumps information about a memory reference. */
283 static void | |
284 dump_mem_details (FILE *file, tree base, tree step, | |
285 HOST_WIDE_INT delta, bool write_p) | |
286 { | |
287 fprintf (file, "(base "); | |
288 print_generic_expr (file, base, TDF_SLIM); | |
289 fprintf (file, ", step "); | |
290 if (cst_and_fits_in_hwi (step)) | |
291 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step)); | |
292 else | |
293 print_generic_expr (file, step, TDF_SLIM); | |
294 fprintf (file, ")\n"); | |
295 fprintf (file, " delta " HOST_WIDE_INT_PRINT_DEC "\n", delta); | |
296 fprintf (file, " %s\n\n", write_p ? "write" : "read"); | |
297 } | |
298 | |
0 | 299 /* Dumps information about reference REF to FILE. */ |
300 | |
301 static void | |
302 dump_mem_ref (FILE *file, struct mem_ref *ref) | |
303 { | |
111 | 304 fprintf (file, "reference %u:%u (", ref->group->uid, ref->uid); |
305 print_generic_expr (file, ref->mem, TDF_SLIM); | |
0 | 306 fprintf (file, ")\n"); |
307 } | |
308 | |
309 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not | |
310 exist. */ | |
311 | |
312 static struct mem_ref_group * | |
313 find_or_create_group (struct mem_ref_group **groups, tree base, tree step) |
0 | 314 { |
111 | 315 /* Global count for setting struct mem_ref_group->uid. */ |
316 static unsigned int last_mem_ref_group_uid = 0; | |
317 | |
0 | 318 struct mem_ref_group *group; |
319 | |
320 for (; *groups; groups = &(*groups)->next) | |
321 { | |
322 if (operand_equal_p ((*groups)->step, step, 0) |
0 | 323 && operand_equal_p ((*groups)->base, base, 0)) |
324 return *groups; | |
325 | |
326 /* If step is an integer constant, keep the list of groups sorted |
327 by decreasing step. */ |
111 | 328 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step) |
329 && int_cst_value ((*groups)->step) < int_cst_value (step)) | |
0 | 330 break; |
331 } | |
332 | |
333 group = XNEW (struct mem_ref_group); | |
334 group->base = base; | |
335 group->step = step; | |
336 group->refs = NULL; | |
111 | 337 group->uid = ++last_mem_ref_group_uid; |
0 | 338 group->next = *groups; |
339 *groups = group; | |
340 | |
341 return group; | |
342 } | |
343 | |
344 /* Records a memory reference MEM in GROUP with offset DELTA and write status | |
345 WRITE_P. The reference occurs in statement STMT. */ | |
346 | |
347 static void | |
111 | 348 record_ref (struct mem_ref_group *group, gimple *stmt, tree mem, |
0 | 349 HOST_WIDE_INT delta, bool write_p) |
350 { | |
111 | 351 unsigned int last_mem_ref_uid = 0; |
0 | 352 struct mem_ref **aref; |
353 | |
354 /* Do not record the same address twice. */ | |
355 for (aref = &group->refs; *aref; aref = &(*aref)->next) | |
356 { | |
111 | 357 last_mem_ref_uid = (*aref)->uid; |
358 | |
0 | 359 /* It does not have to be possible for a write reference to reuse the read
360 prefetch, or vice versa. */ | |
361 if (!WRITE_CAN_USE_READ_PREFETCH | |
362 && write_p | |
363 && !(*aref)->write_p) | |
364 continue; | |
365 if (!READ_CAN_USE_WRITE_PREFETCH | |
366 && !write_p | |
367 && (*aref)->write_p) | |
368 continue; | |
369 | |
370 if ((*aref)->delta == delta) | |
371 return; | |
372 } | |
373 | |
374 (*aref) = XNEW (struct mem_ref); | |
375 (*aref)->stmt = stmt; | |
376 (*aref)->mem = mem; | |
377 (*aref)->delta = delta; | |
378 (*aref)->write_p = write_p; | |
379 (*aref)->prefetch_before = PREFETCH_ALL; | |
380 (*aref)->prefetch_mod = 1; | |
381 (*aref)->reuse_distance = 0; | |
382 (*aref)->issue_prefetch_p = false; | |
383 (*aref)->group = group; | |
384 (*aref)->next = NULL; | |
385 (*aref)->independent_p = false; | |
386 (*aref)->storent_p = false; | |
111 | 387 (*aref)->uid = last_mem_ref_uid + 1; |
0 | 388 |
389 if (dump_file && (dump_flags & TDF_DETAILS)) | |
111 | 390 { |
391 dump_mem_ref (dump_file, *aref); | |
392 | |
393 fprintf (dump_file, " group %u ", group->uid); | |
394 dump_mem_details (dump_file, group->base, group->step, delta, | |
395 write_p); | |
396 } | |
0 | 397 } |
398 | |
399 /* Release memory references in GROUPS. */ | |
400 | |
401 static void | |
402 release_mem_refs (struct mem_ref_group *groups) | |
403 { | |
404 struct mem_ref_group *next_g; | |
405 struct mem_ref *ref, *next_r; | |
406 | |
407 for (; groups; groups = next_g) | |
408 { | |
409 next_g = groups->next; | |
410 for (ref = groups->refs; ref; ref = next_r) | |
411 { | |
412 next_r = ref->next; | |
413 free (ref); | |
414 } | |
415 free (groups); | |
416 } | |
417 } | |
418 | |
419 /* A structure used to pass arguments to idx_analyze_ref. */ | |
420 | |
421 struct ar_data | |
422 { | |
145 | 423 class loop *loop; /* Loop of the reference. */ |
111 | 424 gimple *stmt; /* Statement of the reference. */ |
425 tree *step; /* Step of the memory reference. */ |
0 | 426 HOST_WIDE_INT *delta; /* Offset of the memory reference. */ |
427 }; | |
428 | |
429 /* Analyzes a single INDEX of a memory reference to obtain information | |
430 described at analyze_ref. Callback for for_each_index. */ | |
431 | |
432 static bool | |
433 idx_analyze_ref (tree base, tree *index, void *data) | |
434 { | |
435 struct ar_data *ar_data = (struct ar_data *) data; | |
436 tree ibase, step, stepsize; | |
437 HOST_WIDE_INT idelta = 0, imult = 1; |
0 | 438 affine_iv iv; |
439 | |
440 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt), | |
441 *index, &iv, true)) |
0 | 442 return false; |
443 ibase = iv.base; | |
444 step = iv.step; | |
445 | |
446 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR | |
447 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1))) | |
448 { | |
449 idelta = int_cst_value (TREE_OPERAND (ibase, 1)); | |
450 ibase = TREE_OPERAND (ibase, 0); | |
451 } | |
452 if (cst_and_fits_in_hwi (ibase)) | |
453 { | |
454 idelta += int_cst_value (ibase); | |
455 ibase = build_int_cst (TREE_TYPE (ibase), 0); | |
456 } | |
457 | |
458 if (TREE_CODE (base) == ARRAY_REF) |
459 { |
460 stepsize = array_ref_element_size (base); |
461 if (!cst_and_fits_in_hwi (stepsize)) |
462 return false; |
463 imult = int_cst_value (stepsize); |
464 step = fold_build2 (MULT_EXPR, sizetype, |
465 fold_convert (sizetype, step), |
466 fold_convert (sizetype, stepsize)); |
467 idelta *= imult; |
468 } |
469 |
470 if (*ar_data->step == NULL_TREE) |
471 *ar_data->step = step; |
472 else |
473 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype, |
474 fold_convert (sizetype, *ar_data->step), |
475 fold_convert (sizetype, step)); |
0 | 476 *ar_data->delta += idelta; |
477 *index = ibase; | |
478 | |
479 return true; | |
480 } | |
481 | |
482 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and | |
483 STEP are integer constants and iter is number of iterations of LOOP. The | |
484 reference occurs in statement STMT. Strips nonaddressable component | |
485 references from REF_P. */ | |
486 | |
487 static bool | |
145 | 488 analyze_ref (class loop *loop, tree *ref_p, tree *base, |
489 tree *step, HOST_WIDE_INT *delta, |
111 | 490 gimple *stmt) |
0 | 491 { |
492 struct ar_data ar_data; | |
493 tree off; | |
494 HOST_WIDE_INT bit_offset; | |
495 tree ref = *ref_p; | |
496 | |
497 *step = NULL_TREE; |
0 | 498 *delta = 0; |
499 | |
500 /* First strip off the component references. Ignore bitfields. |
501 Also strip off the real and imaginary parts of a complex, so that |
502 they can have the same base. */ |
503 if (TREE_CODE (ref) == REALPART_EXPR |
504 || TREE_CODE (ref) == IMAGPART_EXPR |
505 || (TREE_CODE (ref) == COMPONENT_REF |
506 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))) |
507 { |
508 if (TREE_CODE (ref) == IMAGPART_EXPR) |
509 *delta += int_size_in_bytes (TREE_TYPE (ref)); |
510 ref = TREE_OPERAND (ref, 0); |
511 } |
0 | 512 |
513 *ref_p = ref; | |
514 | |
515 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0)) | |
516 { | |
517 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); | |
518 bit_offset = TREE_INT_CST_LOW (off); | |
519 gcc_assert (bit_offset % BITS_PER_UNIT == 0); | |
520 |
0 | 521 *delta += bit_offset / BITS_PER_UNIT; |
522 } | |
523 | |
524 *base = unshare_expr (ref); | |
525 ar_data.loop = loop; | |
526 ar_data.stmt = stmt; | |
527 ar_data.step = step; | |
528 ar_data.delta = delta; | |
529 return for_each_index (base, idx_analyze_ref, &ar_data); | |
530 } | |
531 | |
532 /* Record a memory reference REF to the list REFS. The reference occurs in | |
533 LOOP in statement STMT and it is a write if WRITE_P. Returns true if the | |
534 reference was recorded, false otherwise. */ | |
535 | |
536 static bool | |
145 | 537 gather_memory_references_ref (class loop *loop, struct mem_ref_group **refs, |
111 | 538 tree ref, bool write_p, gimple *stmt) |
0 | 539 { |
540 tree base, step; |
541 HOST_WIDE_INT delta; |
0 | 542 struct mem_ref_group *agrp; |
543 | |
544 if (get_base_address (ref) == NULL) | |
545 return false; | |
546 | |
547 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt)) | |
548 return false; | |
549 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */ |
550 if (step == NULL_TREE) |
551 return false; |
0 | 552 |
553 /* Stop if the address of BASE could not be taken. */ |
554 if (may_be_nonaddressable_p (base)) |
555 return false; |
556 |
111 | 557 /* Limit non-constant step prefetching only to the innermost loops and |
558 only when the step is loop invariant in the entire loop nest. */ | |
559 if (!cst_and_fits_in_hwi (step)) | |
560 { | |
561 if (loop->inner != NULL) | |
562 { | |
563 if (dump_file && (dump_flags & TDF_DETAILS)) | |
564 { | |
565 fprintf (dump_file, "Memory expression %p\n",(void *) ref ); | |
566 print_generic_expr (dump_file, ref, TDF_SLIM); | |
567 fprintf (dump_file,":"); | |
568 dump_mem_details (dump_file, base, step, delta, write_p); | |
569 fprintf (dump_file, | |
570 "Ignoring %p, non-constant step prefetching is " | |
571 "limited to inner most loops \n", | |
572 (void *) ref); | |
573 } | |
574 return false; | |
575 } | |
576 else | |
577 { | |
578 if (!expr_invariant_in_loop_p (loop_outermost (loop), step)) | |
579 { | |
580 if (dump_file && (dump_flags & TDF_DETAILS)) | |
581 { | |
582 fprintf (dump_file, "Memory expression %p\n",(void *) ref ); | |
583 print_generic_expr (dump_file, ref, TDF_SLIM); | |
584 fprintf (dump_file,":"); | |
585 dump_mem_details (dump_file, base, step, delta, write_p); | |
586 fprintf (dump_file, | |
587 "Not prefetching, ignoring %p due to " | |
588 "loop variant step\n", | |
589 (void *) ref); | |
590 } | |
591 return false; | |
592 } | |
593 } | |
594 } | |
595 |
0 | 596 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP |
597 are integer constants. */ | |
598 agrp = find_or_create_group (refs, base, step); | |
599 record_ref (agrp, stmt, ref, delta, write_p); | |
600 | |
601 return true; | |
602 } | |
603 | |
604 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to | |
605 true if there are no other memory references inside the loop. */ | |
606 | |
607 static struct mem_ref_group * | |
145 | 608 gather_memory_references (class loop *loop, bool *no_other_refs, unsigned *ref_count) |
0 | 609 { |
610 basic_block *body = get_loop_body_in_dom_order (loop); | |
611 basic_block bb; | |
612 unsigned i; | |
613 gimple_stmt_iterator bsi; | |
111 | 614 gimple *stmt; |
0 | 615 tree lhs, rhs; |
616 struct mem_ref_group *refs = NULL; | |
617 | |
618 *no_other_refs = true; | |
619 *ref_count = 0; |
0 | 620 |
621 /* Scan the loop body in order, so that the former references precede the | |
622 later ones. */ | |
623 for (i = 0; i < loop->num_nodes; i++) | |
624 { | |
625 bb = body[i]; | |
626 if (bb->loop_father != loop) | |
627 continue; | |
628 | |
629 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) | |
630 { | |
631 stmt = gsi_stmt (bsi); | |
632 | |
633 if (gimple_code (stmt) != GIMPLE_ASSIGN) | |
634 { | |
635 if (gimple_vuse (stmt) |
0 | 636 || (is_gimple_call (stmt) |
637 && !(gimple_call_flags (stmt) & ECF_CONST))) | |
638 *no_other_refs = false; | |
639 continue; | |
640 } | |
641 | |
111 | 642 if (! gimple_vuse (stmt)) |
643 continue; | |
644 | |
0 | 645 lhs = gimple_assign_lhs (stmt); |
646 rhs = gimple_assign_rhs1 (stmt); | |
647 | |
648 if (REFERENCE_CLASS_P (rhs)) | |
649 { |
0 | 650 *no_other_refs &= gather_memory_references_ref (loop, &refs, |
651 rhs, false, stmt); | |
652 *ref_count += 1; |
653 } |
0 | 654 if (REFERENCE_CLASS_P (lhs)) |
655 { |
0 | 656 *no_other_refs &= gather_memory_references_ref (loop, &refs, |
657 lhs, true, stmt); | |
658 *ref_count += 1; |
659 } |
0 | 660 } |
661 } | |
662 free (body); | |
663 | |
664 return refs; | |
665 } | |
666 | |
667 /* Prune the prefetch candidate REF using the self-reuse. */ | |
668 | |
669 static void | |
670 prune_ref_by_self_reuse (struct mem_ref *ref) | |
671 { | |
672 HOST_WIDE_INT step; |
673 bool backward; |
674 |
675 /* If the step size is non constant, we cannot calculate prefetch_mod. */ |
676 if (!cst_and_fits_in_hwi (ref->group->step)) |
677 return; |
678 |
679 step = int_cst_value (ref->group->step); |
680 |
681 backward = step < 0; |
0 | 682 |
683 if (step == 0) | |
684 { | |
685 /* Prefetch references to invariant address just once. */ | |
686 ref->prefetch_before = 1; | |
687 return; | |
688 } | |
689 | |
690 if (backward) | |
691 step = -step; | |
692 | |
693 if (step > PREFETCH_BLOCK) | |
694 return; | |
695 | |
696 if ((backward && HAVE_BACKWARD_PREFETCH) | |
697 || (!backward && HAVE_FORWARD_PREFETCH)) | |
698 { | |
699 ref->prefetch_before = 1; | |
700 return; | |
701 } | |
702 | |
703 ref->prefetch_mod = PREFETCH_BLOCK / step; | |
704 } | |
705 | |
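Tying the computation above back to the example in the file header (editor's note):

/* Worked example: with a 64-byte PREFETCH_BLOCK and the reference a[16*i]
   from the comment at the top of the file (constant step 16, forward, no
   hardware sequential prefetch), the code above leaves prefetch_before at
   PREFETCH_ALL and sets prefetch_mod = 64 / 16 = 4, matching case (3).  */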
706 /* Divides X by BY, rounding down. */ | |
707 | |
708 static HOST_WIDE_INT | |
709 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by) | |
710 { | |
711 gcc_assert (by > 0); | |
712 | |
713 if (x >= 0) | |
111 | 714 return x / (HOST_WIDE_INT) by; |
0 | 715 else |
111 | 716 return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by; |
0 | 717 } |
718 | |
719 /* Given a CACHE_LINE_SIZE and two inductive memory references |
720 with a common STEP greater than CACHE_LINE_SIZE and an address |
721 difference DELTA, compute the probability that they will fall |
722 in different cache lines. Return true if the computed miss rate |
723 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the |
724 number of distinct iterations after which the pattern repeats itself. |
725 ALIGN_UNIT is the unit of alignment in bytes. */ |
726 |
727 static bool |
728 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size, |
729 HOST_WIDE_INT step, HOST_WIDE_INT delta, |
730 unsigned HOST_WIDE_INT distinct_iters, |
731 int align_unit) |
732 { |
733 unsigned align, iter; |
734 int total_positions, miss_positions, max_allowed_miss_positions; |
735 int address1, address2, cache_line1, cache_line2; |
736 |
737 /* It always misses if delta is greater than or equal to the cache |
738 line size. */ |
739 if (delta >= (HOST_WIDE_INT) cache_line_size) |
740 return false; |
741 |
742 miss_positions = 0; |
743 total_positions = (cache_line_size / align_unit) * distinct_iters; |
744 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000; |
745 |
746 /* Iterate through all possible alignments of the first |
747 memory reference within its cache line. */ |
748 for (align = 0; align < cache_line_size; align += align_unit) |
749 |
750 /* Iterate through all distinct iterations. */ |
751 for (iter = 0; iter < distinct_iters; iter++) |
752 { |
753 address1 = align + step * iter; |
754 address2 = address1 + delta; |
755 cache_line1 = address1 / cache_line_size; |
756 cache_line2 = address2 / cache_line_size; |
757 if (cache_line1 != cache_line2) |
758 { |
759 miss_positions += 1; |
760 if (miss_positions > max_allowed_miss_positions) |
761 return false; |
762 } |
763 } |
764 return true; |
765 } |
766 |
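A worked instance of the computation above (editor's note):

/* With cache_line_size 64, align_unit 4 and distinct_iters 2,
   total_positions = (64 / 4) * 2 = 32 and max_allowed_miss_positions
   = (ACCEPTABLE_MISS_RATE * 32) / 1000 = (50 * 32) / 1000 = 1, so the
   reference pair is rejected as soon as two of the 32 sampled
   (alignment, iteration) positions fall in different cache lines.  */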
0 | 767 /* Prune the prefetch candidate REF using the reuse with BY. |
768 If BY_IS_BEFORE is true, BY is before REF in the loop. */ | |
769 | |
770 static void | |
771 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by, | |
772 bool by_is_before) | |
773 { | |
774 HOST_WIDE_INT step; |
775 bool backward; |
0 | 776 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta; |
777 HOST_WIDE_INT delta = delta_b - delta_r; | |
778 HOST_WIDE_INT hit_from; | |
779 unsigned HOST_WIDE_INT prefetch_before, prefetch_block; | |
780 HOST_WIDE_INT reduced_step; |
781 unsigned HOST_WIDE_INT reduced_prefetch_block; |
782 tree ref_type; |
783 int align_unit; |
0 | 784 |
785 /* If the step is non constant we cannot calculate prefetch_before. */ |
786 if (!cst_and_fits_in_hwi (ref->group->step)) { |
787 return; |
788 } |
789 |
790 step = int_cst_value (ref->group->step); |
791 |
792 backward = step < 0; |
793 |
794 |
0 | 795 if (delta == 0) |
796 { | |
797 /* If the references have the same address, only prefetch the | |
798 former. */ | |
799 if (by_is_before) | |
800 ref->prefetch_before = 0; | |
801 |
0 | 802 return; |
803 } | |
804 | |
805 if (!step) | |
806 { | |
807 /* If the reference addresses are invariant and fall into the | |
808 same cache line, prefetch just the first one. */ | |
809 if (!by_is_before) | |
810 return; | |
811 | |
812 if (ddown (ref->delta, PREFETCH_BLOCK) | |
813 != ddown (by->delta, PREFETCH_BLOCK)) | |
814 return; | |
815 | |
816 ref->prefetch_before = 0; | |
817 return; | |
818 } | |
819 | |
820 /* Only prune the reference that is behind in the array. */ | |
821 if (backward) | |
822 { | |
823 if (delta > 0) | |
824 return; | |
825 | |
826 /* Transform the data so that we may assume that the accesses | |
827 are forward. */ | |
828 delta = - delta; | |
829 step = -step; | |
830 delta_r = PREFETCH_BLOCK - 1 - delta_r; | |
831 delta_b = PREFETCH_BLOCK - 1 - delta_b; | |
832 } | |
833 else | |
834 { | |
835 if (delta < 0) | |
836 return; | |
837 } | |
838 | |
839 /* Check whether the two references are likely to hit the same cache | |
840 line, and how distant the iterations in which it occurs are from | |
841 each other. */ | |
842 | |
843 if (step <= PREFETCH_BLOCK) | |
844 { | |
845 /* The accesses are sure to meet. Let us check when. */ | |
846 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK; | |
847 prefetch_before = (hit_from - delta_r + step - 1) / step; | |
848 | |
849 /* Do not reduce prefetch_before if we meet beyond cache size. */ |
111 | 850 if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step)) |
851 prefetch_before = PREFETCH_ALL; |
0 | 852 if (prefetch_before < ref->prefetch_before) |
853 ref->prefetch_before = prefetch_before; | |
854 | |
855 return; | |
856 } | |
857 | |
858 /* A more complicated case with step > prefetch_block. First reduce |
859 the ratio between the step and the cache line size to its simplest |
860 terms. The resulting denominator will then represent the number of |
861 distinct iterations after which each address will go back to its |
862 initial location within the cache line. This computation assumes |
863 that PREFETCH_BLOCK is a power of two. */ |
0 | 864 prefetch_block = PREFETCH_BLOCK; |
865 reduced_prefetch_block = prefetch_block; |
866 reduced_step = step; |
867 while ((reduced_step & 1) == 0 |
868 && reduced_prefetch_block > 1) |
0 | 869 { |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
870 reduced_step >>= 1; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
871 reduced_prefetch_block >>= 1; |
0 | 872 } |
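To make the reduction concrete, the following standalone sketch (illustrative values only, not part of the pass) applies the same even-factor stripping to step = 48 and a 64-byte cache line; the ratio reduces to 3/4, so the offset of the access within its cache line repeats every 4 iterations (4 * 48 = 192 = 3 * 64):

#include <stdio.h>

/* Same even-factor stripping as the loop above, on hypothetical values.  */
int
main (void)
{
  unsigned step = 48, prefetch_block = 64;   /* hypothetical values */
  unsigned reduced_step = step;
  unsigned reduced_prefetch_block = prefetch_block;

  while ((reduced_step & 1) == 0 && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  /* Prints "reduced ratio: 3/4".  */
  printf ("reduced ratio: %u/%u\n", reduced_step, reduced_prefetch_block);
  return 0;
}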
873 | |
874 prefetch_before = delta / step; | |
875 delta %= step; | |
55 | 876 ref_type = TREE_TYPE (ref->mem); |
877 align_unit = TYPE_ALIGN (ref_type) / 8; |
67 | 878 if (is_miss_rate_acceptable (prefetch_block, step, delta, |
879 reduced_prefetch_block, align_unit)) |
0 | 880 { |
881 /* Do not reduce prefetch_before if we meet beyond cache size. */ |
882 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK) |
883 prefetch_before = PREFETCH_ALL; |
0 | 884 if (prefetch_before < ref->prefetch_before) |
885 ref->prefetch_before = prefetch_before; | |
886 | |
887 return; | |
888 } | |
889 | |
890 /* Try also the following iteration. */ | |
891 prefetch_before++; | |
892 delta = step - delta; | |
893 if (is_miss_rate_acceptable (prefetch_block, step, delta, |
894 reduced_prefetch_block, align_unit)) |
0 | 895 { |
896 if (prefetch_before < ref->prefetch_before) | |
897 ref->prefetch_before = prefetch_before; | |
898 | |
899 return; | |
900 } | |
901 | |
902 /* The ref probably does not reuse by. */ | |
903 return; | |
904 } | |
905 | |
906 /* Prune the prefetch candidate REF using the reuses with other references | |
907 in REFS. */ | |
908 | |
909 static void | |
910 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs) | |
911 { | |
912 struct mem_ref *prune_by; | |
913 bool before = true; | |
914 | |
915 prune_ref_by_self_reuse (ref); | |
916 | |
917 for (prune_by = refs; prune_by; prune_by = prune_by->next) | |
918 { | |
919 if (prune_by == ref) | |
920 { | |
921 before = false; | |
922 continue; | |
923 } | |
924 | |
925 if (!WRITE_CAN_USE_READ_PREFETCH | |
926 && ref->write_p | |
927 && !prune_by->write_p) | |
928 continue; | |
929 if (!READ_CAN_USE_WRITE_PREFETCH | |
930 && !ref->write_p | |
931 && prune_by->write_p) | |
932 continue; | |
933 | |
934 prune_ref_by_group_reuse (ref, prune_by, before); | |
935 } | |
936 } | |
937 | |
938 /* Prune the prefetch candidates in GROUP using the reuse analysis. */ | |
939 | |
940 static void | |
941 prune_group_by_reuse (struct mem_ref_group *group) | |
942 { | |
943 struct mem_ref *ref_pruned; | |
944 | |
945 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next) | |
946 { | |
947 prune_ref_by_reuse (ref_pruned, group->refs); | |
948 | |
949 if (dump_file && (dump_flags & TDF_DETAILS)) | |
950 { | |
111 | 951 dump_mem_ref (dump_file, ref_pruned); |
0 | 952 |
953 if (ref_pruned->prefetch_before == PREFETCH_ALL | |
954 && ref_pruned->prefetch_mod == 1) | |
955 fprintf (dump_file, " no restrictions"); | |
956 else if (ref_pruned->prefetch_before == 0) | |
957 fprintf (dump_file, " do not prefetch"); | |
958 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod) | |
959 fprintf (dump_file, " prefetch once"); | |
960 else | |
961 { | |
962 if (ref_pruned->prefetch_before != PREFETCH_ALL) | |
963 { | |
964 fprintf (dump_file, " prefetch before "); | |
965 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, | |
966 ref_pruned->prefetch_before); | |
967 } | |
968 if (ref_pruned->prefetch_mod != 1) | |
969 { | |
970 fprintf (dump_file, " prefetch mod "); | |
971 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, | |
972 ref_pruned->prefetch_mod); | |
973 } | |
974 } | |
975 fprintf (dump_file, "\n"); | |
976 } | |
977 } | |
978 } | |
979 | |
980 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */ | |
981 | |
982 static void | |
983 prune_by_reuse (struct mem_ref_group *groups) | |
984 { | |
985 for (; groups; groups = groups->next) | |
986 prune_group_by_reuse (groups); | |
987 } | |
988 | |
989 /* Returns true if we should issue prefetch for REF. */ | |
990 | |
991 static bool | |
992 should_issue_prefetch_p (struct mem_ref *ref) | |
993 { | |
131 | 994 /* Do we want to issue prefetches for non-constant strides? */ |
145 | 995 if (!cst_and_fits_in_hwi (ref->group->step) |
996 && param_prefetch_dynamic_strides == 0) | |
131 | 997 { |
998 if (dump_file && (dump_flags & TDF_DETAILS)) | |
999 fprintf (dump_file, | |
1000 "Skipping non-constant step for reference %u:%u\n", | |
1001 ref->group->uid, ref->uid); | |
1002 return false; | |
1003 } | |
1004 | |
1005 /* Some processors may have a hardware prefetcher that may conflict with | |
1006 prefetch hints for a range of strides. Make sure we don't issue | |
1007 prefetches for such cases if the stride is within this particular | |
1008 range. */ | |
1009 if (cst_and_fits_in_hwi (ref->group->step) | |
1010 && abs_hwi (int_cst_value (ref->group->step)) | |
145 | 1011 < (HOST_WIDE_INT) param_prefetch_minimum_stride) |
131 | 1012 { |
1013 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1014 fprintf (dump_file, | |
1015 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC | |
1016 ") is less than the mininum required stride of %d\n", | |
1017 ref->group->uid, ref->uid, int_cst_value (ref->group->step), | |
145 | 1018 param_prefetch_minimum_stride); |
131 | 1019 return false; |
1020 } | |
1021 | |
0 | 1022 /* For now, do not issue prefetches that would only be useful for |
1023 the first few iterations. */ | |
1024 if (ref->prefetch_before != PREFETCH_ALL) | |
63 | 1025 { |
1026 if (dump_file && (dump_flags & TDF_DETAILS)) |
111 | 1027 fprintf (dump_file, "Ignoring reference %u:%u due to prefetch_before\n", |
1028 ref->group->uid, ref->uid); |
63 | 1029 return false; |
1030 } |
0 | 1031 |
1032 /* Do not prefetch nontemporal stores. */ | |
1033 if (ref->storent_p) | |
63 | 1034 { |
1035 if (dump_file && (dump_flags & TDF_DETAILS)) |
111 | 1036 fprintf (dump_file, "Ignoring nontemporal store reference %u:%u\n", ref->group->uid, ref->uid); |
63 | 1037 return false; |
1038 } |
0 | 1039 |
1040 return true; | |
1041 } | |
1042 | |
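For illustration, here are two hypothetical loops that the early-exit checks above would treat differently (a sketch only; the command-line spellings of these knobs, presumably --param prefetch-dynamic-strides and --param prefetch-minimum-stride, are an assumption of this note):

/* Non-constant stride: skipped unless prefetching of dynamic strides is
   enabled (param_prefetch_dynamic_strides above).  */
void
scale_strided (double *a, const double *b, long n, long stride)
{
  for (long i = 0; i < n; i++)
    a[i * stride] = 2.0 * b[i * stride];
}

/* Small constant stride (16 bytes here): skipped when it falls below the
   configured minimum stride (param_prefetch_minimum_stride above).  */
void
copy_even (double *a, const double *b, long n)
{
  for (long i = 0; i < n; i++)
    a[2 * i] = b[2 * i];
}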
1043 /* Decide which of the prefetch candidates in GROUPS to prefetch. | |
1044 AHEAD is the number of iterations to prefetch ahead (which corresponds | |
1045 to the number of simultaneous instances of one prefetch running at a | |
1046 time). UNROLL_FACTOR is the factor by which the loop is going to be | |
1047 unrolled. Returns true if there is anything to prefetch. */ | |
1048 | |
1049 static bool | |
1050 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor, | |
1051 unsigned ahead) | |
1052 { | |
1053 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots; | |
1054 unsigned slots_per_prefetch; | |
1055 struct mem_ref *ref; | |
1056 bool any = false; | |
1057 | |
145 | 1058 /* At most param_simultaneous_prefetches should be running |
1059 at the same time. */ | |
1060 remaining_prefetch_slots = param_simultaneous_prefetches; | |
0 | 1061 |
1062 /* The prefetch will run for AHEAD iterations of the original loop, i.e., | |
1063 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration, | |
1064 it will need a prefetch slot. */ | |
1065 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor; | |
1066 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1067 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n", | |
1068 slots_per_prefetch); | |
1069 | |
1070 /* For now we just take memory references one by one and issue | |
1071 prefetches for as many as possible. The groups are sorted | |
1072 starting with the largest step, since the references with | |
1073 large step are more likely to cause many cache misses. */ | |
1074 | |
1075 for (; groups; groups = groups->next) | |
1076 for (ref = groups->refs; ref; ref = ref->next) | |
1077 { | |
1078 if (!should_issue_prefetch_p (ref)) | |
1079 continue; | |
1080 | |
63 | 1081 /* The loop is far from being sufficiently unrolled for this |
1082 prefetch. Do not generate the prefetch to avoid many redundant |
1083 prefetches. */ |
1084 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO) |
1085 continue; |
1086 |
0 | 1087 /* If we need to prefetch the reference each PREFETCH_MOD iterations, |
1088 and we unroll the loop UNROLL_FACTOR times, we need to insert | |
1089 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each | |
1090 iteration. */ | |
1091 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1) | |
1092 / ref->prefetch_mod); | |
1093 prefetch_slots = n_prefetches * slots_per_prefetch; | |
1094 | |
1095 /* If more than half of the prefetches would be lost anyway, do not | |
1096 issue the prefetch. */ | |
1097 if (2 * remaining_prefetch_slots < prefetch_slots) | |
1098 continue; | |
1099 | |
111 | 1100 /* Stop prefetching if debug counter is activated. */ |
1101 if (!dbg_cnt (prefetch)) | |
1102 continue; | |
1103 | |
0 | 1104 ref->issue_prefetch_p = true; |
111 | 1105 if (dump_file && (dump_flags & TDF_DETAILS)) |
1106 fprintf (dump_file, "Decided to issue prefetch for reference %u:%u\n", | |
1107 ref->group->uid, ref->uid); | |
0 | 1108 |
1109 if (remaining_prefetch_slots <= prefetch_slots) | |
1110 return true; | |
1111 remaining_prefetch_slots -= prefetch_slots; | |
1112 any = true; | |
1113 } | |
1114 | |
1115 return any; | |
1116 } | |
1117 | |
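A worked example of the slot accounting above, with hypothetical numbers: with AHEAD = 8 and UNROLL_FACTOR = 4, slots_per_prefetch = (8 + 2) / 4 = 2. A reference with PREFETCH_MOD = 2 then needs n_prefetches = ceil (4 / 2) = 2 prefetch instructions per unrolled iteration, i.e. prefetch_slots = 4. If param_simultaneous_prefetches is 6, the reference is scheduled (2 * 6 >= 4) and 2 slots remain; a second identical reference is still scheduled (2 * 2 >= 4 means fewer than half of its prefetches are lost), after which remaining_prefetch_slots <= prefetch_slots and scheduling stops.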
67 | 1118 /* Return TRUE if no prefetch is going to be generated in the given |
1119 GROUPS. */ |
1120 |
1121 static bool |
1122 nothing_to_prefetch_p (struct mem_ref_group *groups) |
1123 { |
1124 struct mem_ref *ref; |
1125 |
1126 for (; groups; groups = groups->next) |
1127 for (ref = groups->refs; ref; ref = ref->next) |
1128 if (should_issue_prefetch_p (ref)) |
1129 return false; |
1130 |
1131 return true; |
1132 } |
1133 |
1134 /* Estimate the number of prefetches in the given GROUPS. |
1135 UNROLL_FACTOR is the factor by which LOOP was unrolled. */ |
0 | 1136 |
55 | 1137 static int |
67 | 1138 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor) |
0 | 1139 { |
1140 struct mem_ref *ref; | |
67 | 1141 unsigned n_prefetches; |
55 | 1142 int prefetch_count = 0; |
0 | 1143 |
1144 for (; groups; groups = groups->next) | |
1145 for (ref = groups->refs; ref; ref = ref->next) | |
1146 if (should_issue_prefetch_p (ref)) | |
67 | 1147 { |
1148 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1) |
1149 / ref->prefetch_mod); |
1150 prefetch_count += n_prefetches; |
1151 } |
0 | 1152 |
55 | 1153 return prefetch_count; |
0 | 1154 } |
1155 | |
1156 /* Issue prefetches for the reference REF into loop as decided before. | |
1157 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR | |
1158 is the factor by which LOOP was unrolled. */ | |
1159 | |
1160 static void | |
1161 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) | |
1162 { | |
1163 HOST_WIDE_INT delta; | |
63 | 1164 tree addr, addr_base, write_p, local, forward; |
111 | 1165 gcall *prefetch; |
0 | 1166 gimple_stmt_iterator bsi; |
1167 unsigned n_prefetches, ap; | |
1168 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES; | |
1169 | |
1170 if (dump_file && (dump_flags & TDF_DETAILS)) | |
111 | 1171 fprintf (dump_file, "Issued%s prefetch for reference %u:%u.\n", |
0 | 1172 nontemporal ? " nontemporal" : "", |
111 | 1173 ref->group->uid, ref->uid); |
0 | 1174 |
1175 bsi = gsi_for_stmt (ref->stmt); | |
1176 | |
1177 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1) | |
1178 / ref->prefetch_mod); | |
1179 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node); | |
1180 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base), | |
1181 true, NULL, true, GSI_SAME_STMT); | |
1182 write_p = ref->write_p ? integer_one_node : integer_zero_node; | |
67 | 1183 local = nontemporal ? integer_zero_node : integer_three_node; |
0 | 1184 |
1185 for (ap = 0; ap < n_prefetches; ap++) | |
1186 { | |
63 | 1187 if (cst_and_fits_in_hwi (ref->group->step)) |
1188 { |
1189 /* Determine the address to prefetch. */ |
1190 delta = (ahead + ap * ref->prefetch_mod) * |
1191 int_cst_value (ref->group->step); |
111 | 1192 addr = fold_build_pointer_plus_hwi (addr_base, delta); |
1193 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, | |
1194 NULL, true, GSI_SAME_STMT); | |
63 | 1195 } |
1196 else |
1197 { |
1198 /* The step size is non-constant but loop-invariant. We use the |
1199 heuristic of simply prefetching AHEAD iterations ahead. */ |
1200 forward = fold_build2 (MULT_EXPR, sizetype, |
1201 fold_convert (sizetype, ref->group->step), |
1202 fold_convert (sizetype, size_int (ahead))); |
111 | 1203 addr = fold_build_pointer_plus (addr_base, forward); |
63 | 1204 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, |
1205 NULL, true, GSI_SAME_STMT); |
1206 } |
111 | 1207 |
1208 if (addr_base != addr | |
1209 && TREE_CODE (addr_base) == SSA_NAME | |
1210 && TREE_CODE (addr) == SSA_NAME) | |
1211 { | |
1212 duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base)); | |
1213 /* As this isn't a plain copy we have to reset alignment | |
1214 information. */ | |
1215 if (SSA_NAME_PTR_INFO (addr)) | |
1216 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr)); | |
1217 } | |
1218 | |
0 | 1219 /* Create the prefetch instruction. */ |
111 | 1220 prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH), |
0 | 1221 3, addr, write_p, local); |
1222 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT); | |
1223 } | |
1224 } | |
1225 | |
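At the source level, the effect of issue_prefetch_ref corresponds roughly to the sketch below (the pass really inserts GIMPLE calls to BUILT_IN_PREFETCH as above; the prefetch distance and the loop itself are illustrative). The third argument of __builtin_prefetch is the temporal-locality hint: 3 for ordinary references, 0 when the reuse distance exceeds the L2 size (the nontemporal case above).

/* Rough source-level picture of the inserted prefetches; prefetching 64
   elements ahead is an illustrative distance only.  */
void
saxpy (float *restrict y, const float *restrict x, float a, long n)
{
  for (long i = 0; i < n; i++)
    {
      __builtin_prefetch (&x[i + 64], 0, 3);   /* read, keep in all caches */
      __builtin_prefetch (&y[i + 64], 1, 3);   /* write prefetch */
      y[i] += a * x[i];
    }
}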
1226 /* Issue prefetches for the references in GROUPS into loop as decided before. | |
1227 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the | |
1228 factor by which LOOP was unrolled. */ | |
1229 | |
1230 static void | |
1231 issue_prefetches (struct mem_ref_group *groups, | |
1232 unsigned unroll_factor, unsigned ahead) | |
1233 { | |
1234 struct mem_ref *ref; | |
1235 | |
1236 for (; groups; groups = groups->next) | |
1237 for (ref = groups->refs; ref; ref = ref->next) | |
1238 if (ref->issue_prefetch_p) | |
1239 issue_prefetch_ref (ref, unroll_factor, ahead); | |
1240 } | |
1241 | |
1242 /* Returns true if REF is a memory write for which a nontemporal store insn | |
1243 can be used. */ | |
1244 | |
1245 static bool | |
1246 nontemporal_store_p (struct mem_ref *ref) | |
1247 { | |
111 | 1248 machine_mode mode; |
0 | 1249 enum insn_code code; |
1250 | |
1251 /* REF must be a write that is not reused. We require it to be independent | |
1252 of all other memory references in the loop, as the nontemporal stores may | |
1253 be reordered with respect to other memory references. */ | |
1254 if (!ref->write_p | |
1255 || !ref->independent_p | |
1256 || ref->reuse_distance < L2_CACHE_SIZE_BYTES) | |
1257 return false; | |
1258 | |
1259 /* Check that we have the storent instruction for the mode. */ | |
1260 mode = TYPE_MODE (TREE_TYPE (ref->mem)); | |
1261 if (mode == BLKmode) | |
1262 return false; | |
1263 | |
67 | 1264 code = optab_handler (storent_optab, mode); |
0 | 1265 return code != CODE_FOR_nothing; |
1266 } | |
1267 | |
1268 /* If REF is a nontemporal store, we mark the corresponding modify statement | |
1269 and return true. Otherwise, we return false. */ | |
1270 | |
1271 static bool | |
1272 mark_nontemporal_store (struct mem_ref *ref) | |
1273 { | |
1274 if (!nontemporal_store_p (ref)) | |
1275 return false; | |
1276 | |
1277 if (dump_file && (dump_flags & TDF_DETAILS)) | |
111 | 1278 fprintf (dump_file, "Marked reference %u:%u as a nontemporal store.\n", |
1279 ref->group->uid, ref->uid); | |
0 | 1280 |
1281 gimple_assign_set_nontemporal_move (ref->stmt, true); | |
1282 ref->storent_p = true; | |
1283 | |
1284 return true; | |
1285 } | |
1286 | |
1287 /* Issue a memory fence instruction after LOOP. */ | |
1288 | |
1289 static void | |
145 | 1290 emit_mfence_after_loop (class loop *loop) |
0 | 1291 { |
111 | 1292 vec<edge> exits = get_loop_exit_edges (loop); |
0 | 1293 edge exit; |
111 | 1294 gcall *call; |
0 | 1295 gimple_stmt_iterator bsi; |
1296 unsigned i; | |
1297 | |
111 | 1298 FOR_EACH_VEC_ELT (exits, i, exit) |
0 | 1299 { |
1300 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0); | |
1301 | |
1302 if (!single_pred_p (exit->dest) | |
1303 /* If possible, we prefer not to insert the fence on other paths | |
1304 in cfg. */ | |
1305 && !(exit->flags & EDGE_ABNORMAL)) | |
1306 split_loop_exit_edge (exit); | |
1307 bsi = gsi_after_labels (exit->dest); | |
1308 | |
1309 gsi_insert_before (&bsi, call, GSI_NEW_STMT); | |
1310 } | |
1311 | |
111 | 1312 exits.release (); |
0 | 1313 update_ssa (TODO_update_ssa_only_virtuals); |
1314 } | |
1315 | |
1316 /* Returns true if we can use storent in loop, false otherwise. */ | |
1317 | |
1318 static bool | |
145 | 1319 may_use_storent_in_loop_p (class loop *loop) |
0 | 1320 { |
1321 bool ret = true; | |
1322 | |
1323 if (loop->inner != NULL) | |
1324 return false; | |
1325 | |
1326 /* If we must issue a mfence insn after using storent, check that there | |
1327 is a suitable place for it at each of the loop exits. */ | |
1328 if (FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1329 { | |
111 | 1330 vec<edge> exits = get_loop_exit_edges (loop); |
0 | 1331 unsigned i; |
1332 edge exit; | |
1333 | |
111 | 1334 FOR_EACH_VEC_ELT (exits, i, exit) |
0 | 1335 if ((exit->flags & EDGE_ABNORMAL) |
111 | 1336 && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
0 | 1337 ret = false; |
1338 | |
111 | 1339 exits.release (); |
0 | 1340 } |
1341 | |
1342 return ret; | |
1343 } | |
1344 | |
1345 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory | |
1346 references in the loop. */ | |
1347 | |
1348 static void | |
145 | 1349 mark_nontemporal_stores (class loop *loop, struct mem_ref_group *groups) |
0 | 1350 { |
1351 struct mem_ref *ref; | |
1352 bool any = false; | |
1353 | |
1354 if (!may_use_storent_in_loop_p (loop)) | |
1355 return; | |
1356 | |
1357 for (; groups; groups = groups->next) | |
1358 for (ref = groups->refs; ref; ref = ref->next) | |
1359 any |= mark_nontemporal_store (ref); | |
1360 | |
1361 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1362 emit_mfence_after_loop (loop); | |
1363 } | |
1364 | |
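A hypothetical loop of the kind the marking above targets: the stores stream over far more data than the L2 cache holds and are never re-read inside the loop, so, if the target provides a storent pattern for the mode, they can be marked nontemporal instead of being prefetched; on targets where FENCE_FOLLOWING_MOVNT is non-null, a fence is then emitted on the loop exits by emit_mfence_after_loop above.

/* Sketch of a nontemporal-store candidate; N is assumed to be much larger
   than the L2 cache, and the stored values are not read back in the loop.  */
void
clear_table (double *a, long n)
{
  for (long i = 0; i < n; i++)
    a[i] = 0.0;
}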
1365 /* Determines whether we can profitably unroll LOOP FACTOR times, and if | |
1366 this is the case, fill in DESC with the description of the number of | |
1367 iterations. */ | |
1368 | |
1369 static bool | |
145 | 1370 should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc, |
0 | 1371 unsigned factor) |
1372 { | |
1373 if (!can_unroll_loop_p (loop, factor, desc)) | |
1374 return false; | |
1375 | |
1376 /* We only consider loops without control flow for unrolling. This is not | |
1377 a hard restriction -- tree_unroll_loop works with arbitrary loops | |
1378 as well; but the unrolling/prefetching is usually more profitable for | |
1379 loops consisting of a single basic block, and we want to limit the | |
1380 code growth. */ | |
1381 if (loop->num_nodes > 2) | |
1382 return false; | |
1383 | |
1384 return true; | |
1385 } | |
1386 | |
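Two hypothetical loops illustrating the single-basic-block restriction above:

/* Straight-line body: small enough control flow to pass the
   loop->num_nodes > 2 check, so it is a candidate for unrolling here.  */
void
scale_all (double *a, long n)
{
  for (long i = 0; i < n; i++)
    a[i] *= 2.0;
}

/* Extra control flow in the body (loop->num_nodes > 2): not unrolled by
   this pass, although prefetches may still be issued for it.  */
void
clamp_negative (double *a, long n)
{
  for (long i = 0; i < n; i++)
    if (a[i] < 0.0)
      a[i] = 0.0;
}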
1387 /* Determine the factor by which to unroll LOOP, from the information | |
1388 contained in the list of memory references REFS. Description of | |
111 | 1389 number of iterations of LOOP is stored to DESC. NINSNS is the number of |
0 | 1390 insns of the LOOP. EST_NITER is the estimated number of iterations of |
1391 the loop, or -1 if no estimate is available. */ | |
1392 | |
1393 static unsigned | |
145 | 1394 determine_unroll_factor (class loop *loop, struct mem_ref_group *refs, |
1395 unsigned ninsns, class tree_niter_desc *desc, | |
0 | 1396 HOST_WIDE_INT est_niter) |
1397 { | |
1398 unsigned upper_bound; | |
1399 unsigned nfactor, factor, mod_constraint; | |
1400 struct mem_ref_group *agp; | |
1401 struct mem_ref *ref; | |
1402 | |
1403 /* First check whether the loop is not too large to unroll. We ignore | |
1404 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us | |
1405 from unrolling them enough to make exactly one cache line covered by each | |
1406 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent | |
1407 us from unrolling the loops too many times in cases where we only expect | |
1408 gains from better scheduling and decreasing loop overhead, which is not | |
1409 the case here. */ | |
145 | 1410 upper_bound = param_max_unrolled_insns / ninsns; |
0 | 1411 |
1412 /* If we unrolled the loop more times than it iterates, the unrolled version | |
1413 of the loop would never be entered. */ | |
1414 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound) | |
1415 upper_bound = est_niter; | |
1416 | |
1417 if (upper_bound <= 1) | |
1418 return 1; | |
1419 | |
1420 /* Choose the factor so that we may prefetch each cache line just once, | |
1421 but bound the unrolling by UPPER_BOUND. */ | |
1422 factor = 1; | |
1423 for (agp = refs; agp; agp = agp->next) | |
1424 for (ref = agp->refs; ref; ref = ref->next) | |
1425 if (should_issue_prefetch_p (ref)) | |
1426 { | |
1427 mod_constraint = ref->prefetch_mod; | |
1428 nfactor = least_common_multiple (mod_constraint, factor); | |
1429 if (nfactor <= upper_bound) | |
1430 factor = nfactor; | |
1431 } | |
1432 | |
1433 if (!should_unroll_loop_p (loop, desc, factor)) | |
1434 return 1; | |
1435 | |
1436 return factor; | |
1437 } | |
1438 | |
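A worked example of the factor selection above, with hypothetical values: suppose upper_bound comes out as 16 and the references that survive should_issue_prefetch_p have PREFETCH_MOD values 4 and 6. Starting from factor = 1, the first reference gives nfactor = lcm (4, 1) = 4 <= 16, so factor becomes 4; the second gives nfactor = lcm (6, 4) = 12 <= 16, so factor becomes 12, and the loop is unrolled 12 times (provided should_unroll_loop_p agrees). Had upper_bound been 8, the second constraint would have been rejected and the factor would have stayed at 4.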
1439 /* Returns the total volume of the memory references REFS, taking into account | |
1440 reuses in the innermost loop and cache line size. TODO -- we should also | |
1441 take into account reuses across the iterations of the loops in the loop | |
1442 nest. */ | |
1443 | |
1444 static unsigned | |
1445 volume_of_references (struct mem_ref_group *refs) | |
1446 { | |
1447 unsigned volume = 0; | |
1448 struct mem_ref_group *gr; | |
1449 struct mem_ref *ref; | |
1450 | |
1451 for (gr = refs; gr; gr = gr->next) | |