/* gcc/tree-affine.c — extracted from the CbC-GCC Mercurial mirror,
   changeset 136:4627f235cf2a (hgweb annotate residue removed).  */
/* Operations with affine combinations of trees.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
19 | |
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"
0 | 34 |
35 /* Extends CST as appropriate for the affine combinations COMB. */ | |
36 | |
131 | 37 static widest_int |
111 | 38 wide_int_ext_for_comb (const widest_int &cst, tree type) |
0 | 39 { |
111 | 40 return wi::sext (cst, TYPE_PRECISION (type)); |
0 | 41 } |
42 | |
131 | 43 /* Likewise for polynomial offsets. */ |
44 | |
45 static poly_widest_int | |
46 wide_int_ext_for_comb (const poly_widest_int &cst, tree type) | |
47 { | |
48 return wi::sext (cst, TYPE_PRECISION (type)); | |
49 } | |
50 | |
0 | 51 /* Initializes affine combination COMB so that its value is zero in TYPE. */ |
52 | |
53 static void | |
54 aff_combination_zero (aff_tree *comb, tree type) | |
55 { | |
111 | 56 int i; |
0 | 57 comb->type = type; |
111 | 58 comb->offset = 0; |
0 | 59 comb->n = 0; |
111 | 60 for (i = 0; i < MAX_AFF_ELTS; i++) |
61 comb->elts[i].coef = 0; | |
0 | 62 comb->rest = NULL_TREE; |
63 } | |
64 | |
65 /* Sets COMB to CST. */ | |
66 | |
67 void | |
131 | 68 aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst) |
0 | 69 { |
70 aff_combination_zero (comb, type); | |
111 | 71 comb->offset = wide_int_ext_for_comb (cst, comb->type);; |
0 | 72 } |
73 | |
74 /* Sets COMB to single element ELT. */ | |
75 | |
76 void | |
77 aff_combination_elt (aff_tree *comb, tree type, tree elt) | |
78 { | |
79 aff_combination_zero (comb, type); | |
80 | |
81 comb->n = 1; | |
82 comb->elts[0].val = elt; | |
111 | 83 comb->elts[0].coef = 1; |
0 | 84 } |
85 | |
86 /* Scales COMB by SCALE. */ | |
87 | |
88 void | |
111 | 89 aff_combination_scale (aff_tree *comb, const widest_int &scale_in) |
0 | 90 { |
91 unsigned i, j; | |
92 | |
111 | 93 widest_int scale = wide_int_ext_for_comb (scale_in, comb->type); |
94 if (scale == 1) | |
0 | 95 return; |
96 | |
111 | 97 if (scale == 0) |
0 | 98 { |
99 aff_combination_zero (comb, comb->type); | |
100 return; | |
101 } | |
102 | |
111 | 103 comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type); |
0 | 104 for (i = 0, j = 0; i < comb->n; i++) |
105 { | |
111 | 106 widest_int new_coef |
107 = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type); | |
0 | 108 /* A coefficient may become zero due to overflow. Remove the zero |
109 elements. */ | |
111 | 110 if (new_coef == 0) |
0 | 111 continue; |
112 comb->elts[j].coef = new_coef; | |
113 comb->elts[j].val = comb->elts[i].val; | |
114 j++; | |
115 } | |
116 comb->n = j; | |
117 | |
118 if (comb->rest) | |
119 { | |
120 tree type = comb->type; | |
121 if (POINTER_TYPE_P (type)) | |
122 type = sizetype; | |
123 if (comb->n < MAX_AFF_ELTS) | |
124 { | |
125 comb->elts[comb->n].coef = scale; | |
126 comb->elts[comb->n].val = comb->rest; | |
127 comb->rest = NULL_TREE; | |
128 comb->n++; | |
129 } | |
130 else | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
131 comb->rest = fold_build2 (MULT_EXPR, type, comb->rest, |
111 | 132 wide_int_to_tree (type, scale)); |
0 | 133 } |
134 } | |
135 | |
136 /* Adds ELT * SCALE to COMB. */ | |
137 | |
138 void | |
111 | 139 aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in) |
0 | 140 { |
141 unsigned i; | |
142 tree type; | |
143 | |
111 | 144 widest_int scale = wide_int_ext_for_comb (scale_in, comb->type); |
145 if (scale == 0) | |
0 | 146 return; |
147 | |
148 for (i = 0; i < comb->n; i++) | |
149 if (operand_equal_p (comb->elts[i].val, elt, 0)) | |
150 { | |
111 | 151 widest_int new_coef |
152 = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type); | |
153 if (new_coef != 0) | |
0 | 154 { |
155 comb->elts[i].coef = new_coef; | |
156 return; | |
157 } | |
158 | |
159 comb->n--; | |
160 comb->elts[i] = comb->elts[comb->n]; | |
161 | |
162 if (comb->rest) | |
163 { | |
164 gcc_assert (comb->n == MAX_AFF_ELTS - 1); | |
111 | 165 comb->elts[comb->n].coef = 1; |
0 | 166 comb->elts[comb->n].val = comb->rest; |
167 comb->rest = NULL_TREE; | |
168 comb->n++; | |
169 } | |
170 return; | |
171 } | |
172 if (comb->n < MAX_AFF_ELTS) | |
173 { | |
174 comb->elts[comb->n].coef = scale; | |
175 comb->elts[comb->n].val = elt; | |
176 comb->n++; | |
177 return; | |
178 } | |
179 | |
180 type = comb->type; | |
181 if (POINTER_TYPE_P (type)) | |
182 type = sizetype; | |
183 | |
111 | 184 if (scale == 1) |
0 | 185 elt = fold_convert (type, elt); |
186 else | |
187 elt = fold_build2 (MULT_EXPR, type, | |
188 fold_convert (type, elt), | |
111 | 189 wide_int_to_tree (type, scale)); |
0 | 190 |
191 if (comb->rest) | |
192 comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, | |
193 elt); | |
194 else | |
195 comb->rest = elt; | |
196 } | |
197 | |
198 /* Adds CST to C. */ | |
199 | |
200 static void | |
131 | 201 aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst) |
0 | 202 { |
111 | 203 c->offset = wide_int_ext_for_comb (c->offset + cst, c->type); |
0 | 204 } |
205 | |
206 /* Adds COMB2 to COMB1. */ | |
207 | |
208 void | |
209 aff_combination_add (aff_tree *comb1, aff_tree *comb2) | |
210 { | |
211 unsigned i; | |
212 | |
213 aff_combination_add_cst (comb1, comb2->offset); | |
214 for (i = 0; i < comb2->n; i++) | |
215 aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef); | |
216 if (comb2->rest) | |
111 | 217 aff_combination_add_elt (comb1, comb2->rest, 1); |
0 | 218 } |
219 | |
220 /* Converts affine combination COMB to TYPE. */ | |
221 | |
222 void | |
223 aff_combination_convert (aff_tree *comb, tree type) | |
224 { | |
225 unsigned i, j; | |
226 tree comb_type = comb->type; | |
227 | |
228 if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type)) | |
229 { | |
230 tree val = fold_convert (type, aff_combination_to_tree (comb)); | |
231 tree_to_aff_combination (val, type, comb); | |
232 return; | |
233 } | |
234 | |
235 comb->type = type; | |
236 if (comb->rest && !POINTER_TYPE_P (type)) | |
237 comb->rest = fold_convert (type, comb->rest); | |
238 | |
239 if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type)) | |
240 return; | |
241 | |
111 | 242 comb->offset = wide_int_ext_for_comb (comb->offset, comb->type); |
0 | 243 for (i = j = 0; i < comb->n; i++) |
244 { | |
111 | 245 if (comb->elts[i].coef == 0) |
0 | 246 continue; |
111 | 247 comb->elts[j].coef = comb->elts[i].coef; |
0 | 248 comb->elts[j].val = fold_convert (type, comb->elts[i].val); |
249 j++; | |
250 } | |
251 | |
252 comb->n = j; | |
253 if (comb->n < MAX_AFF_ELTS && comb->rest) | |
254 { | |
111 | 255 comb->elts[comb->n].coef = 1; |
0 | 256 comb->elts[comb->n].val = comb->rest; |
257 comb->rest = NULL_TREE; | |
258 comb->n++; | |
259 } | |
260 } | |
261 | |
262 /* Splits EXPR into an affine combination of parts. */ | |
263 | |
264 void | |
265 tree_to_aff_combination (tree expr, tree type, aff_tree *comb) | |
266 { | |
267 aff_tree tmp; | |
268 enum tree_code code; | |
269 tree cst, core, toffset; | |
131 | 270 poly_int64 bitpos, bitsize, bytepos; |
111 | 271 machine_mode mode; |
272 int unsignedp, reversep, volatilep; | |
0 | 273 |
274 STRIP_NOPS (expr); | |
275 | |
276 code = TREE_CODE (expr); | |
277 switch (code) | |
278 { | |
279 case POINTER_PLUS_EXPR: | |
280 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); | |
281 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp); | |
282 aff_combination_add (comb, &tmp); | |
283 return; | |
284 | |
285 case PLUS_EXPR: | |
286 case MINUS_EXPR: | |
287 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); | |
288 tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp); | |
289 if (code == MINUS_EXPR) | |
111 | 290 aff_combination_scale (&tmp, -1); |
0 | 291 aff_combination_add (comb, &tmp); |
292 return; | |
293 | |
294 case MULT_EXPR: | |
295 cst = TREE_OPERAND (expr, 1); | |
296 if (TREE_CODE (cst) != INTEGER_CST) | |
297 break; | |
298 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); | |
111 | 299 aff_combination_scale (comb, wi::to_widest (cst)); |
0 | 300 return; |
301 | |
302 case NEGATE_EXPR: | |
303 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); | |
111 | 304 aff_combination_scale (comb, -1); |
0 | 305 return; |
306 | |
307 case BIT_NOT_EXPR: | |
308 /* ~x = -x - 1 */ | |
309 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); | |
111 | 310 aff_combination_scale (comb, -1); |
311 aff_combination_add_cst (comb, -1); | |
0 | 312 return; |
313 | |
314 case ADDR_EXPR: | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
315 /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR. */ |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
316 if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
317 { |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
318 expr = TREE_OPERAND (expr, 0); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
319 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
320 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
321 aff_combination_add (comb, &tmp); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
322 return; |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
323 } |
0 | 324 core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos, |
111 | 325 &toffset, &mode, &unsignedp, &reversep, |
326 &volatilep); | |
131 | 327 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos)) |
0 | 328 break; |
131 | 329 aff_combination_const (comb, type, bytepos); |
111 | 330 if (TREE_CODE (core) == MEM_REF) |
331 { | |
131 | 332 tree mem_offset = TREE_OPERAND (core, 1); |
333 aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset)); | |
111 | 334 core = TREE_OPERAND (core, 0); |
335 } | |
336 else | |
337 core = build_fold_addr_expr (core); | |
338 | |
0 | 339 if (TREE_CODE (core) == ADDR_EXPR) |
111 | 340 aff_combination_add_elt (comb, core, 1); |
0 | 341 else |
342 { | |
343 tree_to_aff_combination (core, type, &tmp); | |
344 aff_combination_add (comb, &tmp); | |
345 } | |
346 if (toffset) | |
347 { | |
348 tree_to_aff_combination (toffset, type, &tmp); | |
349 aff_combination_add (comb, &tmp); | |
350 } | |
351 return; | |
352 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
353 case MEM_REF: |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
354 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
355 tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0), |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
356 type, comb); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
357 else if (integer_zerop (TREE_OPERAND (expr, 1))) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
358 { |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
359 aff_combination_elt (comb, type, expr); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
360 return; |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
361 } |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
362 else |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
363 aff_combination_elt (comb, type, |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
364 build2 (MEM_REF, TREE_TYPE (expr), |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
365 TREE_OPERAND (expr, 0), |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
366 build_int_cst |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
367 (TREE_TYPE (TREE_OPERAND (expr, 1)), 0))); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
368 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
369 aff_combination_add (comb, &tmp); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
370 return; |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
371 |
111 | 372 CASE_CONVERT: |
373 { | |
374 tree otype = TREE_TYPE (expr); | |
375 tree inner = TREE_OPERAND (expr, 0); | |
376 tree itype = TREE_TYPE (inner); | |
377 enum tree_code icode = TREE_CODE (inner); | |
378 | |
379 /* In principle this is a valid folding, but it isn't necessarily | |
380 an optimization, so do it here and not in fold_unary. */ | |
381 if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR) | |
382 && TREE_CODE (itype) == INTEGER_TYPE | |
383 && TREE_CODE (otype) == INTEGER_TYPE | |
384 && TYPE_PRECISION (otype) > TYPE_PRECISION (itype)) | |
385 { | |
386 tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1); | |
387 | |
388 /* If inner type has undefined overflow behavior, fold conversion | |
389 for below two cases: | |
390 (T1)(X *+- CST) -> (T1)X *+- (T1)CST | |
391 (T1)(X + X) -> (T1)X + (T1)X. */ | |
392 if (TYPE_OVERFLOW_UNDEFINED (itype) | |
393 && (TREE_CODE (op1) == INTEGER_CST | |
394 || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0)))) | |
395 { | |
396 op0 = fold_convert (otype, op0); | |
397 op1 = fold_convert (otype, op1); | |
398 expr = fold_build2 (icode, otype, op0, op1); | |
399 tree_to_aff_combination (expr, type, comb); | |
400 return; | |
401 } | |
402 wide_int minv, maxv; | |
403 /* If inner type has wrapping overflow behavior, fold conversion | |
404 for below case: | |
405 (T1)(X - CST) -> (T1)X - (T1)CST | |
406 if X - CST doesn't overflow by range information. Also handle | |
407 (T1)(X + CST) as (T1)(X - (-CST)). */ | |
408 if (TYPE_UNSIGNED (itype) | |
409 && TYPE_OVERFLOW_WRAPS (itype) | |
410 && TREE_CODE (op0) == SSA_NAME | |
411 && TREE_CODE (op1) == INTEGER_CST | |
412 && icode != MULT_EXPR | |
413 && get_range_info (op0, &minv, &maxv) == VR_RANGE) | |
414 { | |
415 if (icode == PLUS_EXPR) | |
416 op1 = wide_int_to_tree (itype, -wi::to_wide (op1)); | |
417 if (wi::geu_p (minv, wi::to_wide (op1))) | |
418 { | |
419 op0 = fold_convert (otype, op0); | |
420 op1 = fold_convert (otype, op1); | |
421 expr = fold_build2 (MINUS_EXPR, otype, op0, op1); | |
422 tree_to_aff_combination (expr, type, comb); | |
423 return; | |
424 } | |
425 } | |
426 } | |
427 } | |
428 break; | |
429 | |
0 | 430 default: |
131 | 431 { |
432 if (poly_int_tree_p (expr)) | |
433 { | |
434 aff_combination_const (comb, type, wi::to_poly_widest (expr)); | |
435 return; | |
436 } | |
437 break; | |
438 } | |
0 | 439 } |
440 | |
441 aff_combination_elt (comb, type, expr); | |
442 } | |
443 | |
444 /* Creates EXPR + ELT * SCALE in TYPE. EXPR is taken from affine | |
445 combination COMB. */ | |
446 | |
447 static tree | |
111 | 448 add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in) |
0 | 449 { |
450 enum tree_code code; | |
111 | 451 |
452 widest_int scale = wide_int_ext_for_comb (scale_in, type); | |
0 | 453 |
111 | 454 elt = fold_convert (type, elt); |
455 if (scale == 1) | |
0 | 456 { |
457 if (!expr) | |
111 | 458 return elt; |
0 | 459 |
460 return fold_build2 (PLUS_EXPR, type, expr, elt); | |
461 } | |
462 | |
111 | 463 if (scale == -1) |
0 | 464 { |
465 if (!expr) | |
111 | 466 return fold_build1 (NEGATE_EXPR, type, elt); |
0 | 467 |
468 return fold_build2 (MINUS_EXPR, type, expr, elt); | |
469 } | |
470 | |
471 if (!expr) | |
111 | 472 return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale)); |
0 | 473 |
111 | 474 if (wi::neg_p (scale)) |
0 | 475 { |
476 code = MINUS_EXPR; | |
111 | 477 scale = -scale; |
0 | 478 } |
479 else | |
480 code = PLUS_EXPR; | |
481 | |
111 | 482 elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale)); |
0 | 483 return fold_build2 (code, type, expr, elt); |
484 } | |
485 | |
486 /* Makes tree from the affine combination COMB. */ | |
487 | |
488 tree | |
489 aff_combination_to_tree (aff_tree *comb) | |
490 { | |
111 | 491 tree type = comb->type, base = NULL_TREE, expr = NULL_TREE; |
0 | 492 unsigned i; |
131 | 493 poly_widest_int off; |
494 int sgn; | |
0 | 495 |
496 gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE); | |
497 | |
111 | 498 i = 0; |
499 if (POINTER_TYPE_P (type)) | |
500 { | |
501 type = sizetype; | |
502 if (comb->n > 0 && comb->elts[0].coef == 1 | |
503 && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val))) | |
504 { | |
505 base = comb->elts[0].val; | |
506 ++i; | |
507 } | |
508 } | |
509 | |
510 for (; i < comb->n; i++) | |
511 expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef); | |
0 | 512 |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
513 if (comb->rest) |
111 | 514 expr = add_elt_to_tree (expr, type, comb->rest, 1); |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
515 |
0 | 516 /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is |
517 unsigned. */ | |
131 | 518 if (known_lt (comb->offset, 0)) |
0 | 519 { |
111 | 520 off = -comb->offset; |
521 sgn = -1; | |
0 | 522 } |
523 else | |
524 { | |
525 off = comb->offset; | |
111 | 526 sgn = 1; |
0 | 527 } |
111 | 528 expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn); |
529 | |
530 if (base) | |
531 return fold_build_pointer_plus (base, expr); | |
532 else | |
533 return fold_convert (comb->type, expr); | |
0 | 534 } |
535 | |
536 /* Copies the tree elements of COMB to ensure that they are not shared. */ | |
537 | |
538 void | |
539 unshare_aff_combination (aff_tree *comb) | |
540 { | |
541 unsigned i; | |
542 | |
543 for (i = 0; i < comb->n; i++) | |
544 comb->elts[i].val = unshare_expr (comb->elts[i].val); | |
545 if (comb->rest) | |
546 comb->rest = unshare_expr (comb->rest); | |
547 } | |
548 | |
549 /* Remove M-th element from COMB. */ | |
550 | |
551 void | |
552 aff_combination_remove_elt (aff_tree *comb, unsigned m) | |
553 { | |
554 comb->n--; | |
555 if (m <= comb->n) | |
556 comb->elts[m] = comb->elts[comb->n]; | |
557 if (comb->rest) | |
558 { | |
111 | 559 comb->elts[comb->n].coef = 1; |
0 | 560 comb->elts[comb->n].val = comb->rest; |
561 comb->rest = NULL_TREE; | |
562 comb->n++; | |
563 } | |
564 } | |
565 | |
566 /* Adds C * COEF * VAL to R. VAL may be NULL, in that case only | |
567 C * COEF is added to R. */ | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
568 |
0 | 569 |
570 static void | |
111 | 571 aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val, |
0 | 572 aff_tree *r) |
573 { | |
574 unsigned i; | |
575 tree aval, type; | |
576 | |
577 for (i = 0; i < c->n; i++) | |
578 { | |
579 aval = c->elts[i].val; | |
580 if (val) | |
581 { | |
582 type = TREE_TYPE (aval); | |
583 aval = fold_build2 (MULT_EXPR, type, aval, | |
584 fold_convert (type, val)); | |
585 } | |
586 | |
111 | 587 aff_combination_add_elt (r, aval, coef * c->elts[i].coef); |
0 | 588 } |
589 | |
590 if (c->rest) | |
591 { | |
592 aval = c->rest; | |
593 if (val) | |
594 { | |
595 type = TREE_TYPE (aval); | |
596 aval = fold_build2 (MULT_EXPR, type, aval, | |
597 fold_convert (type, val)); | |
598 } | |
599 | |
600 aff_combination_add_elt (r, aval, coef); | |
601 } | |
602 | |
603 if (val) | |
131 | 604 { |
605 if (c->offset.is_constant ()) | |
606 /* Access coeffs[0] directly, for efficiency. */ | |
607 aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]); | |
608 else | |
609 { | |
610 /* c->offset is polynomial, so multiply VAL rather than COEF | |
611 by it. */ | |
612 tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset); | |
613 val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset); | |
614 aff_combination_add_elt (r, val, coef); | |
615 } | |
616 } | |
0 | 617 else |
111 | 618 aff_combination_add_cst (r, coef * c->offset); |
0 | 619 } |
620 | |
621 /* Multiplies C1 by C2, storing the result to R */ | |
622 | |
623 void | |
624 aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r) | |
625 { | |
626 unsigned i; | |
627 gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type)); | |
628 | |
629 aff_combination_zero (r, c1->type); | |
630 | |
631 for (i = 0; i < c2->n; i++) | |
632 aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r); | |
633 if (c2->rest) | |
111 | 634 aff_combination_add_product (c1, 1, c2->rest, r); |
131 | 635 if (c2->offset.is_constant ()) |
636 /* Access coeffs[0] directly, for efficiency. */ | |
637 aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r); | |
638 else | |
639 { | |
640 /* c2->offset is polynomial, so do the multiplication in tree form. */ | |
641 tree offset = wide_int_to_tree (c2->type, c2->offset); | |
642 aff_combination_add_product (c1, 1, offset, r); | |
643 } | |
0 | 644 } |
645 | |
646 /* Returns the element of COMB whose value is VAL, or NULL if no such | |
647 element exists. If IDX is not NULL, it is set to the index of VAL in | |
648 COMB. */ | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
649 |
0 | 650 static struct aff_comb_elt * |
651 aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx) | |
652 { | |
653 unsigned i; | |
654 | |
655 for (i = 0; i < comb->n; i++) | |
656 if (operand_equal_p (comb->elts[i].val, val, 0)) | |
657 { | |
658 if (idx) | |
659 *idx = i; | |
660 | |
661 return &comb->elts[i]; | |
662 } | |
663 | |
664 return NULL; | |
665 } | |
666 | |
667 /* Element of the cache that maps ssa name NAME to its expanded form | |
668 as an affine expression EXPANSION. */ | |
669 | |
670 struct name_expansion | |
671 { | |
672 aff_tree expansion; | |
673 | |
674 /* True if the expansion for the name is just being generated. */ | |
675 unsigned in_progress : 1; | |
676 }; | |
677 | |
678 /* Expands SSA names in COMB recursively. CACHE is used to cache the | |
679 results. */ | |
680 | |
681 void | |
682 aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED, | |
111 | 683 hash_map<tree, name_expansion *> **cache) |
0 | 684 { |
685 unsigned i; | |
686 aff_tree to_add, current, curre; | |
687 tree e, rhs; | |
111 | 688 gimple *def; |
689 widest_int scale; | |
0 | 690 struct name_expansion *exp; |
691 | |
692 aff_combination_zero (&to_add, comb->type); | |
693 for (i = 0; i < comb->n; i++) | |
694 { | |
695 tree type, name; | |
696 enum tree_code code; | |
697 | |
698 e = comb->elts[i].val; | |
699 type = TREE_TYPE (e); | |
700 name = e; | |
701 /* Look through some conversions. */ | |
111 | 702 if (CONVERT_EXPR_P (e) |
0 | 703 && (TYPE_PRECISION (type) |
704 >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0))))) | |
705 name = TREE_OPERAND (e, 0); | |
706 if (TREE_CODE (name) != SSA_NAME) | |
707 continue; | |
708 def = SSA_NAME_DEF_STMT (name); | |
709 if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name) | |
710 continue; | |
711 | |
712 code = gimple_assign_rhs_code (def); | |
713 if (code != SSA_NAME | |
714 && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)) | |
715 && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS | |
716 || !is_gimple_min_invariant (gimple_assign_rhs1 (def)))) | |
717 continue; | |
718 | |
719 /* We do not know whether the reference retains its value at the | |
720 place where the expansion is used. */ | |
721 if (TREE_CODE_CLASS (code) == tcc_reference) | |
722 continue; | |
723 | |
724 if (!*cache) | |
111 | 725 *cache = new hash_map<tree, name_expansion *>; |
726 name_expansion **slot = &(*cache)->get_or_insert (e); | |
727 exp = *slot; | |
0 | 728 |
729 if (!exp) | |
730 { | |
731 exp = XNEW (struct name_expansion); | |
732 exp->in_progress = 1; | |
733 *slot = exp; | |
111 | 734 rhs = gimple_assign_rhs_to_tree (def); |
735 if (e != name) | |
736 rhs = fold_convert (type, rhs); | |
737 | |
0 | 738 tree_to_aff_combination_expand (rhs, comb->type, ¤t, cache); |
739 exp->expansion = current; | |
740 exp->in_progress = 0; | |
741 } | |
742 else | |
743 { | |
744 /* Since we follow the definitions in the SSA form, we should not | |
745 enter a cycle unless we pass through a phi node. */ | |
746 gcc_assert (!exp->in_progress); | |
747 current = exp->expansion; | |
748 } | |
749 | |
750 /* Accumulate the new terms to TO_ADD, so that we do not modify | |
751 COMB while traversing it; include the term -coef * E, to remove | |
752 it from COMB. */ | |
753 scale = comb->elts[i].coef; | |
754 aff_combination_zero (&curre, comb->type); | |
111 | 755 aff_combination_add_elt (&curre, e, -scale); |
0 | 756 aff_combination_scale (¤t, scale); |
757 aff_combination_add (&to_add, ¤t); | |
758 aff_combination_add (&to_add, &curre); | |
759 } | |
760 aff_combination_add (comb, &to_add); | |
761 } | |
762 | |
763 /* Similar to tree_to_aff_combination, but follows SSA name definitions | |
764 and expands them recursively. CACHE is used to cache the expansions | |
765 of the ssa names, to avoid exponential time complexity for cases | |
766 like | |
767 | |
768 a1 = a0 + a0; | |
769 a2 = a1 + a1; | |
770 a3 = a2 + a2; | |
771 ... */ | |
772 | |
773 void | |
774 tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb, | |
111 | 775 hash_map<tree, name_expansion *> **cache) |
0 | 776 { |
777 tree_to_aff_combination (expr, type, comb); | |
778 aff_combination_expand (comb, cache); | |
779 } | |
780 | |
781 /* Frees memory occupied by struct name_expansion in *VALUE. Callback for | |
111 | 782 hash_map::traverse. */ |
0 | 783 |
111 | 784 bool |
785 free_name_expansion (tree const &, name_expansion **value, void *) | |
0 | 786 { |
111 | 787 free (*value); |
0 | 788 return true; |
789 } | |
790 | |
791 /* Frees memory allocated for the CACHE used by | |
792 tree_to_aff_combination_expand. */ | |
793 | |
794 void | |
111 | 795 free_affine_expand_cache (hash_map<tree, name_expansion *> **cache) |
0 | 796 { |
797 if (!*cache) | |
798 return; | |
799 | |
111 | 800 (*cache)->traverse<void *, free_name_expansion> (NULL); |
801 delete (*cache); | |
0 | 802 *cache = NULL; |
803 } | |
804 | |
805 /* If VAL != CST * DIV for any constant CST, returns false. | |
111 | 806 Otherwise, if *MULT_SET is true, additionally compares CST and MULT, |
807 and if they are different, returns false. Finally, if neither of these | |
808 two cases occur, true is returned, and CST is stored to MULT and MULT_SET | |
809 is set to true. */ | |
0 | 810 |
811 static bool | |
131 | 812 wide_int_constant_multiple_p (const poly_widest_int &val, |
813 const poly_widest_int &div, | |
814 bool *mult_set, poly_widest_int *mult) | |
0 | 815 { |
131 | 816 poly_widest_int rem, cst; |
0 | 817 |
131 | 818 if (known_eq (val, 0)) |
111 | 819 { |
131 | 820 if (*mult_set && maybe_ne (*mult, 0)) |
111 | 821 return false; |
822 *mult_set = true; | |
823 *mult = 0; | |
824 return true; | |
825 } | |
0 | 826 |
131 | 827 if (maybe_eq (div, 0)) |
0 | 828 return false; |
829 | |
131 | 830 if (!multiple_p (val, div, &cst)) |
0 | 831 return false; |
832 | |
131 | 833 if (*mult_set && maybe_ne (*mult, cst)) |
0 | 834 return false; |
835 | |
836 *mult_set = true; | |
837 *mult = cst; | |
838 return true; | |
839 } | |
840 | |
841 /* Returns true if VAL = X * DIV for some constant X. If this is the case, | |
842 X is stored to MULT. */ | |
843 | |
844 bool | |
845 aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, | |
131 | 846 poly_widest_int *mult) |
0 | 847 { |
848 bool mult_set = false; | |
849 unsigned i; | |
850 | |
131 | 851 if (val->n == 0 && known_eq (val->offset, 0)) |
0 | 852 { |
111 | 853 *mult = 0; |
0 | 854 return true; |
855 } | |
856 if (val->n != div->n) | |
857 return false; | |
858 | |
859 if (val->rest || div->rest) | |
860 return false; | |
861 | |
111 | 862 if (!wide_int_constant_multiple_p (val->offset, div->offset, |
863 &mult_set, mult)) | |
0 | 864 return false; |
865 | |
866 for (i = 0; i < div->n; i++) | |
867 { | |
868 struct aff_comb_elt *elt | |
869 = aff_combination_find_elt (val, div->elts[i].val, NULL); | |
870 if (!elt) | |
871 return false; | |
111 | 872 if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef, |
873 &mult_set, mult)) | |
0 | 874 return false; |
875 } | |
876 | |
877 gcc_assert (mult_set); | |
878 return true; | |
879 } | |
880 | |
881 /* Prints the affine VAL to the FILE. */ | |
882 | |
111 | 883 static void |
0 | 884 print_aff (FILE *file, aff_tree *val) |
885 { | |
886 unsigned i; | |
111 | 887 signop sgn = TYPE_SIGN (val->type); |
0 | 888 if (POINTER_TYPE_P (val->type)) |
111 | 889 sgn = SIGNED; |
0 | 890 fprintf (file, "{\n type = "); |
891 print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS); | |
892 fprintf (file, "\n offset = "); | |
111 | 893 print_dec (val->offset, file, sgn); |
0 | 894 if (val->n > 0) |
895 { | |
896 fprintf (file, "\n elements = {\n"); | |
897 for (i = 0; i < val->n; i++) | |
898 { | |
899 fprintf (file, " [%d] = ", i); | |
900 print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
901 |
0 | 902 fprintf (file, " * "); |
111 | 903 print_dec (val->elts[i].coef, file, sgn); |
0 | 904 if (i != val->n - 1) |
905 fprintf (file, ", \n"); | |
906 } | |
907 fprintf (file, "\n }"); | |
908 } | |
909 if (val->rest) | |
910 { | |
911 fprintf (file, "\n rest = "); | |
912 print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS); | |
913 } | |
914 fprintf (file, "\n}"); | |
915 } | |
916 | |
917 /* Prints the affine VAL to the standard error, used for debugging. */ | |
918 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
919 DEBUG_FUNCTION void |
0 | 920 debug_aff (aff_tree *val) |
921 { | |
922 print_aff (stderr, val); | |
923 fprintf (stderr, "\n"); | |
924 } | |
925 | |
111 | 926 /* Computes address of the reference REF in ADDR. The size of the accessed |
927 location is stored to SIZE. Returns the ultimate containing object to | |
928 which REF refers. */ | |
0 | 929 |
111 | 930 tree |
131 | 931 get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size) |
0 | 932 { |
131 | 933 poly_int64 bitsize, bitpos; |
0 | 934 tree toff; |
111 | 935 machine_mode mode; |
936 int uns, rev, vol; | |
0 | 937 aff_tree tmp; |
938 tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode, | |
111 | 939 &uns, &rev, &vol); |
0 | 940 tree base_addr = build_fold_addr_expr (base); |
941 | |
942 /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT. */ | |
943 | |
944 tree_to_aff_combination (base_addr, sizetype, addr); | |
945 | |
946 if (toff) | |
947 { | |
948 tree_to_aff_combination (toff, sizetype, &tmp); | |
949 aff_combination_add (addr, &tmp); | |
950 } | |
951 | |
131 | 952 aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos)); |
0 | 953 aff_combination_add (addr, &tmp); |
954 | |
131 | 955 *size = bits_to_bytes_round_up (bitsize); |
111 | 956 |
957 return base; | |
0 | 958 } |
959 | |
111 | 960 /* Returns true if a region of size SIZE1 at position 0 and a region of |
961 size SIZE2 at position DIFF cannot overlap. */ | |
962 | |
963 bool | |
131 | 964 aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1, |
965 const poly_widest_int &size2) | |
111 | 966 { |
967 /* Unless the difference is a constant, we fail. */ | |
968 if (diff->n != 0) | |
969 return false; | |
970 | |
131 | 971 if (!ordered_p (diff->offset, 0)) |
972 return false; | |
973 | |
974 if (maybe_lt (diff->offset, 0)) | |
111 | 975 { |
976 /* The second object is before the first one, we succeed if the last | |
977 element of the second object is before the start of the first one. */ | |
131 | 978 return known_le (diff->offset + size2, 0); |
111 | 979 } |
980 else | |
981 { | |
982 /* We succeed if the second object starts after the first one ends. */ | |
131 | 983 return known_le (size1, diff->offset); |
111 | 984 } |
985 } | |
986 |