/****************************************************************************
 *                                                                          *
 *                         GNAT COMPILER COMPONENTS                         *
 *                                                                          *
 *                               U T I L S 2                                *
 *                                                                          *
 *                          C Implementation File                           *
 *                                                                          *
 *          Copyright (C) 1992-2017, Free Software Foundation, Inc.         *
 *                                                                          *
 * GNAT is free software;  you can  redistribute it  and/or modify it under *
 * terms of the  GNU General Public License as published  by the Free Soft- *
 * ware  Foundation;  either version 3,  or (at your option) any later ver- *
 * sion.  GNAT is distributed in the hope that it will be useful, but WITH- *
 * OUT ANY WARRANTY;  without even the  implied warranty of MERCHANTABILITY *
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License *
 * for  more details.  You should have received a copy of the GNU General   *
 * Public License along with GCC; see the file COPYING3.  If not see        *
 * <http://www.gnu.org/licenses/>.                                          *
 *                                                                          *
 * GNAT was originally developed  by the GNAT team at  New York University. *
 * Extensive contributions were provided by Ada Core Technologies Inc.      *
 *                                                                          *
 ****************************************************************************/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "tm.h"
#include "vec.h"
#include "alias.h"
#include "tree.h"
#include "inchash.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "flags.h"
#include "toplev.h"
#include "ggc.h"
#include "tree-inline.h"

#include "ada.h"
#include "types.h"
#include "atree.h"
#include "elists.h"
#include "namet.h"
#include "nlists.h"
#include "snames.h"
#include "stringt.h"
#include "uintp.h"
#include "fe.h"
#include "sinfo.h"
#include "einfo.h"
#include "ada-tree.h"
#include "gigi.h"

/* Return the base type of TYPE.  */

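/* For instance (added note): for the GCC tree built for an Ada subtype such
   as Natural, whose TREE_TYPE links to the tree for Integer, this returns
   the tree for Integer; a justified modular wrapper record is looked
   through first.  */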
tree
get_base_type (tree type)
{
  if (TREE_CODE (type) == RECORD_TYPE
      && TYPE_JUSTIFIED_MODULAR_P (type))
    type = TREE_TYPE (TYPE_FIELDS (type));

  while (TREE_TYPE (type)
         && (TREE_CODE (type) == INTEGER_TYPE
             || TREE_CODE (type) == REAL_TYPE))
    type = TREE_TYPE (type);

  return type;
}

/* EXP is a GCC tree representing an address.  See if we can find how strictly
   the object at this address is aligned and, if so, return the alignment of
   the object in bits.  Otherwise return 0.  */

unsigned int
known_alignment (tree exp)
{
  unsigned int this_alignment;
  unsigned int lhs, rhs;

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      /* Conversions between pointers and integers don't change the alignment
         of the underlying object.  */
      this_alignment = known_alignment (TREE_OPERAND (exp, 0));
      break;

    case COMPOUND_EXPR:
      /* The value of a COMPOUND_EXPR is that of its second operand.  */
      this_alignment = known_alignment (TREE_OPERAND (exp, 1));
      break;

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* If two addresses are added, the alignment of the result is the
         minimum of the two alignments.  */
      lhs = known_alignment (TREE_OPERAND (exp, 0));
      rhs = known_alignment (TREE_OPERAND (exp, 1));
      this_alignment = MIN (lhs, rhs);
      break;

    case POINTER_PLUS_EXPR:
      /* If this is the pattern built for aligning types, decode it.  */
      if (TREE_CODE (TREE_OPERAND (exp, 1)) == BIT_AND_EXPR
          && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)) == NEGATE_EXPR)
        {
          tree op = TREE_OPERAND (TREE_OPERAND (exp, 1), 1);
          return
            known_alignment (fold_build1 (BIT_NOT_EXPR, TREE_TYPE (op), op));
        }

      /* If we don't know the alignment of the offset, we assume that
         of the base.  */
      lhs = known_alignment (TREE_OPERAND (exp, 0));
      rhs = known_alignment (TREE_OPERAND (exp, 1));

      if (rhs == 0)
        this_alignment = lhs;
      else
        this_alignment = MIN (lhs, rhs);
      break;

    case COND_EXPR:
      /* If there is a choice between two values, use the smaller one.  */
      lhs = known_alignment (TREE_OPERAND (exp, 1));
      rhs = known_alignment (TREE_OPERAND (exp, 2));
      this_alignment = MIN (lhs, rhs);
      break;

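    /* Illustrative note (added): c & -c below isolates the lowest set bit,
       so a constant byte offset of 12 (binary 1100) yields an alignment of
       4 * BITS_PER_UNIT, i.e. 32 bits on 8-bit-byte targets.  */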
    case INTEGER_CST:
      {
        unsigned HOST_WIDE_INT c = TREE_INT_CST_LOW (exp);
        /* The first part of this represents the lowest bit in the constant,
           but it is originally in bytes, not bits.  */
        this_alignment = (c & -c) * BITS_PER_UNIT;
      }
      break;

    case MULT_EXPR:
      /* If we know the alignment of just one side, use it.  Otherwise,
         use the product of the alignments.  */
      lhs = known_alignment (TREE_OPERAND (exp, 0));
      rhs = known_alignment (TREE_OPERAND (exp, 1));

      if (lhs == 0)
        this_alignment = rhs;
      else if (rhs == 0)
        this_alignment = lhs;
      else
        this_alignment = MIN (lhs * rhs, BIGGEST_ALIGNMENT);
      break;

    case BIT_AND_EXPR:
      /* A bit-and expression is as aligned as the maximum alignment of the
         operands.  We typically get here for a complex lhs and a constant
         negative power of two on the rhs to force an explicit alignment, so
         don't bother looking at the lhs.  */
      this_alignment = known_alignment (TREE_OPERAND (exp, 1));
      break;

    case ADDR_EXPR:
      this_alignment = expr_align (TREE_OPERAND (exp, 0));
      break;

    case CALL_EXPR:
      {
        tree fndecl = get_callee_fndecl (exp);
        if (fndecl == malloc_decl || fndecl == realloc_decl)
          return get_target_system_allocator_alignment () * BITS_PER_UNIT;

        tree t = maybe_inline_call_in_expr (exp);
        if (t)
          return known_alignment (t);
      }

      /* ... fall through ... */

    default:
      /* For other pointer expressions, we assume that the pointed-to object
         is at least as aligned as the pointed-to type.  Beware that we can
         have a dummy type here (e.g. a Taft Amendment type), for which the
         alignment is meaningless and should be ignored.  */
      if (POINTER_TYPE_P (TREE_TYPE (exp))
          && !TYPE_IS_DUMMY_P (TREE_TYPE (TREE_TYPE (exp)))
          && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (exp))))
        this_alignment = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
      else
        this_alignment = 0;
      break;
    }

  return this_alignment;
}

/* We have a comparison or assignment operation on two types, T1 and T2, which
   are either both array types or both record types.  T1 is assumed to be for
   the left hand side operand, and T2 for the right hand side.  Return the
   type that both operands should be converted to for the operation, if any.
   Otherwise return zero.  */

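/* Added note: the heuristics below prefer, in order, a constant-size type
   (ideally T1 to limit view conversions on the lhs), then a non-BLKmode
   type, and return NULL_TREE when both types have variable size and
   BLKmode.  */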
static tree
find_common_type (tree t1, tree t2)
{
  /* ??? As of today, various constructs lead to here with types of different
     sizes even when both are of constant size (e.g. tagged types, packable
     vs regular component types, padded vs unpadded types, ...).  While some
     of these would better be handled upstream (types should be made
     consistent before calling into build_binary_op), some others are really
     expected and we have to be careful.  */

  const bool variable_record_on_lhs
    = (TREE_CODE (t1) == RECORD_TYPE
       && TREE_CODE (t2) == RECORD_TYPE
       && get_variant_part (t1)
       && !get_variant_part (t2));

  const bool variable_array_on_lhs
    = (TREE_CODE (t1) == ARRAY_TYPE
       && TREE_CODE (t2) == ARRAY_TYPE
       && !TREE_CONSTANT (TYPE_MIN_VALUE (TYPE_DOMAIN (t1)))
       && TREE_CONSTANT (TYPE_MIN_VALUE (TYPE_DOMAIN (t2))));

  /* We must avoid writing more than what the target can hold if this is for
     an assignment, and the case of tagged types is handled in
     build_binary_op, so we use the lhs type if it is known to be smaller or
     of constant size and the rhs type is not, whatever the modes.  We also
     force t1 in case of constant size equality to minimize occurrences of
     view conversions on the lhs of an assignment, except for the case of
     types with a variable part on the lhs but not on the rhs to make the
     conversion simpler.  */
  if (TREE_CONSTANT (TYPE_SIZE (t1))
      && (!TREE_CONSTANT (TYPE_SIZE (t2))
          || tree_int_cst_lt (TYPE_SIZE (t1), TYPE_SIZE (t2))
          || (TYPE_SIZE (t1) == TYPE_SIZE (t2)
              && !variable_record_on_lhs
              && !variable_array_on_lhs)))
    return t1;

  /* Otherwise, if the lhs type is non-BLKmode, use it, except for the case of
     a non-BLKmode rhs and array types with a variable part on the lhs but not
     on the rhs to make sure the conversion is preserved during gimplification.
     Note that we know that we will not have any alignment problems since, if
     we did, the non-BLKmode type could not have been used.  */
  if (TYPE_MODE (t1) != BLKmode
      && (TYPE_MODE (t2) == BLKmode || !variable_array_on_lhs))
    return t1;

  /* If the rhs type is of constant size, use it whatever the modes.  At
     this point it is known to be smaller, or of constant size and the
     lhs type is not.  */
  if (TREE_CONSTANT (TYPE_SIZE (t2)))
    return t2;

  /* Otherwise, if the rhs type is non-BLKmode, use it.  */
  if (TYPE_MODE (t2) != BLKmode)
    return t2;

  /* In this case, both types have variable size and BLKmode.  It's
     probably best to leave the "type mismatch" because changing it
     could cause a bad self-referential reference.  */
  return NULL_TREE;
}

/* Return an expression tree representing an equality comparison of A1 and A2,
   two objects of type ARRAY_TYPE.  The result should be of type RESULT_TYPE.

   Two arrays are equal in one of two ways: (1) if both have zero length in
   some dimension (not necessarily the same dimension) or (2) if the lengths
   in each dimension are equal and the data is equal.  We perform the length
   tests in as efficient a manner as possible.  */

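/* Schematic view (added note): for the one-dimensional case, the expression
   built below amounts to

     (A1'Length = 0 and then A2'Length = 0)
       or else (A1'Length = A2'Length and then A1's data = A2's data)

   with the per-dimension length tests accumulated across all dimensions
   before the single comparison of the data.  */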
static tree
compare_arrays (location_t loc, tree result_type, tree a1, tree a2)
{
  tree result = convert (result_type, boolean_true_node);
  tree a1_is_null = convert (result_type, boolean_false_node);
  tree a2_is_null = convert (result_type, boolean_false_node);
  tree t1 = TREE_TYPE (a1);
  tree t2 = TREE_TYPE (a2);
  bool a1_side_effects_p = TREE_SIDE_EFFECTS (a1);
  bool a2_side_effects_p = TREE_SIDE_EFFECTS (a2);
  bool length_zero_p = false;

  /* If the operands have side-effects, they need to be evaluated only once
     in spite of the multiple references in the comparison.  */
  if (a1_side_effects_p)
    a1 = gnat_protect_expr (a1);

  if (a2_side_effects_p)
    a2 = gnat_protect_expr (a2);

  /* Process each dimension separately and compare the lengths.  If any
     dimension has a length known to be zero, set LENGTH_ZERO_P to true
     in order to suppress the comparison of the data at the end.  */
  while (TREE_CODE (t1) == ARRAY_TYPE && TREE_CODE (t2) == ARRAY_TYPE)
    {
      tree lb1 = TYPE_MIN_VALUE (TYPE_DOMAIN (t1));
      tree ub1 = TYPE_MAX_VALUE (TYPE_DOMAIN (t1));
      tree lb2 = TYPE_MIN_VALUE (TYPE_DOMAIN (t2));
      tree ub2 = TYPE_MAX_VALUE (TYPE_DOMAIN (t2));
      tree length1 = size_binop (PLUS_EXPR, size_binop (MINUS_EXPR, ub1, lb1),
                                 size_one_node);
      tree length2 = size_binop (PLUS_EXPR, size_binop (MINUS_EXPR, ub2, lb2),
                                 size_one_node);
      tree comparison, this_a1_is_null, this_a2_is_null;

      /* If the length of the first array is a constant, swap our operands
         unless the length of the second array is the constant zero.  */
      if (TREE_CODE (length1) == INTEGER_CST && !integer_zerop (length2))
        {
          tree tem;
          bool btem;

          tem = a1, a1 = a2, a2 = tem;
          tem = t1, t1 = t2, t2 = tem;
          tem = lb1, lb1 = lb2, lb2 = tem;
          tem = ub1, ub1 = ub2, ub2 = tem;
          tem = length1, length1 = length2, length2 = tem;
          tem = a1_is_null, a1_is_null = a2_is_null, a2_is_null = tem;
          btem = a1_side_effects_p, a1_side_effects_p = a2_side_effects_p,
          a2_side_effects_p = btem;
        }

      /* If the length of the second array is the constant zero, we can just
         use the original stored bounds for the first array and see whether
         last < first holds.  */
      if (integer_zerop (length2))
        {
          tree b = get_base_type (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));

          length_zero_p = true;

          ub1
            = convert (b, TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1))));
          lb1
            = convert (b, TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1))));

          comparison = fold_build2_loc (loc, LT_EXPR, result_type, ub1, lb1);
          comparison = SUBSTITUTE_PLACEHOLDER_IN_EXPR (comparison, a1);
          if (EXPR_P (comparison))
            SET_EXPR_LOCATION (comparison, loc);

          this_a1_is_null = comparison;
          this_a2_is_null = convert (result_type, boolean_true_node);
        }

      /* Otherwise, if the length is some other constant value, we know that
         this dimension in the second array cannot be superflat, so we can
         just use its length computed from the actual stored bounds.  */
      else if (TREE_CODE (length2) == INTEGER_CST)
        {
          tree b = get_base_type (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1)));

          ub1
            = convert (b, TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1))));
          lb1
            = convert (b, TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t1))));
          /* Note that we know that UB2 and LB2 are constant and hence
             cannot contain a PLACEHOLDER_EXPR.  */
          ub2
            = convert (b, TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t2))));
          lb2
            = convert (b, TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (t2))));

          comparison
            = fold_build2_loc (loc, EQ_EXPR, result_type,
                               build_binary_op (MINUS_EXPR, b, ub1, lb1),
                               build_binary_op (MINUS_EXPR, b, ub2, lb2));
          comparison = SUBSTITUTE_PLACEHOLDER_IN_EXPR (comparison, a1);
          if (EXPR_P (comparison))
            SET_EXPR_LOCATION (comparison, loc);

          this_a1_is_null
            = fold_build2_loc (loc, LT_EXPR, result_type, ub1, lb1);

          this_a2_is_null = convert (result_type, boolean_false_node);
        }

      /* Otherwise, compare the computed lengths.  */
      else
        {
          length1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (length1, a1);
          length2 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (length2, a2);

          comparison
            = fold_build2_loc (loc, EQ_EXPR, result_type, length1, length2);

          /* If the length expression is of the form (cond ? val : 0), assume
             that cond is equivalent to (length != 0).  That's guaranteed by
             construction of the array types in gnat_to_gnu_entity.  */
          if (TREE_CODE (length1) == COND_EXPR
              && integer_zerop (TREE_OPERAND (length1, 2)))
            this_a1_is_null
              = invert_truthvalue_loc (loc, TREE_OPERAND (length1, 0));
          else
            this_a1_is_null = fold_build2_loc (loc, EQ_EXPR, result_type,
                                               length1, size_zero_node);

          /* Likewise for the second array.  */
          if (TREE_CODE (length2) == COND_EXPR
              && integer_zerop (TREE_OPERAND (length2, 2)))
            this_a2_is_null
              = invert_truthvalue_loc (loc, TREE_OPERAND (length2, 0));
          else
            this_a2_is_null = fold_build2_loc (loc, EQ_EXPR, result_type,
                                               length2, size_zero_node);
        }

      /* Append expressions for this dimension to the final expressions.  */
      result = build_binary_op (TRUTH_ANDIF_EXPR, result_type,
                                result, comparison);

      a1_is_null = build_binary_op (TRUTH_ORIF_EXPR, result_type,
                                    this_a1_is_null, a1_is_null);

      a2_is_null = build_binary_op (TRUTH_ORIF_EXPR, result_type,
                                    this_a2_is_null, a2_is_null);

      t1 = TREE_TYPE (t1);
      t2 = TREE_TYPE (t2);
    }

  /* Unless the length of some dimension is known to be zero, compare the
     data in the array.  */
  if (!length_zero_p)
    {
      tree type = find_common_type (TREE_TYPE (a1), TREE_TYPE (a2));
      tree comparison;

      if (type)
        {
          a1 = convert (type, a1);
          a2 = convert (type, a2);
        }

      comparison = fold_build2_loc (loc, EQ_EXPR, result_type, a1, a2);

      result
        = build_binary_op (TRUTH_ANDIF_EXPR, result_type, result, comparison);
    }

  /* The result is also true if both sizes are zero.  */
  result = build_binary_op (TRUTH_ORIF_EXPR, result_type,
                            build_binary_op (TRUTH_ANDIF_EXPR, result_type,
                                             a1_is_null, a2_is_null),
                            result);

  /* If the operands have side-effects, they need to be evaluated before
     doing the tests above since the place they otherwise would end up
     being evaluated at run time could be wrong.  */
  if (a1_side_effects_p)
    result = build2 (COMPOUND_EXPR, result_type, a1, result);

  if (a2_side_effects_p)
    result = build2 (COMPOUND_EXPR, result_type, a2, result);

  return result;
}

/* Return an expression tree representing an equality comparison of P1 and P2,
   two objects of fat pointer type.  The result should be of type RESULT_TYPE.

   Two fat pointers are equal in one of two ways: (1) if both have a null
   pointer to the array or (2) if they contain the same pair of pointers.
   We perform the comparison in as efficient a manner as possible.  */

static tree
compare_fat_pointers (location_t loc, tree result_type, tree p1, tree p2)
{
  tree p1_array, p2_array, p1_bounds, p2_bounds, same_array, same_bounds;
  tree p1_array_is_null, p2_array_is_null;

  /* If either operand has side-effects, they have to be evaluated only once
     in spite of the multiple references to the operand in the comparison.  */
  p1 = gnat_protect_expr (p1);
  p2 = gnat_protect_expr (p2);

  /* The constant folder doesn't fold fat pointer types so we do it here.  */
  if (TREE_CODE (p1) == CONSTRUCTOR)
    p1_array = CONSTRUCTOR_ELT (p1, 0)->value;
  else
    p1_array = build_component_ref (p1, TYPE_FIELDS (TREE_TYPE (p1)), true);

  p1_array_is_null
    = fold_build2_loc (loc, EQ_EXPR, result_type, p1_array,
                       fold_convert_loc (loc, TREE_TYPE (p1_array),
                                         null_pointer_node));

  if (TREE_CODE (p2) == CONSTRUCTOR)
    p2_array = CONSTRUCTOR_ELT (p2, 0)->value;
  else
    p2_array = build_component_ref (p2, TYPE_FIELDS (TREE_TYPE (p2)), true);

  p2_array_is_null
    = fold_build2_loc (loc, EQ_EXPR, result_type, p2_array,
                       fold_convert_loc (loc, TREE_TYPE (p2_array),
                                         null_pointer_node));

  /* If one of the pointers to the array is null, just compare the other.  */
  if (integer_zerop (p1_array))
    return p2_array_is_null;
  else if (integer_zerop (p2_array))
    return p1_array_is_null;

  /* Otherwise, do the fully-fledged comparison.  */
  same_array
    = fold_build2_loc (loc, EQ_EXPR, result_type, p1_array, p2_array);

  if (TREE_CODE (p1) == CONSTRUCTOR)
    p1_bounds = CONSTRUCTOR_ELT (p1, 1)->value;
  else
    p1_bounds
      = build_component_ref (p1, DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (p1))),
                             true);

  if (TREE_CODE (p2) == CONSTRUCTOR)
    p2_bounds = CONSTRUCTOR_ELT (p2, 1)->value;
  else
    p2_bounds
      = build_component_ref (p2, DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (p2))),
                             true);

  same_bounds
    = fold_build2_loc (loc, EQ_EXPR, result_type, p1_bounds, p2_bounds);

  /* P1_ARRAY == P2_ARRAY && (P1_ARRAY == NULL || P1_BOUNDS == P2_BOUNDS).  */
  return build_binary_op (TRUTH_ANDIF_EXPR, result_type, same_array,
                          build_binary_op (TRUTH_ORIF_EXPR, result_type,
                                           p1_array_is_null, same_bounds));
}

/* Compute the result of applying OP_CODE to LHS and RHS, where both are of
   type TYPE.  We know that TYPE is a modular type with a nonbinary
   modulus.  */

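/* Worked illustration (added, not from the original sources): for a modular
   type with modulus 10, the addition X + 8 is first rewritten below as the
   subtraction X - 2 (since 10 - 8 == 2); for X == 7 this yields 5, which is
   indeed (7 + 8) mod 10, and a negative intermediate result such as
   1 - 2 == -1 has the modulus added back by the MINUS_EXPR fixup, giving
   9 == (1 + 8) mod 10.  */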
static tree
nonbinary_modular_operation (enum tree_code op_code, tree type, tree lhs,
                             tree rhs)
{
  tree modulus = TYPE_MODULUS (type);
  unsigned int needed_precision = tree_floor_log2 (modulus) + 1;
  unsigned int precision;
  bool unsignedp = true;
  tree op_type = type;
  tree result;

  /* If this is an addition of a constant, convert it to a subtraction
     of a constant since we can do that faster.  */
  if (op_code == PLUS_EXPR && TREE_CODE (rhs) == INTEGER_CST)
    {
      rhs = fold_build2 (MINUS_EXPR, type, modulus, rhs);
      op_code = MINUS_EXPR;
    }

  /* For the logical operations, we only need PRECISION bits.  For
     addition and subtraction, we need one more and for multiplication we
     need twice as many.  But we never want to make a size smaller than
     our size.  */
  if (op_code == PLUS_EXPR || op_code == MINUS_EXPR)
    needed_precision += 1;
  else if (op_code == MULT_EXPR)
    needed_precision *= 2;

  precision = MAX (needed_precision, TYPE_PRECISION (op_type));

  /* Unsigned will do for everything but subtraction.  */
  if (op_code == MINUS_EXPR)
    unsignedp = false;

  /* If our type is the wrong signedness or isn't wide enough, make a new
     type and convert both our operands to it.  */
  if (TYPE_PRECISION (op_type) < precision
      || TYPE_UNSIGNED (op_type) != unsignedp)
    {
      /* Copy the type so we ensure it can be modified to make it modular.  */
      op_type = copy_type (gnat_type_for_size (precision, unsignedp));
      modulus = convert (op_type, modulus);
      SET_TYPE_MODULUS (op_type, modulus);
      TYPE_MODULAR_P (op_type) = 1;
      lhs = convert (op_type, lhs);
      rhs = convert (op_type, rhs);
    }

  /* Do the operation, then we'll fix it up.  */
  result = fold_build2 (op_code, op_type, lhs, rhs);

  /* For multiplication, we have no choice but to do a full modulus
     operation.  However, we want to do this in the narrowest
     possible size.  */
  if (op_code == MULT_EXPR)
    {
      /* Copy the type so we ensure it can be modified to make it modular.  */
      tree div_type = copy_type (gnat_type_for_size (needed_precision, 1));
      modulus = convert (div_type, modulus);
      SET_TYPE_MODULUS (div_type, modulus);
      TYPE_MODULAR_P (div_type) = 1;
      result = convert (op_type,
                        fold_build2 (TRUNC_MOD_EXPR, div_type,
                                     convert (div_type, result), modulus));
    }

  /* For subtraction, add the modulus back if we are negative.  */
  else if (op_code == MINUS_EXPR)
    {
      result = gnat_protect_expr (result);
      result = fold_build3 (COND_EXPR, op_type,
                            fold_build2 (LT_EXPR, boolean_type_node, result,
                                         build_int_cst (op_type, 0)),
                            fold_build2 (PLUS_EXPR, op_type, result, modulus),
                            result);
    }

  /* For the other operations, subtract the modulus if we are >= it.  */
  else
    {
      result = gnat_protect_expr (result);
      result = fold_build3 (COND_EXPR, op_type,
                            fold_build2 (GE_EXPR, boolean_type_node,
                                         result, modulus),
                            fold_build2 (MINUS_EXPR, op_type,
                                         result, modulus),
                            result);
    }

  return convert (type, result);
}

/* This page contains routines that implement the Ada semantics with regard
   to atomic objects.  They are fully piggybacked on the middle-end support
   for atomic loads and stores.

   *** Memory barriers and volatile objects ***

   We implement the weakened form of the C.6(16) clause that was introduced
   in Ada 2012 (AI05-117).  Earlier forms of this clause wouldn't have been
   implementable without significant performance hits on modern platforms.

   We also take advantage of the requirements imposed on shared variables by
   9.10 (conditions for sequential actions) to have non-erroneous execution
   and consider that C.6(16) and C.6(17) only prescribe a uniform order of
   volatile updates with regard to sequential actions, i.e. with regard to
   reads or updates of atomic objects.

   As such, an update of an atomic object by a task requires that all earlier
   accesses to volatile objects have completed.  Similarly, later accesses to
   volatile objects cannot be reordered before the update of the atomic
   object.  So, memory barriers both before and after the atomic update are
   needed.

   For a read of an atomic object, to avoid seeing writes of volatile objects
   by a task earlier than by the other tasks, a memory barrier is needed
   before the atomic read.  Finally, to avoid reordering later reads or
   updates of volatile objects to before the atomic read, a barrier is needed
   after the atomic read.

   So, memory barriers are needed before and after atomic reads and updates.
   And, in order to simplify the implementation, we use full memory barriers
   in all cases, i.e. we enforce sequential consistency for atomic accesses.  */

/* Return the size of TYPE, which must be a positive power of 2.  */

static unsigned int
resolve_atomic_size (tree type)
{
  unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (type));

  if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16)
    return size;

  /* We shouldn't reach here without having already detected that the size
     isn't compatible with an atomic access.  */
  gcc_assert (Serious_Errors_Detected);

  return 0;
}

/* Build an atomic load for the underlying atomic object in SRC.  SYNC is
   true if the load requires synchronization.  */

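/* Added note: the builtin selection below relies on the
   BUILT_IN_ATOMIC_LOAD_N family being declared in the order N, 1, 2, 4, 8,
   16 in GCC's builtin definitions, so that BUILT_IN_ATOMIC_LOAD_N
   + exact_log2 (size) + 1 picks the _1, _2, _4, _8 or _16 variant matching
   the access size.  */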
tree
build_atomic_load (tree src, bool sync)
{
  tree ptr_type
    = build_pointer_type
      (build_qualified_type (void_type_node,
                             TYPE_QUAL_ATOMIC | TYPE_QUAL_VOLATILE));
  tree mem_model
    = build_int_cst (integer_type_node,
                     sync ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED);
  tree orig_src = src;
  tree t, addr, val;
  unsigned int size;
  int fncode;

  /* Remove conversions to get the address of the underlying object.  */
  src = remove_conversions (src, false);
  size = resolve_atomic_size (TREE_TYPE (src));
  if (size == 0)
    return orig_src;

  fncode = (int) BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
  t = builtin_decl_implicit ((enum built_in_function) fncode);

  addr = build_unary_op (ADDR_EXPR, ptr_type, src);
  val = build_call_expr (t, 2, addr, mem_model);

  /* First reinterpret the loaded bits in the original type of the load,
     then convert to the expected result type.  */
  t = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (src), val);
  return convert (TREE_TYPE (orig_src), t);
}

/* Build an atomic store from SRC to the underlying atomic object in DEST.
   SYNC is true if the store requires synchronization.  */

tree
build_atomic_store (tree dest, tree src, bool sync)
{
  tree ptr_type
    = build_pointer_type
      (build_qualified_type (void_type_node,
                             TYPE_QUAL_ATOMIC | TYPE_QUAL_VOLATILE));
  tree mem_model
    = build_int_cst (integer_type_node,
                     sync ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED);
  tree orig_dest = dest;
  tree t, int_type, addr;
  unsigned int size;
  int fncode;

  /* Remove conversions to get the address of the underlying object.  */
  dest = remove_conversions (dest, false);
  size = resolve_atomic_size (TREE_TYPE (dest));
  if (size == 0)
    return build_binary_op (MODIFY_EXPR, NULL_TREE, orig_dest, src);

  fncode = (int) BUILT_IN_ATOMIC_STORE_N + exact_log2 (size) + 1;
  t = builtin_decl_implicit ((enum built_in_function) fncode);
  int_type = gnat_type_for_size (BITS_PER_UNIT * size, 1);

  /* First convert the bits to be stored to the original type of the store,
     then reinterpret them in the effective type.  But if the original type
     is a padded type with the same size, convert to the inner type instead,
     as we don't want to artificially introduce a CONSTRUCTOR here.  */
  if (TYPE_IS_PADDING_P (TREE_TYPE (dest))
      && TYPE_SIZE (TREE_TYPE (dest))
         == TYPE_SIZE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (dest)))))
    src = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (dest))), src);
  else
    src = convert (TREE_TYPE (dest), src);
  src = fold_build1 (VIEW_CONVERT_EXPR, int_type, src);
  addr = build_unary_op (ADDR_EXPR, ptr_type, dest);

  return build_call_expr (t, 3, addr, src, mem_model);
}

/* Build a load-modify-store sequence from SRC to DEST.  GNAT_NODE is used for
   the location of the sequence.  Note that, even though the load and the
   store are both atomic, the sequence itself is not atomic.  */

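/* Schematic view (added note): for a destination like R.F where R is an
   atomic record, the translated tree contains a VIEW_CONVERT_EXPR of an
   atomic load of R; the loop below replaces that load with a SAVE_EXPR
   temporary, assigns SRC into the component of the temporary, then stores
   the whole temporary back with a non-synchronizing atomic store.  */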
tree
build_load_modify_store (tree dest, tree src, Node_Id gnat_node)
{
  /* We will be modifying DEST below so we build a copy.  */
  dest = copy_node (dest);
  tree ref = dest;

  while (handled_component_p (ref))
    {
      /* The load should already have been generated during the translation
         of the GNAT destination tree; find it in the GNU tree.  */
      if (TREE_CODE (TREE_OPERAND (ref, 0)) == VIEW_CONVERT_EXPR)
        {
          tree op = TREE_OPERAND (TREE_OPERAND (ref, 0), 0);
          if (TREE_CODE (op) == CALL_EXPR && call_is_atomic_load (op))
            {
              tree type = TREE_TYPE (TREE_OPERAND (ref, 0));
              tree t = CALL_EXPR_ARG (op, 0);
              tree obj, temp, stmt;

              /* Find the loaded object.  */
              if (TREE_CODE (t) == NOP_EXPR)
                t = TREE_OPERAND (t, 0);
              if (TREE_CODE (t) == ADDR_EXPR)
                obj = TREE_OPERAND (t, 0);
              else
                obj = build1 (INDIRECT_REF, type, t);

              /* Drop atomic and volatile qualifiers for the temporary.  */
              type = TYPE_MAIN_VARIANT (type);

              /* And drop BLKmode, if need be, to put it into a register.  */
              if (TYPE_MODE (type) == BLKmode)
                {
                  unsigned int size = tree_to_uhwi (TYPE_SIZE (type));
                  type = copy_type (type);
                  machine_mode mode = int_mode_for_size (size, 0).else_blk ();
                  SET_TYPE_MODE (type, mode);
                }

              /* Create the temporary by inserting a SAVE_EXPR.  */
              temp = build1 (SAVE_EXPR, type,
                             build1 (VIEW_CONVERT_EXPR, type, op));
              TREE_OPERAND (ref, 0) = temp;

              start_stmt_group ();

              /* Build the modify of the temporary.  */
              stmt = build_binary_op (MODIFY_EXPR, NULL_TREE, dest, src);
              add_stmt_with_node (stmt, gnat_node);

              /* Build the store to the object.  */
              stmt = build_atomic_store (obj, temp, false);
              add_stmt_with_node (stmt, gnat_node);

              return end_stmt_group ();
            }
        }

      TREE_OPERAND (ref, 0) = copy_node (TREE_OPERAND (ref, 0));
      ref = TREE_OPERAND (ref, 0);
    }

  /* Something went wrong earlier if we have not found the atomic load.  */
  gcc_unreachable ();
}

/* Make a binary operation of kind OP_CODE.  RESULT_TYPE is the type
   desired for the result.  Usually the operation is to be performed
   in that type.  For INIT_EXPR and MODIFY_EXPR, RESULT_TYPE must be
   NULL_TREE.  For ARRAY_REF, RESULT_TYPE may be NULL_TREE, in which
   case the type to be used will be derived from the operands.
   Don't fold the result if NO_FOLD is true.

   This function is very much unlike the ones for C and C++ since we
   have already done any type conversion and matching required.  All we
   have to do here is validate the work done by SEM and handle subtypes.  */

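/* Added note: for modular types with a nonbinary modulus, the arithmetic is
   delegated to nonbinary_modular_operation above; for binary (power-of-2)
   moduli that still need reduction, a FLOOR_MOD_EXPR by the modulus is
   emitted at the end of the function.  */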
tree
build_binary_op (enum tree_code op_code, tree result_type,
                 tree left_operand, tree right_operand,
                 bool no_fold)
{
  tree left_type = TREE_TYPE (left_operand);
  tree right_type = TREE_TYPE (right_operand);
  tree left_base_type = get_base_type (left_type);
  tree right_base_type = get_base_type (right_type);
  tree operation_type = result_type;
  tree best_type = NULL_TREE;
  tree modulus, result;
  bool has_side_effects = false;

  if (operation_type
      && TREE_CODE (operation_type) == RECORD_TYPE
      && TYPE_JUSTIFIED_MODULAR_P (operation_type))
    operation_type = TREE_TYPE (TYPE_FIELDS (operation_type));

  if (operation_type
      && TREE_CODE (operation_type) == INTEGER_TYPE
      && TYPE_EXTRA_SUBTYPE_P (operation_type))
    operation_type = get_base_type (operation_type);

  modulus = (operation_type
             && TREE_CODE (operation_type) == INTEGER_TYPE
             && TYPE_MODULAR_P (operation_type)
             ? TYPE_MODULUS (operation_type) : NULL_TREE);

  switch (op_code)
    {
    case INIT_EXPR:
    case MODIFY_EXPR:
      gcc_checking_assert (!result_type);

      /* If there were integral or pointer conversions on the LHS, remove
         them; we'll be putting them back below if needed.  Likewise for
         conversions between array and record types, except for justified
         modular types.  But don't do this if the right operand is not
         BLKmode (for packed arrays) unless we are not changing the mode.  */
      while ((CONVERT_EXPR_P (left_operand)
              || TREE_CODE (left_operand) == VIEW_CONVERT_EXPR)
             && (((INTEGRAL_TYPE_P (left_type)
                   || POINTER_TYPE_P (left_type))
                  && (INTEGRAL_TYPE_P (TREE_TYPE
                                       (TREE_OPERAND (left_operand, 0)))
                      || POINTER_TYPE_P (TREE_TYPE
                                         (TREE_OPERAND (left_operand, 0)))))
                 || (((TREE_CODE (left_type) == RECORD_TYPE
                       && !TYPE_JUSTIFIED_MODULAR_P (left_type))
                      || TREE_CODE (left_type) == ARRAY_TYPE)
                     && ((TREE_CODE (TREE_TYPE
                                     (TREE_OPERAND (left_operand, 0)))
                          == RECORD_TYPE)
                         || (TREE_CODE (TREE_TYPE
                                        (TREE_OPERAND (left_operand, 0)))
                             == ARRAY_TYPE))
                     && (TYPE_MODE (right_type) == BLKmode
                         || (TYPE_MODE (left_type)
                             == TYPE_MODE (TREE_TYPE
                                           (TREE_OPERAND
                                            (left_operand, 0))))))))
        {
          left_operand = TREE_OPERAND (left_operand, 0);
          left_type = TREE_TYPE (left_operand);
        }

      /* If a class-wide type may be involved, force use of the RHS type.  */
      if ((TREE_CODE (right_type) == RECORD_TYPE
           || TREE_CODE (right_type) == UNION_TYPE)
          && TYPE_ALIGN_OK (right_type))
        operation_type = right_type;

      /* If we are copying between padded objects with compatible types, use
         the padded view of the objects, this is very likely more efficient.
         Likewise for a padded object that is assigned a constructor, if we
         can convert the constructor to the inner type, to avoid putting a
         VIEW_CONVERT_EXPR on the LHS.  But don't do so if we wouldn't have
         actually copied anything.  */
      else if (TYPE_IS_PADDING_P (left_type)
               && TREE_CONSTANT (TYPE_SIZE (left_type))
               && ((TREE_CODE (right_operand) == COMPONENT_REF
                    && TYPE_MAIN_VARIANT (left_type)
                       == TYPE_MAIN_VARIANT
                          (TREE_TYPE (TREE_OPERAND (right_operand, 0))))
                   || (TREE_CODE (right_operand) == CONSTRUCTOR
                       && !CONTAINS_PLACEHOLDER_P
                           (DECL_SIZE (TYPE_FIELDS (left_type)))))
               && !integer_zerop (TYPE_SIZE (right_type)))
        {
          /* We make an exception for a BLKmode type padding a non-BLKmode
             inner type and do the conversion of the LHS right away, since
             unchecked_convert wouldn't do it properly.  */
          if (TYPE_MODE (left_type) == BLKmode
              && TYPE_MODE (right_type) != BLKmode
              && TREE_CODE (right_operand) != CONSTRUCTOR)
            {
              operation_type = right_type;
              left_operand = convert (operation_type, left_operand);
              left_type = operation_type;
            }
          else
            operation_type = left_type;
        }

      /* If we have a call to a function that returns with variable size, use
         the RHS type in case we want to use the return slot optimization.  */
      else if (TREE_CODE (right_operand) == CALL_EXPR
               && return_type_with_variable_size_p (right_type))
        operation_type = right_type;

      /* Find the best type to use for copying between aggregate types.  */
      else if (((TREE_CODE (left_type) == ARRAY_TYPE
                 && TREE_CODE (right_type) == ARRAY_TYPE)
                || (TREE_CODE (left_type) == RECORD_TYPE
                    && TREE_CODE (right_type) == RECORD_TYPE))
               && (best_type = find_common_type (left_type, right_type)))
        operation_type = best_type;

      /* Otherwise use the LHS type.  */
      else
        operation_type = left_type;

      /* Ensure everything on the LHS is valid.  If we have a field reference,
         strip anything that get_inner_reference can handle.  Then remove any
         conversions between types having the same code and mode.  And mark
         VIEW_CONVERT_EXPRs with TREE_ADDRESSABLE.  When done, we must have
         either an INDIRECT_REF, a NULL_EXPR, a SAVE_EXPR or a DECL node.  */
      result = left_operand;
      while (true)
        {
          tree restype = TREE_TYPE (result);

          if (TREE_CODE (result) == COMPONENT_REF
              || TREE_CODE (result) == ARRAY_REF
              || TREE_CODE (result) == ARRAY_RANGE_REF)
            while (handled_component_p (result))
              result = TREE_OPERAND (result, 0);
          else if (TREE_CODE (result) == REALPART_EXPR
                   || TREE_CODE (result) == IMAGPART_EXPR
                   || (CONVERT_EXPR_P (result)
                       && (((TREE_CODE (restype)
                             == TREE_CODE (TREE_TYPE
                                           (TREE_OPERAND (result, 0))))
                            && (TYPE_MODE (TREE_TYPE
                                           (TREE_OPERAND (result, 0)))
                                == TYPE_MODE (restype)))
                           || TYPE_ALIGN_OK (restype))))
            result = TREE_OPERAND (result, 0);
          else if (TREE_CODE (result) == VIEW_CONVERT_EXPR)
            {
              TREE_ADDRESSABLE (result) = 1;
              result = TREE_OPERAND (result, 0);
            }
          else
            break;
        }

      gcc_assert (TREE_CODE (result) == INDIRECT_REF
                  || TREE_CODE (result) == NULL_EXPR
                  || TREE_CODE (result) == SAVE_EXPR
                  || DECL_P (result));

      /* Convert the right operand to the operation type unless it is
         either already of the correct type or if the type involves a
         placeholder, since the RHS may not have the same record type.  */
      if (operation_type != right_type
          && !CONTAINS_PLACEHOLDER_P (TYPE_SIZE (operation_type)))
        {
          right_operand = convert (operation_type, right_operand);
          right_type = operation_type;
        }

      /* If the left operand is not of the same type as the operation
         type, wrap it up in a VIEW_CONVERT_EXPR.  */
      if (left_type != operation_type)
        left_operand = unchecked_convert (operation_type, left_operand, false);

      has_side_effects = true;
      modulus = NULL_TREE;
      break;

    case ARRAY_REF:
      if (!operation_type)
        operation_type = TREE_TYPE (left_type);

      /* ... fall through ... */

    case ARRAY_RANGE_REF:
      /* First look through conversion between type variants.  Note that
         this changes neither the operation type nor the type domain.  */
      if (TREE_CODE (left_operand) == VIEW_CONVERT_EXPR
          && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (left_operand, 0)))
             == TYPE_MAIN_VARIANT (left_type))
        {
          left_operand = TREE_OPERAND (left_operand, 0);
          left_type = TREE_TYPE (left_operand);
        }

      /* For a range, make sure the element type is consistent.  */
      if (op_code == ARRAY_RANGE_REF
          && TREE_TYPE (operation_type) != TREE_TYPE (left_type))
        operation_type = build_array_type (TREE_TYPE (left_type),
                                           TYPE_DOMAIN (operation_type));

      /* Then convert the right operand to its base type.  This will prevent
         unneeded sign conversions when sizetype is wider than integer.  */
      right_operand = convert (right_base_type, right_operand);
      right_operand = convert_to_index_type (right_operand);
      modulus = NULL_TREE;
      break;

    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
      gcc_checking_assert
        (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
      operation_type = left_base_type;
      left_operand = convert (operation_type, left_operand);
      right_operand = convert (operation_type, right_operand);
      break;

    case GE_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case LT_EXPR:
    case EQ_EXPR:
    case NE_EXPR:
      gcc_checking_assert
        (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
      /* If either operand is a NULL_EXPR, just return a new one.  */
      if (TREE_CODE (left_operand) == NULL_EXPR)
        return build2 (op_code, result_type,
                       build1 (NULL_EXPR, integer_type_node,
                               TREE_OPERAND (left_operand, 0)),
                       integer_zero_node);

      else if (TREE_CODE (right_operand) == NULL_EXPR)
        return build2 (op_code, result_type,
                       build1 (NULL_EXPR, integer_type_node,
                               TREE_OPERAND (right_operand, 0)),
                       integer_zero_node);

      /* If either object is a justified modular type, get the
         fields from within.  */
      if (TREE_CODE (left_type) == RECORD_TYPE
          && TYPE_JUSTIFIED_MODULAR_P (left_type))
        {
          left_operand = convert (TREE_TYPE (TYPE_FIELDS (left_type)),
                                  left_operand);
          left_type = TREE_TYPE (left_operand);
          left_base_type = get_base_type (left_type);
        }

      if (TREE_CODE (right_type) == RECORD_TYPE
          && TYPE_JUSTIFIED_MODULAR_P (right_type))
        {
          right_operand = convert (TREE_TYPE (TYPE_FIELDS (right_type)),
                                   right_operand);
          right_type = TREE_TYPE (right_operand);
          right_base_type = get_base_type (right_type);
        }

      /* If both objects are arrays, compare them specially.  */
      if ((TREE_CODE (left_type) == ARRAY_TYPE
           || (TREE_CODE (left_type) == INTEGER_TYPE
               && TYPE_HAS_ACTUAL_BOUNDS_P (left_type)))
          && (TREE_CODE (right_type) == ARRAY_TYPE
              || (TREE_CODE (right_type) == INTEGER_TYPE
                  && TYPE_HAS_ACTUAL_BOUNDS_P (right_type))))
        {
          result = compare_arrays (input_location,
                                   result_type, left_operand, right_operand);
          if (op_code == NE_EXPR)
            result = invert_truthvalue_loc (EXPR_LOCATION (result), result);
          else
            gcc_assert (op_code == EQ_EXPR);

          return result;
        }

      /* Otherwise, the base types must be the same, unless they are both fat
         pointer types or record types.  In the latter case, use the best type
         and convert both operands to that type.  */
      if (left_base_type != right_base_type)
        {
          if (TYPE_IS_FAT_POINTER_P (left_base_type)
              && TYPE_IS_FAT_POINTER_P (right_base_type))
            {
              gcc_assert (TYPE_MAIN_VARIANT (left_base_type)
                          == TYPE_MAIN_VARIANT (right_base_type));
              best_type = left_base_type;
            }

          else if (TREE_CODE (left_base_type) == RECORD_TYPE
                   && TREE_CODE (right_base_type) == RECORD_TYPE)
            {
              /* The only way this is permitted is if both types have the same
                 name.  In that case, one of them must not be self-referential.
                 Use it as the best type.  Even better with a fixed size.  */
              gcc_assert (TYPE_NAME (left_base_type)
                          && TYPE_NAME (left_base_type)
                             == TYPE_NAME (right_base_type));

              if (TREE_CONSTANT (TYPE_SIZE (left_base_type)))
                best_type = left_base_type;
              else if (TREE_CONSTANT (TYPE_SIZE (right_base_type)))
                best_type = right_base_type;
              else if (!CONTAINS_PLACEHOLDER_P (TYPE_SIZE (left_base_type)))
                best_type = left_base_type;
              else if (!CONTAINS_PLACEHOLDER_P (TYPE_SIZE (right_base_type)))
                best_type = right_base_type;
              else
                gcc_unreachable ();
            }

          else if (POINTER_TYPE_P (left_base_type)
                   && POINTER_TYPE_P (right_base_type))
            {
              gcc_assert (TREE_TYPE (left_base_type)
                          == TREE_TYPE (right_base_type));
              best_type = left_base_type;
            }
          else
            gcc_unreachable ();

          left_operand = convert (best_type, left_operand);
          right_operand = convert (best_type, right_operand);
        }
      else
        {
          left_operand = convert (left_base_type, left_operand);
          right_operand = convert (right_base_type, right_operand);
        }

      /* If both objects are fat pointers, compare them specially.  */
      if (TYPE_IS_FAT_POINTER_P (left_base_type))
        {
          result
            = compare_fat_pointers (input_location,
                                    result_type, left_operand, right_operand);
          if (op_code == NE_EXPR)
            result = invert_truthvalue_loc (EXPR_LOCATION (result), result);
          else
            gcc_assert (op_code == EQ_EXPR);

          return result;
        }

      modulus = NULL_TREE;
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* The RHS of a shift can be any type.  Also, ignore any modulus
         (we used to abort, but this is needed for unchecked conversion
         to modular types).  Otherwise, processing is the same as normal.  */
      gcc_assert (operation_type == left_base_type);
      modulus = NULL_TREE;
      left_operand = convert (operation_type, left_operand);
      break;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      /* For binary modulus, if the inputs are in range, so are the
         outputs.  */
      if (modulus && integer_pow2p (modulus))
        modulus = NULL_TREE;
      goto common;

    case COMPLEX_EXPR:
      gcc_assert (TREE_TYPE (result_type) == left_base_type
                  && TREE_TYPE (result_type) == right_base_type);
      left_operand = convert (left_base_type, left_operand);
      right_operand = convert (right_base_type, right_operand);
      break;

    case TRUNC_DIV_EXPR:  case TRUNC_MOD_EXPR:
    case CEIL_DIV_EXPR:   case CEIL_MOD_EXPR:
    case FLOOR_DIV_EXPR:  case FLOOR_MOD_EXPR:
    case ROUND_DIV_EXPR:  case ROUND_MOD_EXPR:
      /* These always produce results lower than either operand.  */
      modulus = NULL_TREE;
      goto common;

    case POINTER_PLUS_EXPR:
      gcc_assert (operation_type == left_base_type
                  && sizetype == right_base_type);
      left_operand = convert (operation_type, left_operand);
      right_operand = convert (sizetype, right_operand);
      break;

    case PLUS_NOMOD_EXPR:
    case MINUS_NOMOD_EXPR:
      if (op_code == PLUS_NOMOD_EXPR)
        op_code = PLUS_EXPR;
      else
        op_code = MINUS_EXPR;
      modulus = NULL_TREE;

      /* ... fall through ... */

    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Avoid doing arithmetic in ENUMERAL_TYPE or BOOLEAN_TYPE like the
         other compilers.  Contrary to C, Ada doesn't allow arithmetic in
         these types but can generate addition/subtraction for Succ/Pred.  */
      if (operation_type
          && (TREE_CODE (operation_type) == ENUMERAL_TYPE
              || TREE_CODE (operation_type) == BOOLEAN_TYPE))
        operation_type = left_base_type = right_base_type
          = gnat_type_for_mode (TYPE_MODE (operation_type),
                                TYPE_UNSIGNED (operation_type));

      /* ... fall through ... */

    default:
    common:
      /* The result type should be the same as the base types of both
         operands (and they should be the same).  Convert everything to
         the result type.  */
      gcc_assert (operation_type == left_base_type
                  && left_base_type == right_base_type);
      left_operand = convert (operation_type, left_operand);
      right_operand = convert (operation_type, right_operand);
    }

  if (modulus && !integer_pow2p (modulus))
    {
      result = nonbinary_modular_operation (op_code, operation_type,
                                            left_operand, right_operand);
      modulus = NULL_TREE;
    }
  /* If either operand is a NULL_EXPR, just return a new one.  */
  else if (TREE_CODE (left_operand) == NULL_EXPR)
    return build1 (NULL_EXPR, operation_type, TREE_OPERAND (left_operand, 0));
  else if (TREE_CODE (right_operand) == NULL_EXPR)
    return build1 (NULL_EXPR, operation_type, TREE_OPERAND (right_operand, 0));
  else if (op_code == ARRAY_REF || op_code == ARRAY_RANGE_REF)
    {
      result = build4 (op_code, operation_type, left_operand, right_operand,
                       NULL_TREE, NULL_TREE);
      if (!no_fold)
        result = fold (result);
    }
  else if (op_code == INIT_EXPR || op_code == MODIFY_EXPR)
    result = build2 (op_code, void_type_node, left_operand, right_operand);
  else if (no_fold)
    result = build2 (op_code, operation_type, left_operand, right_operand);
  else
    result
      = fold_build2 (op_code, operation_type, left_operand, right_operand);

  if (TREE_CONSTANT (result))
    ;
  else if (op_code == ARRAY_REF || op_code == ARRAY_RANGE_REF)
    {
      if (TYPE_VOLATILE (operation_type))
        TREE_THIS_VOLATILE (result) = 1;
    }
  else
    TREE_CONSTANT (result)
      |= (TREE_CONSTANT (left_operand) && TREE_CONSTANT (right_operand));

  TREE_SIDE_EFFECTS (result) |= has_side_effects;

  /* If we are working with modular types, perform the MOD operation
     if something above hasn't eliminated the need for it.  */
  if (modulus)
    {
      modulus = convert (operation_type, modulus);
      if (no_fold)
        result = build2 (FLOOR_MOD_EXPR, operation_type, result, modulus);
      else
        result = fold_build2 (FLOOR_MOD_EXPR, operation_type, result, modulus);
    }

  if (result_type && result_type != operation_type)
    result = convert (result_type, result);

  return result;
}

/* Similar, but for unary operations.  */

1336 tree
|
|
1337 build_unary_op (enum tree_code op_code, tree result_type, tree operand)
|
|
1338 {
|
|
1339 tree type = TREE_TYPE (operand);
|
|
1340 tree base_type = get_base_type (type);
|
|
1341 tree operation_type = result_type;
|
|
1342 tree result;
|
|
1343
|
|
1344 if (operation_type
|
|
1345 && TREE_CODE (operation_type) == RECORD_TYPE
|
|
1346 && TYPE_JUSTIFIED_MODULAR_P (operation_type))
|
|
1347 operation_type = TREE_TYPE (TYPE_FIELDS (operation_type));
|
|
1348
|
|
1349 if (operation_type
|
|
1350 && TREE_CODE (operation_type) == INTEGER_TYPE
|
|
1351 && TYPE_EXTRA_SUBTYPE_P (operation_type))
|
|
1352 operation_type = get_base_type (operation_type);
|
|
1353
|
|
1354 switch (op_code)
|
|
1355 {
|
|
1356 case REALPART_EXPR:
|
|
1357 case IMAGPART_EXPR:
|
|
1358 if (!operation_type)
|
|
1359 result_type = operation_type = TREE_TYPE (type);
|
|
1360 else
|
|
1361 gcc_assert (result_type == TREE_TYPE (type));
|
|
1362
|
|
1363 result = fold_build1 (op_code, operation_type, operand);
|
|
1364 break;
|
|
1365
|
|
1366 case TRUTH_NOT_EXPR:
|
|
1367 gcc_checking_assert
|
|
1368 (TREE_CODE (get_base_type (result_type)) == BOOLEAN_TYPE);
|
|
1369 result = invert_truthvalue_loc (EXPR_LOCATION (operand), operand);
|
|
1370 /* When not optimizing, fold the result as invert_truthvalue_loc
|
|
1371 doesn't fold the result of comparisons. This is intended to undo
|
|
1372 the trick used for boolean rvalues in gnat_to_gnu. */
|
|
1373 if (!optimize)
|
|
1374 result = fold (result);
|
|
1375 break;
|
|
1376
|
|
    case ATTR_ADDR_EXPR:
    case ADDR_EXPR:
      switch (TREE_CODE (operand))
        {
        case INDIRECT_REF:
        case UNCONSTRAINED_ARRAY_REF:
          result = TREE_OPERAND (operand, 0);

          /* Make sure the type here is a pointer, not a reference.
             GCC wants pointer types for function addresses.  */
          if (!result_type)
            result_type = build_pointer_type (type);

          /* If the underlying object can alias everything, propagate the
             property since we are effectively retrieving the object.  */
          if (POINTER_TYPE_P (TREE_TYPE (result))
              && TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (result)))
            {
              if (TREE_CODE (result_type) == POINTER_TYPE
                  && !TYPE_REF_CAN_ALIAS_ALL (result_type))
                result_type
                  = build_pointer_type_for_mode (TREE_TYPE (result_type),
                                                 TYPE_MODE (result_type),
                                                 true);
              else if (TREE_CODE (result_type) == REFERENCE_TYPE
                       && !TYPE_REF_CAN_ALIAS_ALL (result_type))
                result_type
                  = build_reference_type_for_mode (TREE_TYPE (result_type),
                                                   TYPE_MODE (result_type),
                                                   true);
            }
          break;

        case NULL_EXPR:
          result = operand;
          TREE_TYPE (result) = type = build_pointer_type (type);
          break;

        case COMPOUND_EXPR:
          /* Fold a compound expression if it has unconstrained array type
             since the middle-end cannot handle it.  But we don't do it in
             the general case because it may introduce aliasing issues if
             the first operand is an indirect assignment and the second
             operand the corresponding address, e.g. for an allocator.
             However, do it for a return value to expose it for later
             recognition.  */
          if (TREE_CODE (type) == UNCONSTRAINED_ARRAY_TYPE
              || (TREE_CODE (TREE_OPERAND (operand, 1)) == VAR_DECL
                  && DECL_RETURN_VALUE_P (TREE_OPERAND (operand, 1))))
            {
              result = build_unary_op (ADDR_EXPR, result_type,
                                       TREE_OPERAND (operand, 1));
              result = build2 (COMPOUND_EXPR, TREE_TYPE (result),
                               TREE_OPERAND (operand, 0), result);
              break;
            }
          goto common;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
          /* If this is for 'Address, find the address of the prefix and add
             the offset to the field.  Otherwise, do this the normal way.  */
          if (op_code == ATTR_ADDR_EXPR)
            {
              HOST_WIDE_INT bitsize;
              HOST_WIDE_INT bitpos;
              tree offset, inner;
              machine_mode mode;
              int unsignedp, reversep, volatilep;

              inner = get_inner_reference (operand, &bitsize, &bitpos, &offset,
                                           &mode, &unsignedp, &reversep,
                                           &volatilep);

              /* If INNER is a padding type whose field has a self-referential
                 size, convert to that inner type.  We know the offset is zero
                 and we need to have that type visible.  */
              if (type_is_padding_self_referential (TREE_TYPE (inner)))
                inner = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (inner))),
                                 inner);

              /* Compute the offset as a byte offset from INNER.  */
              if (!offset)
                offset = size_zero_node;

              offset = size_binop (PLUS_EXPR, offset,
                                   size_int (bitpos / BITS_PER_UNIT));

              /* Take the address of INNER, convert it to a pointer to our
                 type and add the offset.  */
              inner = build_unary_op (ADDR_EXPR,
                                      build_pointer_type (TREE_TYPE (operand)),
                                      inner);
              result = build_binary_op (POINTER_PLUS_EXPR, TREE_TYPE (inner),
                                        inner, offset);
              break;
            }
          goto common;

        case CONSTRUCTOR:
          /* If this is just a constructor for a padded record, we can
             just take the address of the single field and convert it to
             a pointer to our type.  */
          if (TYPE_IS_PADDING_P (type))
            {
              result
                = build_unary_op (ADDR_EXPR,
                                  build_pointer_type (TREE_TYPE (operand)),
                                  CONSTRUCTOR_ELT (operand, 0)->value);
              break;
            }
          goto common;

        case NOP_EXPR:
          if (AGGREGATE_TYPE_P (type)
              && AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (operand, 0))))
            return build_unary_op (ADDR_EXPR, result_type,
                                   TREE_OPERAND (operand, 0));

          /* ... fall through ... */

        case VIEW_CONVERT_EXPR:
          /* If this is just a variant conversion or if the conversion doesn't
             change the mode, get the result type from this type and go down.
             This is needed for conversions of CONST_DECLs, to eventually get
             to the address of their CORRESPONDING_VARs.  */
          if ((TYPE_MAIN_VARIANT (type)
               == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (operand, 0))))
              || (TYPE_MODE (type) != BLKmode
                  && (TYPE_MODE (type)
                      == TYPE_MODE (TREE_TYPE (TREE_OPERAND (operand, 0))))))
            return build_unary_op (ADDR_EXPR,
                                   (result_type ? result_type
                                    : build_pointer_type (type)),
                                   TREE_OPERAND (operand, 0));
          goto common;

        case CONST_DECL:
          operand = DECL_CONST_CORRESPONDING_VAR (operand);

          /* ... fall through ... */

        default:
        common:

          /* If we are taking the address of a padded record whose field
             contains a template, take the address of the field.  */
          if (TYPE_IS_PADDING_P (type)
              && TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == RECORD_TYPE
              && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (TYPE_FIELDS (type))))
            {
              type = TREE_TYPE (TYPE_FIELDS (type));
              operand = convert (type, operand);
            }

          gnat_mark_addressable (operand);
          result = build_fold_addr_expr (operand);
        }

      TREE_CONSTANT (result) = staticp (operand) || TREE_CONSTANT (operand);
      break;

    case INDIRECT_REF:
      {
        tree t = remove_conversions (operand, false);
        bool can_never_be_null = DECL_P (t) && DECL_CAN_NEVER_BE_NULL_P (t);

        /* If TYPE is a thin pointer, either first retrieve the base if this
           is an expression with an offset built for the initialization of an
           object with an unconstrained nominal subtype, or else convert to
           the fat pointer.  */
        if (TYPE_IS_THIN_POINTER_P (type))
          {
            tree rec_type = TREE_TYPE (type);

            if (TREE_CODE (operand) == POINTER_PLUS_EXPR
                && TREE_OPERAND (operand, 1)
                   == byte_position (DECL_CHAIN (TYPE_FIELDS (rec_type)))
                && TREE_CODE (TREE_OPERAND (operand, 0)) == NOP_EXPR)
              {
                operand = TREE_OPERAND (TREE_OPERAND (operand, 0), 0);
                type = TREE_TYPE (operand);
              }
            else if (TYPE_UNCONSTRAINED_ARRAY (rec_type))
              {
                operand
                  = convert (TREE_TYPE (TYPE_UNCONSTRAINED_ARRAY (rec_type)),
                             operand);
                type = TREE_TYPE (operand);
              }
          }

        /* If we want to refer to an unconstrained array, use the appropriate
           expression.  But this will never survive down to the back-end.  */
        if (TYPE_IS_FAT_POINTER_P (type))
          {
            result = build1 (UNCONSTRAINED_ARRAY_REF,
                             TYPE_UNCONSTRAINED_ARRAY (type), operand);
            TREE_READONLY (result)
              = TYPE_READONLY (TYPE_UNCONSTRAINED_ARRAY (type));
          }

        /* If we are dereferencing an ADDR_EXPR, return its operand.  */
        else if (TREE_CODE (operand) == ADDR_EXPR)
          result = TREE_OPERAND (operand, 0);

        /* Otherwise, build and fold the indirect reference.  */
        else
          {
            result = build_fold_indirect_ref (operand);
            TREE_READONLY (result) = TYPE_READONLY (TREE_TYPE (type));
          }

        if (!TYPE_IS_FAT_POINTER_P (type) && TYPE_VOLATILE (TREE_TYPE (type)))
          {
            TREE_SIDE_EFFECTS (result) = 1;
            if (TREE_CODE (result) == INDIRECT_REF)
              TREE_THIS_VOLATILE (result) = TYPE_VOLATILE (TREE_TYPE (result));
          }

        if ((TREE_CODE (result) == INDIRECT_REF
             || TREE_CODE (result) == UNCONSTRAINED_ARRAY_REF)
            && can_never_be_null)
          TREE_THIS_NOTRAP (result) = 1;

        break;
      }

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      {
        tree modulus = ((operation_type
                         && TREE_CODE (operation_type) == INTEGER_TYPE
                         && TYPE_MODULAR_P (operation_type))
                        ? TYPE_MODULUS (operation_type) : NULL_TREE);
        int mod_pow2 = modulus && integer_pow2p (modulus);

        /* If this is a modular type, there are various possibilities
           depending on the operation and whether the modulus is a
           power of two or not.  */

        if (modulus)
          {
            gcc_assert (operation_type == base_type);
            operand = convert (operation_type, operand);

            /* The fastest in the negate case for binary modulus is
               the straightforward code; the TRUNC_MOD_EXPR below
               is an AND operation.  */
            if (op_code == NEGATE_EXPR && mod_pow2)
              result = fold_build2 (TRUNC_MOD_EXPR, operation_type,
                                    fold_build1 (NEGATE_EXPR, operation_type,
                                                 operand),
                                    modulus);

            /* In the nonbinary negate case, return zero for a zero operand,
               else return the modulus minus the operand.  If the modulus
               is a power of two minus one, we can do the subtraction as an
               XOR since it is equivalent and faster on most machines.  */
            else if (op_code == NEGATE_EXPR && !mod_pow2)
              {
                if (integer_pow2p (fold_build2 (PLUS_EXPR, operation_type,
                                                modulus,
                                                build_int_cst (operation_type,
                                                               1))))
                  result = fold_build2 (BIT_XOR_EXPR, operation_type,
                                        operand, modulus);
                else
                  result = fold_build2 (MINUS_EXPR, operation_type,
                                        modulus, operand);

                result = fold_build3 (COND_EXPR, operation_type,
                                      fold_build2 (NE_EXPR,
                                                   boolean_type_node,
                                                   operand,
                                                   build_int_cst
                                                   (operation_type, 0)),
                                      result, operand);
              }
            else
              {
                /* For the NOT cases, we need a constant equal to the modulus
                   minus one.  For a binary modulus, we XOR the operand
                   against that constant; for a nonbinary modulus, we subtract
                   the operand from it.  */

                tree cnst = fold_build2 (MINUS_EXPR, operation_type, modulus,
                                         build_int_cst (operation_type, 1));

                if (mod_pow2)
                  result = fold_build2 (BIT_XOR_EXPR, operation_type,
                                        operand, cnst);
                else
                  result = fold_build2 (MINUS_EXPR, operation_type,
                                        cnst, operand);
              }

            break;
          }
      }

      /* ... fall through ... */

    default:
      gcc_assert (operation_type == base_type);
      result = fold_build1 (op_code, operation_type,
                            convert (operation_type, operand));
    }

  if (result_type && TREE_TYPE (result) != result_type)
    result = convert (result_type, result);

  return result;
}
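
/* Worked example (illustrative, not generated code): for a modular type
   with binary modulus M = 2**8 = 256, NEGATE of X is (-X) mod 256, which
   the TRUNC_MOD_EXPR above reduces to an AND with 255, and BIT_NOT of X
   is X XOR 255.  For a nonbinary modulus, say M = 10, NEGATE of X is
   X == 0 ? 0 : 10 - X and BIT_NOT of X is 9 - X, so X = 3 yields 7 for
   NEGATE and 6 for BIT_NOT.  */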

/* Similar, but for COND_EXPR.  */

tree
build_cond_expr (tree result_type, tree condition_operand,
                 tree true_operand, tree false_operand)
{
  bool addr_p = false;
  tree result;

  /* The front-end verified that the result, true and false operands have
     the same base type.  Convert everything to the result type.  */
  true_operand = convert (result_type, true_operand);
  false_operand = convert (result_type, false_operand);

  /* If the result type is unconstrained, take the address of the operands
     and then dereference the result.  Likewise if the result type is passed
     by reference, because creating a temporary of this type is not
     allowed.  */
  if (TREE_CODE (result_type) == UNCONSTRAINED_ARRAY_TYPE
      || TYPE_IS_BY_REFERENCE_P (result_type)
      || CONTAINS_PLACEHOLDER_P (TYPE_SIZE (result_type)))
    {
      result_type = build_pointer_type (result_type);
      true_operand = build_unary_op (ADDR_EXPR, result_type, true_operand);
      false_operand = build_unary_op (ADDR_EXPR, result_type, false_operand);
      addr_p = true;
    }

  result = fold_build3 (COND_EXPR, result_type, condition_operand,
                        true_operand, false_operand);

  /* If we have a common SAVE_EXPR (possibly surrounded by arithmetics)
     in both arms, make sure it gets evaluated by moving it ahead of the
     conditional expression.  This is necessary because it is evaluated
     in only one place at run time and would otherwise be uninitialized
     in one of the arms.  */
  true_operand = skip_simple_arithmetic (true_operand);
  false_operand = skip_simple_arithmetic (false_operand);

  if (true_operand == false_operand && TREE_CODE (true_operand) == SAVE_EXPR)
    result = build2 (COMPOUND_EXPR, result_type, true_operand, result);

  if (addr_p)
    result = build_unary_op (INDIRECT_REF, NULL_TREE, result);

  return result;
}
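
/* Illustrative sketch (assumed input, not actual output): given a common
   SAVE_EXPR S in both arms, e.g.

     C ? S + 1 : S - 1

   skip_simple_arithmetic strips the +1/-1 in each arm and exposes S, so
   the result is rewritten as

     COMPOUND_EXPR (S, C ? S + 1 : S - 1)

   and S is evaluated exactly once, before the branch is taken.  */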

/* Similar, but for COMPOUND_EXPR.  */

tree
build_compound_expr (tree result_type, tree stmt_operand, tree expr_operand)
{
  bool addr_p = false;
  tree result;

  /* If the result type is unconstrained, take the address of the operand and
     then dereference the result.  Likewise if the result type is passed by
     reference, but this is natively handled in the gimplifier.  */
  if (TREE_CODE (result_type) == UNCONSTRAINED_ARRAY_TYPE
      || CONTAINS_PLACEHOLDER_P (TYPE_SIZE (result_type)))
    {
      result_type = build_pointer_type (result_type);
      expr_operand = build_unary_op (ADDR_EXPR, result_type, expr_operand);
      addr_p = true;
    }

  result = fold_build2 (COMPOUND_EXPR, result_type, stmt_operand,
                        expr_operand);

  if (addr_p)
    result = build_unary_op (INDIRECT_REF, NULL_TREE, result);

  return result;
}

/* Conveniently construct a function call expression.  FNDECL names the
   function to be called, N is the number of arguments, and the "..."
   parameters are the argument expressions.  Unlike build_call_expr
   this doesn't fold the call, hence it will always return a CALL_EXPR.  */

tree
build_call_n_expr (tree fndecl, int n, ...)
{
  va_list ap;
  tree fntype = TREE_TYPE (fndecl);
  tree fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  va_start (ap, n);
  fn = build_call_valist (TREE_TYPE (fntype), fn, n, ap);
  va_end (ap);
  return fn;
}
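
/* Minimal usage sketch (illustrative): allocating SIZE bytes through the
   default allocator, as maybe_wrap_malloc does below, boils down to

     tree malloc_ptr = build_call_n_expr (malloc_decl, 1, size);

   which always yields a CALL_EXPR invoking __gnat_malloc, even in cases
   where build_call_expr would have folded the call away.  */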

/* Build a goto to LABEL for a raise, with an optional call to Local_Raise.
   MSG gives the exception's identity for the call to Local_Raise, if any.  */

static tree
build_goto_raise (Entity_Id gnat_label, int msg)
{
  tree gnu_label = gnat_to_gnu_entity (gnat_label, NULL_TREE, false);
  tree gnu_result = build1 (GOTO_EXPR, void_type_node, gnu_label);
  Entity_Id local_raise = Get_Local_Raise_Call_Entity ();

  /* If Local_Raise is present, build Local_Raise (Exception'Identity).  */
  if (Present (local_raise))
    {
      tree gnu_local_raise
        = gnat_to_gnu_entity (local_raise, NULL_TREE, false);
      tree gnu_exception_entity
        = gnat_to_gnu_entity (Get_RT_Exception_Entity (msg), NULL_TREE, false);
      tree gnu_call
        = build_call_n_expr (gnu_local_raise, 1,
                             build_unary_op (ADDR_EXPR, NULL_TREE,
                                             gnu_exception_entity));
      gnu_result
        = build2 (COMPOUND_EXPR, void_type_node, gnu_call, gnu_result);
    }

  TREE_USED (gnu_label) = 1;
  return gnu_result;
}
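
/* Shape of the result (illustrative sketch): with Local_Raise present, the
   tree built above is

     COMPOUND_EXPR (CALL_EXPR <Local_Raise, &exception_entity>,
                    GOTO_EXPR <label>)

   and just GOTO_EXPR <label> otherwise.  */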

/* Expand the SLOC of GNAT_NODE, if present, into tree location information
   pointed to by FILENAME, LINE and COL.  Fall back to the current location
   if GNAT_NODE is absent or has no SLOC.  */

static void
expand_sloc (Node_Id gnat_node, tree *filename, tree *line, tree *col)
{
  const char *str;
  int line_number, column_number;

  if (Debug_Flag_NN || Exception_Locations_Suppressed)
    {
      str = "";
      line_number = 0;
      column_number = 0;
    }
  else if (Present (gnat_node) && Sloc (gnat_node) != No_Location)
    {
      str = Get_Name_String
            (Debug_Source_Name (Get_Source_File_Index (Sloc (gnat_node))));
      line_number = Get_Logical_Line_Number (Sloc (gnat_node));
      column_number = Get_Column_Number (Sloc (gnat_node));
    }
  else
    {
      str = lbasename (LOCATION_FILE (input_location));
      line_number = LOCATION_LINE (input_location);
      column_number = LOCATION_COLUMN (input_location);
    }

  const int len = strlen (str);
  *filename = build_string (len, str);
  TREE_TYPE (*filename) = build_array_type (char_type_node,
                                            build_index_type (size_int (len)));
  *line = build_int_cst (NULL_TREE, line_number);
  if (col)
    *col = build_int_cst (NULL_TREE, column_number);
}
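
/* For example (illustrative values): for a node whose SLOC is line 12,
   column 5 of p.adb, the outputs are *FILENAME = STRING_CST "p.adb" with
   a char array type, *LINE = INTEGER_CST 12 and, if COL is non-null,
   *COL = INTEGER_CST 5.  */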

/* Build a call to a function that raises an exception and passes file name
   and line number, if requested.  MSG says which exception function to call.
   GNAT_NODE is the node conveying the source location for which the error
   should be signaled, or Empty in which case the error is signaled for the
   current location.  KIND says which kind of exception node this is for,
   among N_Raise_{Constraint,Storage,Program}_Error.  */

tree
build_call_raise (int msg, Node_Id gnat_node, char kind)
{
  Entity_Id gnat_label = get_exception_label (kind);
  tree fndecl = gnat_raise_decls[msg];
  tree filename, line;

  /* If this is to be done as a goto, handle that case.  */
  if (Present (gnat_label))
    return build_goto_raise (gnat_label, msg);

  expand_sloc (gnat_node, &filename, &line, NULL);

  return
    build_call_n_expr (fndecl, 2,
                       build1 (ADDR_EXPR,
                               build_pointer_type (char_type_node),
                               filename),
                       line);
}

/* Similar to build_call_raise, with extra information about the column
   where the check failed.  */

tree
build_call_raise_column (int msg, Node_Id gnat_node, char kind)
{
  Entity_Id gnat_label = get_exception_label (kind);
  tree fndecl = gnat_raise_decls_ext[msg];
  tree filename, line, col;

  /* If this is to be done as a goto, handle that case.  */
  if (Present (gnat_label))
    return build_goto_raise (gnat_label, msg);

  expand_sloc (gnat_node, &filename, &line, &col);

  return
    build_call_n_expr (fndecl, 3,
                       build1 (ADDR_EXPR,
                               build_pointer_type (char_type_node),
                               filename),
                       line, col);
}

/* Similar to build_call_raise_column, for an index or range check exception,
   with extra information of the form "INDEX out of range FIRST..LAST".  */

tree
build_call_raise_range (int msg, Node_Id gnat_node, char kind,
                        tree index, tree first, tree last)
{
  Entity_Id gnat_label = get_exception_label (kind);
  tree fndecl = gnat_raise_decls_ext[msg];
  tree filename, line, col;

  /* If this is to be done as a goto, handle that case.  */
  if (Present (gnat_label))
    return build_goto_raise (gnat_label, msg);

  expand_sloc (gnat_node, &filename, &line, &col);

  return
    build_call_n_expr (fndecl, 6,
                       build1 (ADDR_EXPR,
                               build_pointer_type (char_type_node),
                               filename),
                       line, col,
                       convert (integer_type_node, index),
                       convert (integer_type_node, first),
                       convert (integer_type_node, last));
}
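
/* Resulting call shape (illustrative; the exact runtime routine depends on
   MSG):

     __gnat_rcheck_..._ext (&filename, line, column, index, first, last)

   i.e. the three location arguments of build_call_raise_column followed by
   the offending index and the violated bounds, all converted to integer.  */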

/* qsort comparer for the bit positions of two constructor elements
   for record components.  */

static int
compare_elmt_bitpos (const PTR rt1, const PTR rt2)
{
  const constructor_elt * const elmt1 = (const constructor_elt *) rt1;
  const constructor_elt * const elmt2 = (const constructor_elt *) rt2;
  const_tree const field1 = elmt1->index;
  const_tree const field2 = elmt2->index;
  const int ret
    = tree_int_cst_compare (bit_position (field1), bit_position (field2));

  return ret ? ret : (int) (DECL_UID (field1) - DECL_UID (field2));
}

/* Return a CONSTRUCTOR of TYPE whose elements are V.  */

tree
gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v)
{
  bool allconstant = (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST);
  bool read_only = true;
  bool side_effects = false;
  tree result, obj, val;
  unsigned int n_elmts;

  /* Scan the elements to see if they are all constant or if any has side
     effects, to let us set global flags on the resulting constructor.  Count
     the elements along the way for possible sorting purposes below.  */
  FOR_EACH_CONSTRUCTOR_ELT (v, n_elmts, obj, val)
    {
      /* The predicate must be in keeping with output_constructor.  */
      if ((!TREE_CONSTANT (val) && !TREE_STATIC (val))
          || (TREE_CODE (type) == RECORD_TYPE
              && CONSTRUCTOR_BITFIELD_P (obj)
              && !initializer_constant_valid_for_bitfield_p (val))
          || !initializer_constant_valid_p (val,
                                            TREE_TYPE (val),
                                            TYPE_REVERSE_STORAGE_ORDER (type)))
        allconstant = false;

      if (!TREE_READONLY (val))
        read_only = false;

      if (TREE_SIDE_EFFECTS (val))
        side_effects = true;
    }

  /* For record types with constant components only, sort field list
     by increasing bit position.  This is necessary to ensure the
     constructor can be output as static data.  */
  if (allconstant && TREE_CODE (type) == RECORD_TYPE && n_elmts > 1)
    v->qsort (compare_elmt_bitpos);

  result = build_constructor (type, v);
  CONSTRUCTOR_NO_CLEARING (result) = 1;
  TREE_CONSTANT (result) = TREE_STATIC (result) = allconstant;
  TREE_SIDE_EFFECTS (result) = side_effects;
  TREE_READONLY (result) = TYPE_READONLY (type) || read_only || allconstant;
  return result;
}
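
/* Typical caller pattern (illustrative; build_allocator below does exactly
   this for a template and its data, with field1/field2 standing for actual
   FIELD_DECLs of RECORD_TYPE):

     vec<constructor_elt, va_gc> *v;
     vec_alloc (v, 2);
     CONSTRUCTOR_APPEND_ELT (v, field1, value1);
     CONSTRUCTOR_APPEND_ELT (v, field2, value2);
     tree ctor = gnat_build_constructor (record_type, v);

   The helper then sorts the elements by bit position whenever the result
   can be output as static data.  */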

/* Return a COMPONENT_REF to access FIELD in RECORD, or NULL_TREE if the field
   is not found in the record.  Don't fold the result if NO_FOLD is true.  */

static tree
build_simple_component_ref (tree record, tree field, bool no_fold)
{
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (record));
  tree ref;

  gcc_assert (RECORD_OR_UNION_TYPE_P (type) && COMPLETE_TYPE_P (type));

  /* Try to fold a conversion from another record or union type unless the
     type contains a placeholder as it might be needed for a later
     substitution.  */
  if (TREE_CODE (record) == VIEW_CONVERT_EXPR
      && RECORD_OR_UNION_TYPE_P (TREE_TYPE (TREE_OPERAND (record, 0)))
      && !type_contains_placeholder_p (type))
    {
      tree op = TREE_OPERAND (record, 0);

      /* If this is an unpadding operation, convert the underlying object to
         the unpadded type directly.  */
      if (TYPE_IS_PADDING_P (type) && field == TYPE_FIELDS (type))
        return convert (TREE_TYPE (field), op);

      /* Otherwise try to access FIELD directly in the underlying type, but
         make sure that the form of the reference doesn't change too much;
         this can happen for an unconstrained bit-packed array type whose
         constrained form can be an integer type.  */
      ref = build_simple_component_ref (op, field, no_fold);
      if (ref && TREE_CODE (TREE_TYPE (ref)) == TREE_CODE (TREE_TYPE (field)))
        return ref;
    }

  /* If this field is not in the specified record, see if we can find a field
     in the specified record whose original field is the same as this one.  */
  if (DECL_CONTEXT (field) != type)
    {
      tree new_field;

      /* First loop through normal components.  */
      for (new_field = TYPE_FIELDS (type);
           new_field;
           new_field = DECL_CHAIN (new_field))
        if (SAME_FIELD_P (field, new_field))
          break;

      /* Next, loop through DECL_INTERNAL_P components if we haven't found the
         component in the first search.  Doing this search in two steps is
         required to avoid hidden homonymous fields in the _Parent field.  */
      if (!new_field)
        for (new_field = TYPE_FIELDS (type);
             new_field;
             new_field = DECL_CHAIN (new_field))
          if (DECL_INTERNAL_P (new_field)
              && RECORD_OR_UNION_TYPE_P (TREE_TYPE (new_field)))
            {
              tree field_ref
                = build_simple_component_ref (record, new_field, no_fold);
              ref = build_simple_component_ref (field_ref, field, no_fold);
              if (ref)
                return ref;
            }

      field = new_field;
    }

  if (!field)
    return NULL_TREE;

  /* If the field's offset has overflowed, do not try to access it, as doing
     so may trigger sanity checks deeper in the back-end.  Note that we don't
     need to warn since this will be done on trying to declare the object.  */
  if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST
      && TREE_OVERFLOW (DECL_FIELD_OFFSET (field)))
    return NULL_TREE;

  ref = build3 (COMPONENT_REF, TREE_TYPE (field), record, field, NULL_TREE);

  if (TREE_READONLY (record)
      || TREE_READONLY (field)
      || TYPE_READONLY (type))
    TREE_READONLY (ref) = 1;

  if (TREE_THIS_VOLATILE (record)
      || TREE_THIS_VOLATILE (field)
      || TYPE_VOLATILE (type))
    TREE_THIS_VOLATILE (ref) = 1;

  if (no_fold)
    return ref;

  /* The generic folder may punt in this case because the inner array type
     can be self-referential, but folding is in fact not problematic.  */
  if (TREE_CODE (record) == CONSTRUCTOR
      && TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (record)))
    {
      vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (record);
      unsigned HOST_WIDE_INT idx;
      tree index, value;
      FOR_EACH_CONSTRUCTOR_ELT (elts, idx, index, value)
        if (index == field)
          return value;
      return ref;
    }

  return fold (ref);
}

/* Likewise, but return NULL_EXPR and generate a Constraint_Error if the
   field is not found in the record.  */

tree
build_component_ref (tree record, tree field, bool no_fold)
{
  tree ref = build_simple_component_ref (record, field, no_fold);
  if (ref)
    return ref;

  /* Assume this is an invalid user field so raise Constraint_Error.  */
  return build1 (NULL_EXPR, TREE_TYPE (field),
                 build_call_raise (CE_Discriminant_Check_Failed, Empty,
                                   N_Raise_Constraint_Error));
}

/* Helper for build_call_alloc_dealloc, with arguments to be interpreted
   identically.  Process the case where a GNAT_PROC to call is provided.  */

static inline tree
build_call_alloc_dealloc_proc (tree gnu_obj, tree gnu_size, tree gnu_type,
                               Entity_Id gnat_proc, Entity_Id gnat_pool)
{
  tree gnu_proc = gnat_to_gnu (gnat_proc);
  tree gnu_call;

  /* A storage pool's underlying type is a record type (for both predefined
     storage pools and GNAT simple storage pools).  The secondary stack uses
     the same mechanism, but its pool object (SS_Pool) is an integer.  */
  if (Is_Record_Type (Underlying_Type (Etype (gnat_pool))))
    {
      /* The size is the third parameter; the alignment parameter has the
         same type.  */
      Entity_Id gnat_size_type
        = Etype (Next_Formal (Next_Formal (First_Formal (gnat_proc))));
      tree gnu_size_type = gnat_to_gnu_type (gnat_size_type);

      tree gnu_pool = gnat_to_gnu (gnat_pool);
      tree gnu_pool_addr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_pool);
      tree gnu_align = size_int (TYPE_ALIGN (gnu_type) / BITS_PER_UNIT);

      gnu_size = convert (gnu_size_type, gnu_size);
      gnu_align = convert (gnu_size_type, gnu_align);

      /* The first arg is always the address of the storage pool; next
         comes the address of the object, for a deallocator, then the
         size and alignment.  */
      if (gnu_obj)
        gnu_call = build_call_n_expr (gnu_proc, 4, gnu_pool_addr, gnu_obj,
                                      gnu_size, gnu_align);
      else
        gnu_call = build_call_n_expr (gnu_proc, 3, gnu_pool_addr,
                                      gnu_size, gnu_align);
    }

  /* Secondary stack case.  */
  else
    {
      /* The size is the second parameter.  */
      Entity_Id gnat_size_type
        = Etype (Next_Formal (First_Formal (gnat_proc)));
      tree gnu_size_type = gnat_to_gnu_type (gnat_size_type);

      gnu_size = convert (gnu_size_type, gnu_size);

      /* The first arg is the address of the object, for a deallocator,
         then the size.  */
      if (gnu_obj)
        gnu_call = build_call_n_expr (gnu_proc, 2, gnu_obj, gnu_size);
      else
        gnu_call = build_call_n_expr (gnu_proc, 1, gnu_size);
    }

  return gnu_call;
}

/* Helper for build_call_alloc_dealloc, to build and return an allocator for
   DATA_SIZE bytes aimed at containing a DATA_TYPE object, using the default
   __gnat_malloc allocator.  Honor DATA_TYPE alignments greater than what the
   latter offers.  */

static inline tree
maybe_wrap_malloc (tree data_size, tree data_type, Node_Id gnat_node)
{
  /* When the DATA_TYPE alignment is stricter than what malloc offers
     (super-aligned case), we allocate an "aligning" wrapper type and return
     the address of its single data field with the malloc's return value
     stored just in front.  */

  unsigned int data_align = TYPE_ALIGN (data_type);
  unsigned int system_allocator_alignment
    = get_target_system_allocator_alignment () * BITS_PER_UNIT;

  tree aligning_type
    = ((data_align > system_allocator_alignment)
       ? make_aligning_type (data_type, data_align, data_size,
                             system_allocator_alignment,
                             POINTER_SIZE / BITS_PER_UNIT,
                             gnat_node)
       : NULL_TREE);

  tree size_to_malloc
    = aligning_type ? TYPE_SIZE_UNIT (aligning_type) : data_size;

  tree malloc_ptr = build_call_n_expr (malloc_decl, 1, size_to_malloc);

  if (aligning_type)
    {
      /* Latch malloc's return value and get a pointer to the aligning field
         first.  */
      tree storage_ptr = gnat_protect_expr (malloc_ptr);

      tree aligning_record_addr
        = convert (build_pointer_type (aligning_type), storage_ptr);

      tree aligning_record
        = build_unary_op (INDIRECT_REF, NULL_TREE, aligning_record_addr);

      tree aligning_field
        = build_component_ref (aligning_record, TYPE_FIELDS (aligning_type),
                               false);

      tree aligning_field_addr
        = build_unary_op (ADDR_EXPR, NULL_TREE, aligning_field);

      /* Then arrange to store the allocator's return value ahead
         and return.  */
      tree storage_ptr_slot_addr
        = build_binary_op (POINTER_PLUS_EXPR, ptr_type_node,
                           convert (ptr_type_node, aligning_field_addr),
                           size_int (-(HOST_WIDE_INT) POINTER_SIZE
                                     / BITS_PER_UNIT));

      tree storage_ptr_slot
        = build_unary_op (INDIRECT_REF, NULL_TREE,
                          convert (build_pointer_type (ptr_type_node),
                                   storage_ptr_slot_addr));

      return
        build2 (COMPOUND_EXPR, TREE_TYPE (aligning_field_addr),
                build_binary_op (INIT_EXPR, NULL_TREE,
                                 storage_ptr_slot, storage_ptr),
                aligning_field_addr);
    }
  else
    return malloc_ptr;
}
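
/* Memory layout in the super-aligned case (illustrative diagram):

     storage_ptr                      returned pointer
     |                                |
     v                                v
     +------------------+-----------+----------------------+
     | padding          | raw ptr   | DATA_TYPE object ... |
     +------------------+-----------+----------------------+
                         ^ POINTER_SIZE bits wide, holds the
                           value malloc actually returned

   maybe_wrap_free below reads the raw pointer back from just in front of
   the data in order to release the whole block.  */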

/* Helper for build_call_alloc_dealloc, to release a DATA_TYPE object
   designated by DATA_PTR using the __gnat_free entry point.  */

static inline tree
maybe_wrap_free (tree data_ptr, tree data_type)
{
  /* In the regular alignment case, we pass the data pointer straight to free.
     In the superaligned case, we need to retrieve the initial allocator
     return value, stored in front of the data block at allocation time.  */

  unsigned int data_align = TYPE_ALIGN (data_type);
  unsigned int system_allocator_alignment
    = get_target_system_allocator_alignment () * BITS_PER_UNIT;

  tree free_ptr;

  if (data_align > system_allocator_alignment)
    {
      /* DATA_FRONT_PTR (void *) = (void *)DATA_PTR - sizeof (void *)  */
      tree data_front_ptr
        = build_binary_op
          (POINTER_PLUS_EXPR, ptr_type_node,
           convert (ptr_type_node, data_ptr),
           size_int (-(HOST_WIDE_INT) POINTER_SIZE / BITS_PER_UNIT));

      /* FREE_PTR (void *) = *(void **)DATA_FRONT_PTR  */
      free_ptr
        = build_unary_op
          (INDIRECT_REF, NULL_TREE,
           convert (build_pointer_type (ptr_type_node), data_front_ptr));
    }
  else
    free_ptr = data_ptr;

  return build_call_n_expr (free_decl, 1, free_ptr);
}

/* Build a GCC tree to call an allocation or deallocation function.
   If GNU_OBJ is nonzero, it is an object to deallocate.  Otherwise,
   generate an allocator.

   GNU_SIZE is the number of bytes to allocate and GNU_TYPE is the contained
   object type, used to determine the to-be-honored address alignment.
   GNAT_PROC, if present, is a procedure to call and GNAT_POOL is the storage
   pool to use.  If not present, malloc and free are used.  GNAT_NODE is used
   to provide an error location for restriction violation messages.  */

tree
build_call_alloc_dealloc (tree gnu_obj, tree gnu_size, tree gnu_type,
                          Entity_Id gnat_proc, Entity_Id gnat_pool,
                          Node_Id gnat_node)
{
  /* Explicit proc to call?  This one is assumed to deal with the type
     alignment constraints.  */
  if (Present (gnat_proc))
    return build_call_alloc_dealloc_proc (gnu_obj, gnu_size, gnu_type,
                                          gnat_proc, gnat_pool);

  /* Otherwise, object to "free" or "malloc" with possible special processing
     for alignments stricter than what the default allocator honors.  */
  else if (gnu_obj)
    return maybe_wrap_free (gnu_obj, gnu_type);
  else
    {
      /* Assert that we no longer can be called with this special pool.  */
      gcc_assert (gnat_pool != -1);

      /* Check that we aren't violating the associated restriction.  */
      if (!(Nkind (gnat_node) == N_Allocator && Comes_From_Source (gnat_node)))
        {
          Check_No_Implicit_Heap_Alloc (gnat_node);
          if (Has_Task (Etype (gnat_node)))
            Check_No_Implicit_Task_Alloc (gnat_node);
          if (Has_Protected (Etype (gnat_node)))
            Check_No_Implicit_Protected_Alloc (gnat_node);
        }
      return maybe_wrap_malloc (gnu_size, gnu_type, gnat_node);
    }
}

/* Build a GCC tree that corresponds to allocating an object of TYPE whose
   initial value is INIT, if INIT is nonzero.  Convert the expression to
   RESULT_TYPE, which must be some pointer type, and return the result.

   GNAT_PROC and GNAT_POOL optionally give the procedure to call and
   the storage pool to use.  GNAT_NODE is used to provide an error
   location for restriction violation messages.  If IGNORE_INIT_TYPE is
   true, ignore the type of INIT for the purpose of determining the size;
   this will cause the maximum size to be allocated if TYPE is of
   self-referential size.  */

tree
build_allocator (tree type, tree init, tree result_type, Entity_Id gnat_proc,
                 Entity_Id gnat_pool, Node_Id gnat_node, bool ignore_init_type)
{
  tree size, storage, storage_deref, storage_init;

  /* If the initializer, if present, is a NULL_EXPR, just return a new one.  */
  if (init && TREE_CODE (init) == NULL_EXPR)
    return build1 (NULL_EXPR, result_type, TREE_OPERAND (init, 0));

  /* If we are just annotating types, also return a NULL_EXPR.  */
  else if (type_annotate_only)
    return build1 (NULL_EXPR, result_type,
                   build_call_raise (CE_Range_Check_Failed, gnat_node,
                                     N_Raise_Constraint_Error));

  /* If the initializer, if present, is a COND_EXPR, deal with each branch.  */
  else if (init && TREE_CODE (init) == COND_EXPR)
    return build3 (COND_EXPR, result_type, TREE_OPERAND (init, 0),
                   build_allocator (type, TREE_OPERAND (init, 1), result_type,
                                    gnat_proc, gnat_pool, gnat_node,
                                    ignore_init_type),
                   build_allocator (type, TREE_OPERAND (init, 2), result_type,
                                    gnat_proc, gnat_pool, gnat_node,
                                    ignore_init_type));

  /* If RESULT_TYPE is a fat or thin pointer, set SIZE to be the sum of the
     sizes of the object and its template.  Allocate the whole thing and
     fill in the parts that are known.  */
  else if (TYPE_IS_FAT_OR_THIN_POINTER_P (result_type))
    {
      tree storage_type
        = build_unc_object_type_from_ptr (result_type, type,
                                          get_identifier ("ALLOC"), false);
      tree template_type = TREE_TYPE (TYPE_FIELDS (storage_type));
      tree storage_ptr_type = build_pointer_type (storage_type);

      size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_SIZE_UNIT (storage_type),
                                             init);

      /* If the size overflows, pass -1 so Storage_Error will be raised.  */
      if (TREE_CODE (size) == INTEGER_CST && !valid_constant_size_p (size))
        size = size_int (-1);

      storage = build_call_alloc_dealloc (NULL_TREE, size, storage_type,
                                          gnat_proc, gnat_pool, gnat_node);
      storage = convert (storage_ptr_type, gnat_protect_expr (storage));
      storage_deref = build_unary_op (INDIRECT_REF, NULL_TREE, storage);
      TREE_THIS_NOTRAP (storage_deref) = 1;

      /* If there is an initializing expression, then make a constructor for
         the entire object including the bounds and copy it into the object.
         If there is no initializing expression, just set the bounds.  */
      if (init)
        {
          vec<constructor_elt, va_gc> *v;
          vec_alloc (v, 2);

          CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (storage_type),
                                  build_template (template_type, type, init));
          CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (storage_type)),
                                  init);
          storage_init
            = build_binary_op (INIT_EXPR, NULL_TREE, storage_deref,
                               gnat_build_constructor (storage_type, v));
        }
      else
        storage_init
          = build_binary_op (INIT_EXPR, NULL_TREE,
                             build_component_ref (storage_deref,
                                                  TYPE_FIELDS (storage_type),
                                                  false),
                             build_template (template_type, type, NULL_TREE));

      return build2 (COMPOUND_EXPR, result_type,
                     storage_init, convert (result_type, storage));
    }

  size = TYPE_SIZE_UNIT (type);

  /* If we have an initializing expression, see if its size is simpler
     than the size from the type.  */
  if (!ignore_init_type && init && TYPE_SIZE_UNIT (TREE_TYPE (init))
      && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (init))) == INTEGER_CST
          || CONTAINS_PLACEHOLDER_P (size)))
    size = TYPE_SIZE_UNIT (TREE_TYPE (init));

  /* If the size is still self-referential, reference the initializing
     expression, if it is present.  If not, this must have been a
     call to allocate a library-level object, in which case we use
     the maximum size.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    {
      if (!ignore_init_type && init)
        size = substitute_placeholder_in_expr (size, init);
      else
        size = max_size (size, true);
    }

  /* If the size overflows, pass -1 so Storage_Error will be raised.  */
  if (TREE_CODE (size) == INTEGER_CST && !valid_constant_size_p (size))
    size = size_int (-1);

  storage = convert (result_type,
                     build_call_alloc_dealloc (NULL_TREE, size, type,
                                               gnat_proc, gnat_pool,
                                               gnat_node));

  /* If we have an initial value, protect the new address, assign the value
     and return the address with a COMPOUND_EXPR.  */
  if (init)
    {
      storage = gnat_protect_expr (storage);
      storage_deref = build_unary_op (INDIRECT_REF, NULL_TREE, storage);
      TREE_THIS_NOTRAP (storage_deref) = 1;
      storage_init
        = build_binary_op (INIT_EXPR, NULL_TREE, storage_deref, init);
      return build2 (COMPOUND_EXPR, result_type, storage_init, storage);
    }

  return storage;
}
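
/* Result shape in the simple initialized case (illustrative): for a scalar
   or constrained TYPE with an INIT expression, the function returns roughly

     COMPOUND_EXPR (INIT_EXPR (*SAVE_EXPR <ptr>, init), SAVE_EXPR <ptr>)

   where ptr is the converted return value of the allocation call, so the
   new object is initialized and its address is the value of the whole
   expression.  */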

/* Indicate that we need to take the address of T and that it therefore
   should not be allocated in a register.  Return true if successful.  */

bool
gnat_mark_addressable (tree t)
{
  while (true)
    switch (TREE_CODE (t))
      {
      case ADDR_EXPR:
      case COMPONENT_REF:
      case ARRAY_REF:
      case ARRAY_RANGE_REF:
      case REALPART_EXPR:
      case IMAGPART_EXPR:
      case VIEW_CONVERT_EXPR:
      case NON_LVALUE_EXPR:
      CASE_CONVERT:
        t = TREE_OPERAND (t, 0);
        break;

      case COMPOUND_EXPR:
        t = TREE_OPERAND (t, 1);
        break;

      case CONSTRUCTOR:
        TREE_ADDRESSABLE (t) = 1;
        return true;

      case VAR_DECL:
      case PARM_DECL:
      case RESULT_DECL:
        TREE_ADDRESSABLE (t) = 1;
        return true;

      case FUNCTION_DECL:
        TREE_ADDRESSABLE (t) = 1;
        return true;

      case CONST_DECL:
        return DECL_CONST_CORRESPONDING_VAR (t)
               && gnat_mark_addressable (DECL_CONST_CORRESPONDING_VAR (t));

      default:
        return true;
      }
}

/* Return true if EXP is a stable expression for the purpose of the functions
   below and, therefore, can be returned unmodified by them.  We accept things
   that are actual constants or that have already been handled.  */

static bool
gnat_stable_expr_p (tree exp)
{
  enum tree_code code = TREE_CODE (exp);
  return TREE_CONSTANT (exp) || code == NULL_EXPR || code == SAVE_EXPR;
}

/* Save EXP for later use or reuse.  This is equivalent to save_expr in tree.c
   but we know how to handle our own nodes.  */

tree
gnat_save_expr (tree exp)
{
  tree type = TREE_TYPE (exp);
  enum tree_code code = TREE_CODE (exp);

  if (gnat_stable_expr_p (exp))
    return exp;

  if (code == UNCONSTRAINED_ARRAY_REF)
    {
      tree t = build1 (code, type, gnat_save_expr (TREE_OPERAND (exp, 0)));
      TREE_READONLY (t) = TYPE_READONLY (type);
      return t;
    }

  /* If this is a COMPONENT_REF of a fat pointer, save the entire fat pointer.
     This may be more efficient, but will also allow us to more easily find
     the match for the PLACEHOLDER_EXPR.  */
  if (code == COMPONENT_REF
      && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
    return build3 (code, type, gnat_save_expr (TREE_OPERAND (exp, 0)),
                   TREE_OPERAND (exp, 1), NULL_TREE);

  return save_expr (exp);
}

/* Protect EXP for immediate reuse.  This is a variant of gnat_save_expr that
   is optimized under the assumption that EXP's value doesn't change before
   its subsequent reuse(s) except through its potential reevaluation.  */

tree
gnat_protect_expr (tree exp)
{
  tree type = TREE_TYPE (exp);
  enum tree_code code = TREE_CODE (exp);

  if (gnat_stable_expr_p (exp))
    return exp;

  /* If EXP has no side effects, we theoretically don't need to do anything.
     However, we may be recursively passed more and more complex expressions
     involving checks which will be reused multiple times and eventually be
     unshared for gimplification; in order to avoid a complexity explosion
     at that point, we protect any expressions more complex than a simple
     arithmetic expression.  */
  if (!TREE_SIDE_EFFECTS (exp))
    {
      tree inner = skip_simple_arithmetic (exp);
      if (!EXPR_P (inner) || REFERENCE_CLASS_P (inner))
        return exp;
    }

  /* If this is a conversion, protect what's inside the conversion.  */
  if (code == NON_LVALUE_EXPR
      || CONVERT_EXPR_CODE_P (code)
      || code == VIEW_CONVERT_EXPR)
    return build1 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)));

  /* If we're indirectly referencing something, we only need to protect the
     address since the data itself can't change in these situations.  */
  if (code == INDIRECT_REF || code == UNCONSTRAINED_ARRAY_REF)
    {
      tree t = build1 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)));
      TREE_READONLY (t) = TYPE_READONLY (type);
      return t;
    }

  /* Likewise if we're indirectly referencing part of something.  */
  if (code == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (exp, 0)) == INDIRECT_REF)
    return build3 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)),
                   TREE_OPERAND (exp, 1), NULL_TREE);

  /* If this is a COMPONENT_REF of a fat pointer, save the entire fat pointer.
     This may be more efficient, but will also allow us to more easily find
     the match for the PLACEHOLDER_EXPR.  */
  if (code == COMPONENT_REF
      && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
    return build3 (code, type, gnat_protect_expr (TREE_OPERAND (exp, 0)),
                   TREE_OPERAND (exp, 1), NULL_TREE);

  /* If this is a fat pointer or a scalar, just make a SAVE_EXPR.  Likewise
     for a CALL_EXPR as large objects are returned via invisible reference
     in most ABIs so the temporary will directly be filled by the callee.  */
  if (TYPE_IS_FAT_POINTER_P (type)
      || !AGGREGATE_TYPE_P (type)
      || code == CALL_EXPR)
    return save_expr (exp);

  /* Otherwise this is a reference: protect the address and dereference.  */
  return
    build_unary_op (INDIRECT_REF, type,
                    save_expr (build_unary_op (ADDR_EXPR, NULL_TREE, exp)));
}
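
/* Illustrative behavior (sketch, not actual output): protecting the
   dereference *P only protects the address, i.e.

     gnat_protect_expr (INDIRECT_REF <p>)  -->  INDIRECT_REF <SAVE_EXPR <p>>

   whereas protecting an aggregate lvalue A falls through to the last case
   and yields *SAVE_EXPR <&A>, so reevaluations read through a stable
   address instead of copying the aggregate into a temporary.  */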

/* This is equivalent to stabilize_reference_1 in tree.c but we take an extra
   argument to force evaluation of everything.  */

static tree
gnat_stabilize_reference_1 (tree e, void *data)
{
  const bool force = *(bool *)data;
  enum tree_code code = TREE_CODE (e);
  tree type = TREE_TYPE (e);
  tree result;

  if (gnat_stable_expr_p (e))
    return e;

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_exceptional:
    case tcc_declaration:
    case tcc_comparison:
    case tcc_expression:
    case tcc_reference:
    case tcc_vl_exp:
      /* If this is a COMPONENT_REF of a fat pointer, save the entire
         fat pointer.  This may be more efficient, but will also allow
         us to more easily find the match for the PLACEHOLDER_EXPR.  */
      if (code == COMPONENT_REF
          && TYPE_IS_FAT_POINTER_P (TREE_TYPE (TREE_OPERAND (e, 0))))
        result
          = build3 (code, type,
                    gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), data),
                    TREE_OPERAND (e, 1), NULL_TREE);
      /* If the expression has side-effects, then encase it in a SAVE_EXPR
         so that it will only be evaluated once.  */
      /* The tcc_reference and tcc_comparison classes could be handled as
         below, but it is generally faster to only evaluate them once.  */
      else if (TREE_SIDE_EFFECTS (e) || force)
        return save_expr (e);
      else
        return e;
      break;

    case tcc_binary:
      /* Recursively stabilize each operand.  */
      result
        = build2 (code, type,
                  gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), data),
                  gnat_stabilize_reference_1 (TREE_OPERAND (e, 1), data));
      break;

    case tcc_unary:
      /* Recursively stabilize each operand.  */
      result
        = build1 (code, type,
                  gnat_stabilize_reference_1 (TREE_OPERAND (e, 0), data));
      break;

    default:
      gcc_unreachable ();
    }

  TREE_READONLY (result) = TREE_READONLY (e);
  TREE_SIDE_EFFECTS (result) |= TREE_SIDE_EFFECTS (e);
  TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);

  return result;
}

/* This is equivalent to stabilize_reference in tree.c but we know how to
   handle our own nodes and we take extra arguments.  FORCE says whether to
   force evaluation of everything in REF.  INIT is set to the first arm of
   a COMPOUND_EXPR present in REF, if any.  */

tree
gnat_stabilize_reference (tree ref, bool force, tree *init)
{
  return
    gnat_rewrite_reference (ref, gnat_stabilize_reference_1, &force, init);
}

/* Rewrite reference REF and call FUNC on each expression within REF in the
   process.  DATA is passed unmodified to FUNC.  INIT is set to the first
   arm of a COMPOUND_EXPR present in REF, if any.  */

tree
gnat_rewrite_reference (tree ref, rewrite_fn func, void *data, tree *init)
{
  tree type = TREE_TYPE (ref);
  enum tree_code code = TREE_CODE (ref);
  tree result;

  switch (code)
    {
    case CONST_DECL:
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      /* No action is needed in this case.  */
      return ref;

    CASE_CONVERT:
    case FLOAT_EXPR:
    case FIX_TRUNC_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case VIEW_CONVERT_EXPR:
      result
        = build1 (code, type,
                  gnat_rewrite_reference (TREE_OPERAND (ref, 0), func, data,
                                          init));
      break;

    case INDIRECT_REF:
    case UNCONSTRAINED_ARRAY_REF:
      result = build1 (code, type, func (TREE_OPERAND (ref, 0), data));
      break;

    case COMPONENT_REF:
      result = build3 (COMPONENT_REF, type,
                       gnat_rewrite_reference (TREE_OPERAND (ref, 0), func,
                                               data, init),
                       TREE_OPERAND (ref, 1), NULL_TREE);
      break;

    case BIT_FIELD_REF:
      result = build3 (BIT_FIELD_REF, type,
                       gnat_rewrite_reference (TREE_OPERAND (ref, 0), func,
                                               data, init),
                       TREE_OPERAND (ref, 1), TREE_OPERAND (ref, 2));
      REF_REVERSE_STORAGE_ORDER (result) = REF_REVERSE_STORAGE_ORDER (ref);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      result
        = build4 (code, type,
                  gnat_rewrite_reference (TREE_OPERAND (ref, 0), func, data,
                                          init),
                  func (TREE_OPERAND (ref, 1), data),
                  TREE_OPERAND (ref, 2), NULL_TREE);
      break;

    case COMPOUND_EXPR:
      gcc_assert (!*init);
      *init = TREE_OPERAND (ref, 0);
      /* We expect only the pattern built in Call_to_gnu.  */
      gcc_assert (DECL_P (TREE_OPERAND (ref, 1))
                  || (TREE_CODE (TREE_OPERAND (ref, 1)) == COMPONENT_REF
                      && DECL_P (TREE_OPERAND (TREE_OPERAND (ref, 1), 0))));
      return TREE_OPERAND (ref, 1);

    case CALL_EXPR:
      {
        /* This can only be an atomic load.  */
        gcc_assert (call_is_atomic_load (ref));

        /* An atomic load is an INDIRECT_REF of its first argument.  */
        tree t = CALL_EXPR_ARG (ref, 0);
        if (TREE_CODE (t) == NOP_EXPR)
          t = TREE_OPERAND (t, 0);
        if (TREE_CODE (t) == ADDR_EXPR)
          t = build1 (ADDR_EXPR, TREE_TYPE (t),
                      gnat_rewrite_reference (TREE_OPERAND (t, 0), func, data,
                                              init));
        else
          t = func (t, data);
        t = fold_convert (TREE_TYPE (CALL_EXPR_ARG (ref, 0)), t);

        result = build_call_expr (TREE_OPERAND (CALL_EXPR_FN (ref), 0), 2,
                                  t, CALL_EXPR_ARG (ref, 1));
      }
      break;

    case ERROR_MARK:
    case NULL_EXPR:
      return ref;

    default:
      gcc_unreachable ();
    }

  /* TREE_THIS_VOLATILE and TREE_SIDE_EFFECTS set on the initial expression
     may not be sustained across some paths, such as the way via build1 for
     INDIRECT_REF.  We reset those flags here in the general case, which is
     consistent with the GCC version of this routine.

     Special care should be taken regarding TREE_SIDE_EFFECTS, because some
     paths introduce side-effects where there was none initially (e.g. if a
     SAVE_EXPR is built) and we also want to keep track of that.  */
  TREE_READONLY (result) = TREE_READONLY (ref);
  TREE_SIDE_EFFECTS (result) |= TREE_SIDE_EFFECTS (ref);
  TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);

  if (code == INDIRECT_REF
      || code == UNCONSTRAINED_ARRAY_REF
      || code == ARRAY_REF
      || code == ARRAY_RANGE_REF)
    TREE_THIS_NOTRAP (result) = TREE_THIS_NOTRAP (ref);

  return result;
}

/* This is equivalent to get_inner_reference in expr.c but it returns the
   ultimate containing object only if the reference (lvalue) is constant,
   i.e. if it doesn't depend on the context in which it is evaluated.  */

tree
get_inner_constant_reference (tree exp)
{
  while (true)
    {
      switch (TREE_CODE (exp))
        {
        case BIT_FIELD_REF:
          break;

        case COMPONENT_REF:
          if (!TREE_CONSTANT (DECL_FIELD_OFFSET (TREE_OPERAND (exp, 1))))
            return NULL_TREE;
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          {
            if (TREE_OPERAND (exp, 2))
              return NULL_TREE;

            tree array_type = TREE_TYPE (TREE_OPERAND (exp, 0));
            if (!TREE_CONSTANT (TREE_OPERAND (exp, 1))
                || !TREE_CONSTANT (TYPE_MIN_VALUE (TYPE_DOMAIN (array_type)))
                || !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (array_type))))
              return NULL_TREE;
          }
          break;

        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          break;

        default:
          goto done;
        }

      exp = TREE_OPERAND (exp, 0);
    }

 done:
  return exp;
}
2875
|
|
2876 /* Return true if EXPR is the addition or the subtraction of a constant and,
|
|
2877 if so, set *ADD to the addend, *CST to the constant and *MINUS_P to true
|
|
2878 if this is a subtraction. */

bool
is_simple_additive_expression (tree expr, tree *add, tree *cst, bool *minus_p)
{
  /* Skip overflow checks.  */
  if (TREE_CODE (expr) == COND_EXPR
      && TREE_CODE (COND_EXPR_THEN (expr)) == COMPOUND_EXPR
      && TREE_CODE (TREE_OPERAND (COND_EXPR_THEN (expr), 0)) == CALL_EXPR
      && get_callee_fndecl (TREE_OPERAND (COND_EXPR_THEN (expr), 0))
	 == gnat_raise_decls[CE_Overflow_Check_Failed])
    expr = COND_EXPR_ELSE (expr);

  if (TREE_CODE (expr) == PLUS_EXPR)
    {
      if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
	{
	  *add = TREE_OPERAND (expr, 1);
	  *cst = TREE_OPERAND (expr, 0);
	  *minus_p = false;
	  return true;
	}
      else if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
	{
	  *add = TREE_OPERAND (expr, 0);
	  *cst = TREE_OPERAND (expr, 1);
	  *minus_p = false;
	  return true;
	}
    }
  else if (TREE_CODE (expr) == MINUS_EXPR)
    {
      if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
	{
	  *add = TREE_OPERAND (expr, 0);
	  *cst = TREE_OPERAND (expr, 1);
	  *minus_p = true;
	  return true;
	}
    }

  return false;
}

/* If EXPR is an expression that is invariant in the current function, in the
   sense that it can be evaluated anywhere in the function and any number of
   times, return EXPR or an equivalent expression.  Otherwise return NULL.  */
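
/* For instance, a read-only parameter of the current function, a constant,
   or a read-only object defined outside the current function qualifies; an
   object whose value may change between two evaluation points does not.  */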

tree
gnat_invariant_expr (tree expr)
{
  const tree type = TREE_TYPE (expr);
  tree add, cst;
  bool minus_p;

  expr = remove_conversions (expr, false);

  /* Look through temporaries created to capture values.  */
  while ((TREE_CODE (expr) == CONST_DECL
	  || (TREE_CODE (expr) == VAR_DECL && TREE_READONLY (expr)))
	 && decl_function_context (expr) == current_function_decl
	 && DECL_INITIAL (expr))
    {
      expr = DECL_INITIAL (expr);
      /* Look into CONSTRUCTORs built to initialize padded types.  */
      if (TYPE_IS_PADDING_P (TREE_TYPE (expr)))
	expr = convert (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (expr))), expr);
      expr = remove_conversions (expr, false);
    }

  /* We are only interested in scalar types at the moment and, even if we may
     have gone through padding types in the above loop, we must be back to a
     scalar value at this point.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (expr)))
    return NULL_TREE;

  if (TREE_CONSTANT (expr))
    return fold_convert (type, expr);

  /* Deal with addition or subtraction of constants.  */
  if (is_simple_additive_expression (expr, &add, &cst, &minus_p))
    {
      add = gnat_invariant_expr (add);
      if (add)
	return
	  fold_build2 (minus_p ? MINUS_EXPR : PLUS_EXPR, type,
		       fold_convert (type, add), fold_convert (type, cst));
      else
	return NULL_TREE;
    }

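  /* Otherwise walk down the reference to find its base object, tracking in
     invariant_p whether the last component seen is known not to change once
     the enclosing object has been created.  */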
  bool invariant_p = false;
  tree t = expr;

  while (true)
    {
      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  invariant_p |= DECL_INVARIANT_P (TREE_OPERAND (t, 1));
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (!TREE_CONSTANT (TREE_OPERAND (t, 1)) || TREE_OPERAND (t, 2))
	    return NULL_TREE;
	  break;

	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	CASE_CONVERT:
	  break;

	case INDIRECT_REF:
	  if ((!invariant_p && !TREE_READONLY (t)) || TREE_SIDE_EFFECTS (t))
	    return NULL_TREE;
	  invariant_p = false;
	  break;

	default:
	  goto object;
	}

      t = TREE_OPERAND (t, 0);
    }

 object:
  if (TREE_SIDE_EFFECTS (t))
    return NULL_TREE;

  if (TREE_CODE (t) == CONST_DECL
      && (DECL_EXTERNAL (t)
	  || decl_function_context (t) != current_function_decl))
    return fold_convert (type, expr);

  if (!invariant_p && !TREE_READONLY (t))
    return NULL_TREE;

  if (TREE_CODE (t) == PARM_DECL)
    return fold_convert (type, expr);

  if (TREE_CODE (t) == VAR_DECL
      && (DECL_EXTERNAL (t)
	  || decl_function_context (t) != current_function_decl))
    return fold_convert (type, expr);

  return NULL_TREE;
}
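
/* A minimal usage sketch (hypothetical helper, not part of the GNAT API):
   return EXPR itself when it is invariant in the current function, and
   otherwise wrap it in a SAVE_EXPR so that its value is only computed once
   and can be reused safely.  */

static inline tree
invariant_or_saved_expr (tree expr)
{
  tree inv = gnat_invariant_expr (expr);
  return inv ? inv : save_expr (expr);
}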