Mercurial > hg > CbC > CbC_gcc
comparison gcc/tree-vrp.c @ 132:d34655255c78
update gcc-8.2
author | mir3636 |
---|---|
date | Thu, 25 Oct 2018 10:21:07 +0900 |
parents | 84e7813d76e9 |
children | 1830386684a0 |
comparison
equal
deleted
inserted
replaced
130:e108057fa461 | 132:d34655255c78 |
---|---|
1 /* Support routines for Value Range Propagation (VRP). | 1 /* Support routines for Value Range Propagation (VRP). |
2 Copyright (C) 2005-2017 Free Software Foundation, Inc. | 2 Copyright (C) 2005-2018 Free Software Foundation, Inc. |
3 Contributed by Diego Novillo <dnovillo@redhat.com>. | 3 Contributed by Diego Novillo <dnovillo@redhat.com>. |
4 | 4 |
5 This file is part of GCC. | 5 This file is part of GCC. |
6 | 6 |
7 GCC is free software; you can redistribute it and/or modify | 7 GCC is free software; you can redistribute it and/or modify |
40 #include "gimple-fold.h" | 40 #include "gimple-fold.h" |
41 #include "tree-eh.h" | 41 #include "tree-eh.h" |
42 #include "gimple-iterator.h" | 42 #include "gimple-iterator.h" |
43 #include "gimple-walk.h" | 43 #include "gimple-walk.h" |
44 #include "tree-cfg.h" | 44 #include "tree-cfg.h" |
45 #include "tree-dfa.h" | |
45 #include "tree-ssa-loop-manip.h" | 46 #include "tree-ssa-loop-manip.h" |
46 #include "tree-ssa-loop-niter.h" | 47 #include "tree-ssa-loop-niter.h" |
47 #include "tree-ssa-loop.h" | 48 #include "tree-ssa-loop.h" |
48 #include "tree-into-ssa.h" | 49 #include "tree-into-ssa.h" |
49 #include "tree-ssa.h" | 50 #include "tree-ssa.h" |
62 #include "alloc-pool.h" | 63 #include "alloc-pool.h" |
63 #include "domwalk.h" | 64 #include "domwalk.h" |
64 #include "tree-cfgcleanup.h" | 65 #include "tree-cfgcleanup.h" |
65 #include "stringpool.h" | 66 #include "stringpool.h" |
66 #include "attribs.h" | 67 #include "attribs.h" |
67 | 68 #include "vr-values.h" |
68 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL } | 69 #include "builtins.h" |
69 | 70 #include "wide-int-range.h" |
70 /* Allocation pools for tree-vrp allocations. */ | |
71 static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges"); | |
72 static bitmap_obstack vrp_equiv_obstack; | |
73 | 71 |
74 /* Set of SSA names found live during the RPO traversal of the function | 72 /* Set of SSA names found live during the RPO traversal of the function |
75 for still active basic-blocks. */ | 73 for still active basic-blocks. */ |
76 static sbitmap *live; | 74 static sbitmap *live; |
77 | 75 |
76 /* Initialize value_range. */ | |
77 | |
78 void | |
79 value_range::set (enum value_range_kind kind, tree min, tree max, | |
80 bitmap equiv) | |
81 { | |
82 m_kind = kind; | |
83 m_min = min; | |
84 m_max = max; | |
85 | |
86 /* Since updating the equivalence set involves deep copying the | |
87 bitmaps, only do it if absolutely necessary. | |
88 | |
89 All equivalence bitmaps are allocated from the same obstack. So | |
90 we can use the obstack associated with EQUIV to allocate vr->equiv. */ | |
91 if (m_equiv == NULL | |
92 && equiv != NULL) | |
93 m_equiv = BITMAP_ALLOC (equiv->obstack); | |
94 | |
95 if (equiv != m_equiv) | |
96 { | |
97 if (equiv && !bitmap_empty_p (equiv)) | |
98 bitmap_copy (m_equiv, equiv); | |
99 else | |
100 bitmap_clear (m_equiv); | |
101 } | |
102 if (flag_checking) | |
103 check (); | |
104 } | |
105 | |
106 value_range::value_range (value_range_kind kind, tree min, tree max, | |
107 bitmap equiv) | |
108 { | |
109 m_equiv = NULL; | |
110 set (kind, min, max, equiv); | |
111 } | |
112 | |
113 /* Like above, but keep the equivalences intact. */ | |
114 | |
115 void | |
116 value_range::update (value_range_kind kind, tree min, tree max) | |
117 { | |
118 set (kind, min, max, m_equiv); | |
119 } | |
120 | |
121 /* Copy value_range in FROM into THIS while avoiding bitmap sharing. | |
122 | |
123 Note: The code that avoids the bitmap sharing looks at the existing | |
124 this->m_equiv, so this function cannot be used to initialize an | |
125 object. Use the constructors for initialization. */ | |
126 | |
127 void | |
128 value_range::deep_copy (const value_range *from) | |
129 { | |
130 set (from->m_kind, from->min (), from->max (), from->m_equiv); | |
131 } | |
132 | |
133 /* Check the validity of the range. */ | |
134 | |
135 void | |
136 value_range::check () | |
137 { | |
138 switch (m_kind) | |
139 { | |
140 case VR_RANGE: | |
141 case VR_ANTI_RANGE: | |
142 { | |
143 int cmp; | |
144 | |
145 gcc_assert (m_min && m_max); | |
146 | |
147 gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max)); | |
148 | |
149 /* Creating ~[-MIN, +MAX] is stupid because that would be | |
150 the empty set. */ | |
151 if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE) | |
152 gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max)); | |
153 | |
154 cmp = compare_values (m_min, m_max); | |
155 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); | |
156 break; | |
157 } | |
158 case VR_UNDEFINED: | |
159 case VR_VARYING: | |
160 gcc_assert (!m_min && !m_max); | |
161 gcc_assert (!m_equiv || bitmap_empty_p (m_equiv)); | |
162 break; | |
163 default: | |
164 gcc_unreachable (); | |
165 } | |
166 } | |
167 | |
168 /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if | |
169 IGNORE_EQUIVS is TRUE. */ | |
170 | |
171 bool | |
172 value_range::equal_p (const value_range &other, bool ignore_equivs) const | |
173 { | |
174 return (m_kind == other.m_kind | |
175 && vrp_operand_equal_p (m_min, other.m_min) | |
176 && vrp_operand_equal_p (m_max, other.m_max) | |
177 && (ignore_equivs | |
178 || vrp_bitmap_equal_p (m_equiv, other.m_equiv))); | |
179 } | |
180 | |
181 /* Return equality while ignoring equivalence bitmap. */ | |
182 | |
183 bool | |
184 value_range::ignore_equivs_equal_p (const value_range &other) const | |
185 { | |
186 return equal_p (other, /*ignore_equivs=*/true); | |
187 } | |
188 | |
189 bool | |
190 value_range::operator== (const value_range &other) const | |
191 { | |
192 return equal_p (other, /*ignore_equivs=*/false); | |
193 } | |
194 | |
195 bool | |
196 value_range::operator!= (const value_range &other) const | |
197 { | |
198 return !(*this == other); | |
199 } | |
200 | |
201 /* Return TRUE if this is a symbolic range. */ | |
202 | |
203 bool | |
204 value_range::symbolic_p () const | |
205 { | |
206 return (!varying_p () | |
207 && !undefined_p () | |
208 && (!is_gimple_min_invariant (m_min) | |
209 || !is_gimple_min_invariant (m_max))); | |
210 } | |
211 | |
212 /* NOTE: This is not the inverse of symbolic_p because the range | |
213 could also be varying or undefined. Ideally they should be inverse | |
214 of each other, with varying only applying to symbolics. Varying of | |
215 constants would be represented as [-MIN, +MAX]. */ | |
216 | |
217 bool | |
218 value_range::constant_p () const | |
219 { | |
220 return (!varying_p () | |
221 && !undefined_p () | |
222 && TREE_CODE (m_min) == INTEGER_CST | |
223 && TREE_CODE (m_max) == INTEGER_CST); | |
224 } | |
225 | |
226 void | |
227 value_range::set_undefined () | |
228 { | |
229 equiv_clear (); | |
230 *this = value_range (VR_UNDEFINED, NULL, NULL, NULL); | |
231 } | |
232 | |
233 void | |
234 value_range::set_varying () | |
235 { | |
236 equiv_clear (); | |
237 *this = value_range (VR_VARYING, NULL, NULL, NULL); | |
238 } | |
239 | |
240 /* Return TRUE if it is possible that range contains VAL. */ | |
241 | |
242 bool | |
243 value_range::may_contain_p (tree val) const | |
244 { | |
245 if (varying_p ()) | |
246 return true; | |
247 | |
248 if (undefined_p ()) | |
249 return true; | |
250 | |
251 if (m_kind == VR_ANTI_RANGE) | |
252 { | |
253 int res = value_inside_range (val, m_min, m_max); | |
254 return res == 0 || res == -2; | |
255 } | |
256 return value_inside_range (val, m_min, m_max) != 0; | |
257 } | |
258 | |
259 void | |
260 value_range::equiv_clear () | |
261 { | |
262 if (m_equiv) | |
263 bitmap_clear (m_equiv); | |
264 } | |
265 | |
266 /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence | |
267 bitmap. If no equivalence table has been created, OBSTACK is the | |
268 obstack to use (NULL for the default obstack). | |
269 | |
270 This is the central point where equivalence processing can be | |
271 turned on/off. */ | |
272 | |
273 void | |
274 value_range::equiv_add (const_tree var, | |
275 const value_range *var_vr, | |
276 bitmap_obstack *obstack) | |
277 { | |
278 if (!m_equiv) | |
279 m_equiv = BITMAP_ALLOC (obstack); | |
280 unsigned ver = SSA_NAME_VERSION (var); | |
281 bitmap_set_bit (m_equiv, ver); | |
282 if (var_vr && var_vr->m_equiv) | |
283 bitmap_ior_into (m_equiv, var_vr->m_equiv); | |
284 } | |
285 | |
286 /* If range is a singleton, place it in RESULT and return TRUE. | |
287 Note: A singleton can be any gimple invariant, not just constants. | |
288 So, [&x, &x] counts as a singleton. */ | |
289 | |
290 bool | |
291 value_range::singleton_p (tree *result) const | |
292 { | |
293 if (m_kind == VR_RANGE | |
294 && vrp_operand_equal_p (m_min, m_max) | |
295 && is_gimple_min_invariant (m_min)) | |
296 { | |
297 if (result) | |
298 *result = m_min; | |
299 return true; | |
300 } | |
301 return false; | |
302 } | |
303 | |
304 tree | |
305 value_range::type () const | |
306 { | |
307 /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are | |
308 known to have non-zero min/max. */ | |
309 gcc_assert (m_min); | |
310 return TREE_TYPE (m_min); | |
311 } | |
312 | |
313 /* Dump value range to FILE. */ | |
314 | |
315 void | |
316 value_range::dump (FILE *file) const | |
317 { | |
318 if (undefined_p ()) | |
319 fprintf (file, "UNDEFINED"); | |
320 else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE) | |
321 { | |
322 tree type = TREE_TYPE (min ()); | |
323 | |
324 fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : ""); | |
325 | |
326 if (INTEGRAL_TYPE_P (type) | |
327 && !TYPE_UNSIGNED (type) | |
328 && vrp_val_is_min (min ())) | |
329 fprintf (file, "-INF"); | |
330 else | |
331 print_generic_expr (file, min ()); | |
332 | |
333 fprintf (file, ", "); | |
334 | |
335 if (INTEGRAL_TYPE_P (type) | |
336 && vrp_val_is_max (max ())) | |
337 fprintf (file, "+INF"); | |
338 else | |
339 print_generic_expr (file, max ()); | |
340 | |
341 fprintf (file, "]"); | |
342 | |
343 if (m_equiv) | |
344 { | |
345 bitmap_iterator bi; | |
346 unsigned i, c = 0; | |
347 | |
348 fprintf (file, " EQUIVALENCES: { "); | |
349 | |
350 EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi) | |
351 { | |
352 print_generic_expr (file, ssa_name (i)); | |
353 fprintf (file, " "); | |
354 c++; | |
355 } | |
356 | |
357 fprintf (file, "} (%u elements)", c); | |
358 } | |
359 } | |
360 else if (varying_p ()) | |
361 fprintf (file, "VARYING"); | |
362 else | |
363 fprintf (file, "INVALID RANGE"); | |
364 } | |
365 | |
366 void | |
367 value_range::dump () const | |
368 { | |
369 dump_value_range (stderr, this); | |
370 fprintf (stderr, "\n"); | |
371 } | |
372 | |
78 /* Return true if the SSA name NAME is live on the edge E. */ | 373 /* Return true if the SSA name NAME is live on the edge E. */ |
79 | 374 |
80 static bool | 375 static bool |
81 live_on_edge (edge e, tree name) | 376 live_on_edge (edge e, tree name) |
82 { | 377 { |
83 return (live[e->dest->index] | 378 return (live[e->dest->index] |
84 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); | 379 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |
85 } | 380 } |
86 | |
87 /* Local functions. */ | |
88 static int compare_values (tree val1, tree val2); | |
89 static int compare_values_warnv (tree val1, tree val2, bool *); | |
90 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, | |
91 tree, tree, bool, bool *, | |
92 bool *); | |
93 | |
94 struct assert_info | |
95 { | |
96 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ | |
97 enum tree_code comp_code; | |
98 | |
99 /* Name to register the assert for. */ | |
100 tree name; | |
101 | |
102 /* Value being compared against. */ | |
103 tree val; | |
104 | |
105 /* Expression to compare. */ | |
106 tree expr; | |
107 }; | |
108 | 381 |
109 /* Location information for ASSERT_EXPRs. Each instance of this | 382 /* Location information for ASSERT_EXPRs. Each instance of this |
110 structure describes an ASSERT_EXPR for an SSA name. Since a single | 383 structure describes an ASSERT_EXPR for an SSA name. Since a single |
111 SSA name may have more than one assertion associated with it, these | 384 SSA name may have more than one assertion associated with it, these |
112 locations are kept in a linked list attached to the corresponding | 385 locations are kept in a linked list attached to the corresponding |
143 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] | 416 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] |
144 holds a list of ASSERT_LOCUS_T nodes that describe where | 417 holds a list of ASSERT_LOCUS_T nodes that describe where |
145 ASSERT_EXPRs for SSA name N_I should be inserted. */ | 418 ASSERT_EXPRs for SSA name N_I should be inserted. */ |
146 static assert_locus **asserts_for; | 419 static assert_locus **asserts_for; |
147 | 420 |
148 /* Value range array. After propagation, VR_VALUE[I] holds the range | |
149 of values that SSA name N_I may take. */ | |
150 static unsigned num_vr_values; | |
151 static value_range **vr_value; | |
152 static bool values_propagated; | |
153 | |
154 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the | |
155 number of executable edges we saw the last time we visited the | |
156 node. */ | |
157 static int *vr_phi_edge_counts; | |
158 | |
159 struct switch_update { | |
160 gswitch *stmt; | |
161 tree vec; | |
162 }; | |
163 | |
164 static vec<edge> to_remove_edges; | |
165 static vec<switch_update> to_update_switch_stmts; | |
166 | |
167 | |
168 /* Return the maximum value for TYPE. */ | 421 /* Return the maximum value for TYPE. */ |
169 | 422 |
170 static inline tree | 423 tree |
171 vrp_val_max (const_tree type) | 424 vrp_val_max (const_tree type) |
172 { | 425 { |
173 if (!INTEGRAL_TYPE_P (type)) | 426 if (!INTEGRAL_TYPE_P (type)) |
174 return NULL_TREE; | 427 return NULL_TREE; |
175 | 428 |
176 return TYPE_MAX_VALUE (type); | 429 return TYPE_MAX_VALUE (type); |
177 } | 430 } |
178 | 431 |
179 /* Return the minimum value for TYPE. */ | 432 /* Return the minimum value for TYPE. */ |
180 | 433 |
181 static inline tree | 434 tree |
182 vrp_val_min (const_tree type) | 435 vrp_val_min (const_tree type) |
183 { | 436 { |
184 if (!INTEGRAL_TYPE_P (type)) | 437 if (!INTEGRAL_TYPE_P (type)) |
185 return NULL_TREE; | 438 return NULL_TREE; |
186 | 439 |
190 /* Return whether VAL is equal to the maximum value of its type. | 443 /* Return whether VAL is equal to the maximum value of its type. |
191 We can't do a simple equality comparison with TYPE_MAX_VALUE because | 444 We can't do a simple equality comparison with TYPE_MAX_VALUE because |
192 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE | 445 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE |
193 is not == to the integer constant with the same value in the type. */ | 446 is not == to the integer constant with the same value in the type. */ |
194 | 447 |
195 static inline bool | 448 bool |
196 vrp_val_is_max (const_tree val) | 449 vrp_val_is_max (const_tree val) |
197 { | 450 { |
198 tree type_max = vrp_val_max (TREE_TYPE (val)); | 451 tree type_max = vrp_val_max (TREE_TYPE (val)); |
199 return (val == type_max | 452 return (val == type_max |
200 || (type_max != NULL_TREE | 453 || (type_max != NULL_TREE |
201 && operand_equal_p (val, type_max, 0))); | 454 && operand_equal_p (val, type_max, 0))); |
202 } | 455 } |
203 | 456 |
204 /* Return whether VAL is equal to the minimum value of its type. */ | 457 /* Return whether VAL is equal to the minimum value of its type. */ |
205 | 458 |
206 static inline bool | 459 bool |
207 vrp_val_is_min (const_tree val) | 460 vrp_val_is_min (const_tree val) |
208 { | 461 { |
209 tree type_min = vrp_val_min (TREE_TYPE (val)); | 462 tree type_min = vrp_val_min (TREE_TYPE (val)); |
210 return (val == type_min | 463 return (val == type_min |
211 || (type_min != NULL_TREE | 464 || (type_min != NULL_TREE |
212 && operand_equal_p (val, type_min, 0))); | 465 && operand_equal_p (val, type_min, 0))); |
213 } | 466 } |
214 | 467 |
468 /* VR_TYPE describes a range with minimum value *MIN and maximum | |
469 value *MAX. Restrict the range to the set of values that have | |
470 no bits set outside NONZERO_BITS. Update *MIN and *MAX and | |
471 return the new range type. | |
472 | |
473 SGN gives the sign of the values described by the range. */ | |
474 | |
475 enum value_range_kind | |
476 intersect_range_with_nonzero_bits (enum value_range_kind vr_type, | |
477 wide_int *min, wide_int *max, | |
478 const wide_int &nonzero_bits, | |
479 signop sgn) | |
480 { | |
481 if (vr_type == VR_ANTI_RANGE) | |
482 { | |
483 /* The VR_ANTI_RANGE is equivalent to the union of the ranges | |
484 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS | |
485 to create an inclusive upper bound for A and an inclusive lower | |
486 bound for B. */ | |
487 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits); | |
488 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits); | |
489 | |
490 /* If the calculation of A_MAX wrapped, A is effectively empty | |
491 and A_MAX is the highest value that satisfies NONZERO_BITS. | |
492 Likewise if the calculation of B_MIN wrapped, B is effectively | |
493 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */ | |
494 bool a_empty = wi::ge_p (a_max, *min, sgn); | |
495 bool b_empty = wi::le_p (b_min, *max, sgn); | |
496 | |
497 /* If both A and B are empty, there are no valid values. */ | |
498 if (a_empty && b_empty) | |
499 return VR_UNDEFINED; | |
500 | |
501 /* If exactly one of A or B is empty, return a VR_RANGE for the | |
502 other one. */ | |
503 if (a_empty || b_empty) | |
504 { | |
505 *min = b_min; | |
506 *max = a_max; | |
507 gcc_checking_assert (wi::le_p (*min, *max, sgn)); | |
508 return VR_RANGE; | |
509 } | |
510 | |
511 /* Update the VR_ANTI_RANGE bounds. */ | |
512 *min = a_max + 1; | |
513 *max = b_min - 1; | |
514 gcc_checking_assert (wi::le_p (*min, *max, sgn)); | |
515 | |
516 /* Now check whether the excluded range includes any values that | |
517 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */ | |
518 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min) | |
519 { | |
520 unsigned int precision = min->get_precision (); | |
521 *min = wi::min_value (precision, sgn); | |
522 *max = wi::max_value (precision, sgn); | |
523 vr_type = VR_RANGE; | |
524 } | |
525 } | |
526 if (vr_type == VR_RANGE) | |
527 { | |
528 *max = wi::round_down_for_mask (*max, nonzero_bits); | |
529 | |
530 /* Check that the range contains at least one valid value. */ | |
531 if (wi::gt_p (*min, *max, sgn)) | |
532 return VR_UNDEFINED; | |
533 | |
534 *min = wi::round_up_for_mask (*min, nonzero_bits); | |
535 gcc_checking_assert (wi::le_p (*min, *max, sgn)); | |
536 } | |
537 return vr_type; | |
538 } | |
215 | 539 |
216 /* Set value range VR to VR_UNDEFINED. */ | 540 /* Set value range VR to VR_UNDEFINED. */ |
217 | 541 |
218 static inline void | 542 static inline void |
219 set_value_range_to_undefined (value_range *vr) | 543 set_value_range_to_undefined (value_range *vr) |
220 { | 544 { |
221 vr->type = VR_UNDEFINED; | 545 vr->set_undefined (); |
222 vr->min = vr->max = NULL_TREE; | 546 } |
223 if (vr->equiv) | |
224 bitmap_clear (vr->equiv); | |
225 } | |
226 | |
227 | 547 |
228 /* Set value range VR to VR_VARYING. */ | 548 /* Set value range VR to VR_VARYING. */ |
229 | 549 |
230 static inline void | 550 void |
231 set_value_range_to_varying (value_range *vr) | 551 set_value_range_to_varying (value_range *vr) |
232 { | 552 { |
233 vr->type = VR_VARYING; | 553 vr->set_varying (); |
234 vr->min = vr->max = NULL_TREE; | 554 } |
235 if (vr->equiv) | |
236 bitmap_clear (vr->equiv); | |
237 } | |
238 | |
239 | 555 |
240 /* Set value range VR to {T, MIN, MAX, EQUIV}. */ | 556 /* Set value range VR to {T, MIN, MAX, EQUIV}. */ |
241 | 557 |
242 static void | 558 void |
243 set_value_range (value_range *vr, enum value_range_type t, tree min, | 559 set_value_range (value_range *vr, enum value_range_kind kind, |
244 tree max, bitmap equiv) | 560 tree min, tree max, bitmap equiv) |
245 { | 561 { |
246 /* Check the validity of the range. */ | 562 *vr = value_range (kind, min, max, equiv); |
247 if (flag_checking | 563 } |
248 && (t == VR_RANGE || t == VR_ANTI_RANGE)) | 564 |
249 { | 565 |
250 int cmp; | 566 /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}. |
251 | 567 This means adjusting VRTYPE, MIN and MAX representing the case of a |
252 gcc_assert (min && max); | |
253 | |
254 gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max)); | |
255 | |
256 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) | |
257 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); | |
258 | |
259 cmp = compare_values (min, max); | |
260 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); | |
261 } | |
262 | |
263 if (flag_checking | |
264 && (t == VR_UNDEFINED || t == VR_VARYING)) | |
265 { | |
266 gcc_assert (min == NULL_TREE && max == NULL_TREE); | |
267 gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); | |
268 } | |
269 | |
270 vr->type = t; | |
271 vr->min = min; | |
272 vr->max = max; | |
273 | |
274 /* Since updating the equivalence set involves deep copying the | |
275 bitmaps, only do it if absolutely necessary. */ | |
276 if (vr->equiv == NULL | |
277 && equiv != NULL) | |
278 vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack); | |
279 | |
280 if (equiv != vr->equiv) | |
281 { | |
282 if (equiv && !bitmap_empty_p (equiv)) | |
283 bitmap_copy (vr->equiv, equiv); | |
284 else | |
285 bitmap_clear (vr->equiv); | |
286 } | |
287 } | |
288 | |
289 | |
290 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. | |
291 This means adjusting T, MIN and MAX representing the case of a | |
292 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] | 568 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] |
293 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. | 569 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. |
294 In corner cases where MAX+1 or MIN-1 wraps this will fall back | 570 In corner cases where MAX+1 or MIN-1 wraps this will fall back |
295 to varying. | 571 to varying. |
296 This routine exists to ease canonicalization in the case where we | 572 This routine exists to ease canonicalization in the case where we |
297 extract ranges from var + CST op limit. */ | 573 extract ranges from var + CST op limit. */ |
298 | 574 |
299 static void | 575 void |
300 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t, | 576 value_range::set_and_canonicalize (enum value_range_kind kind, |
301 tree min, tree max, bitmap equiv) | 577 tree min, tree max, bitmap equiv) |
302 { | 578 { |
303 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ | 579 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ |
304 if (t == VR_UNDEFINED) | 580 if (kind == VR_UNDEFINED) |
305 { | 581 { |
306 set_value_range_to_undefined (vr); | 582 set_undefined (); |
307 return; | 583 return; |
308 } | 584 } |
309 else if (t == VR_VARYING) | 585 else if (kind == VR_VARYING) |
310 { | 586 { |
311 set_value_range_to_varying (vr); | 587 set_varying (); |
312 return; | 588 return; |
313 } | 589 } |
314 | 590 |
315 /* Nothing to canonicalize for symbolic ranges. */ | 591 /* Nothing to canonicalize for symbolic ranges. */ |
316 if (TREE_CODE (min) != INTEGER_CST | 592 if (TREE_CODE (min) != INTEGER_CST |
317 || TREE_CODE (max) != INTEGER_CST) | 593 || TREE_CODE (max) != INTEGER_CST) |
318 { | 594 { |
319 set_value_range (vr, t, min, max, equiv); | 595 set_value_range (this, kind, min, max, equiv); |
320 return; | 596 return; |
321 } | 597 } |
322 | 598 |
323 /* Wrong order for min and max, to swap them and the VR type we need | 599 /* Wrong order for min and max, to swap them and the VR type we need |
324 to adjust them. */ | 600 to adjust them. */ |
329 /* For one bit precision if max < min, then the swapped | 605 /* For one bit precision if max < min, then the swapped |
330 range covers all values, so for VR_RANGE it is varying and | 606 range covers all values, so for VR_RANGE it is varying and |
331 for VR_ANTI_RANGE empty range, so drop to varying as well. */ | 607 for VR_ANTI_RANGE empty range, so drop to varying as well. */ |
332 if (TYPE_PRECISION (TREE_TYPE (min)) == 1) | 608 if (TYPE_PRECISION (TREE_TYPE (min)) == 1) |
333 { | 609 { |
334 set_value_range_to_varying (vr); | 610 set_varying (); |
335 return; | 611 return; |
336 } | 612 } |
337 | 613 |
338 one = build_int_cst (TREE_TYPE (min), 1); | 614 one = build_int_cst (TREE_TYPE (min), 1); |
339 tmp = int_const_binop (PLUS_EXPR, max, one); | 615 tmp = int_const_binop (PLUS_EXPR, max, one); |
343 /* There's one corner case, if we had [C+1, C] before we now have | 619 /* There's one corner case, if we had [C+1, C] before we now have |
344 that again. But this represents an empty value range, so drop | 620 that again. But this represents an empty value range, so drop |
345 to varying in this case. */ | 621 to varying in this case. */ |
346 if (tree_int_cst_lt (max, min)) | 622 if (tree_int_cst_lt (max, min)) |
347 { | 623 { |
348 set_value_range_to_varying (vr); | 624 set_varying (); |
349 return; | 625 return; |
350 } | 626 } |
351 | 627 |
352 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; | 628 kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; |
353 } | 629 } |
354 | 630 |
355 /* Anti-ranges that can be represented as ranges should be so. */ | 631 /* Anti-ranges that can be represented as ranges should be so. */ |
356 if (t == VR_ANTI_RANGE) | 632 if (kind == VR_ANTI_RANGE) |
357 { | 633 { |
358 bool is_min = vrp_val_is_min (min); | 634 /* For -fstrict-enums we may receive out-of-range ranges so consider |
359 bool is_max = vrp_val_is_max (max); | 635 values < -INF and values > INF as -INF/INF as well. */ |
636 tree type = TREE_TYPE (min); | |
637 bool is_min = (INTEGRAL_TYPE_P (type) | |
638 && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0); | |
639 bool is_max = (INTEGRAL_TYPE_P (type) | |
640 && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0); | |
360 | 641 |
361 if (is_min && is_max) | 642 if (is_min && is_max) |
362 { | 643 { |
363 /* We cannot deal with empty ranges, drop to varying. | 644 /* We cannot deal with empty ranges, drop to varying. |
364 ??? This could be VR_UNDEFINED instead. */ | 645 ??? This could be VR_UNDEFINED instead. */ |
365 set_value_range_to_varying (vr); | 646 set_varying (); |
366 return; | 647 return; |
367 } | 648 } |
368 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 | 649 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
369 && (is_min || is_max)) | 650 && (is_min || is_max)) |
370 { | 651 { |
372 as a singleton range. */ | 653 as a singleton range. */ |
373 if (is_min) | 654 if (is_min) |
374 min = max = vrp_val_max (TREE_TYPE (min)); | 655 min = max = vrp_val_max (TREE_TYPE (min)); |
375 else | 656 else |
376 min = max = vrp_val_min (TREE_TYPE (min)); | 657 min = max = vrp_val_min (TREE_TYPE (min)); |
377 t = VR_RANGE; | 658 kind = VR_RANGE; |
378 } | 659 } |
379 else if (is_min | 660 else if (is_min |
380 /* As a special exception preserve non-null ranges. */ | 661 /* As a special exception preserve non-null ranges. */ |
381 && !(TYPE_UNSIGNED (TREE_TYPE (min)) | 662 && !(TYPE_UNSIGNED (TREE_TYPE (min)) |
382 && integer_zerop (max))) | 663 && integer_zerop (max))) |
383 { | 664 { |
384 tree one = build_int_cst (TREE_TYPE (max), 1); | 665 tree one = build_int_cst (TREE_TYPE (max), 1); |
385 min = int_const_binop (PLUS_EXPR, max, one); | 666 min = int_const_binop (PLUS_EXPR, max, one); |
386 max = vrp_val_max (TREE_TYPE (max)); | 667 max = vrp_val_max (TREE_TYPE (max)); |
387 t = VR_RANGE; | 668 kind = VR_RANGE; |
388 } | 669 } |
389 else if (is_max) | 670 else if (is_max) |
390 { | 671 { |
391 tree one = build_int_cst (TREE_TYPE (min), 1); | 672 tree one = build_int_cst (TREE_TYPE (min), 1); |
392 max = int_const_binop (MINUS_EXPR, min, one); | 673 max = int_const_binop (MINUS_EXPR, min, one); |
393 min = vrp_val_min (TREE_TYPE (min)); | 674 min = vrp_val_min (TREE_TYPE (min)); |
394 t = VR_RANGE; | 675 kind = VR_RANGE; |
395 } | 676 } |
396 } | 677 } |
397 | 678 |
398 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky | 679 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky |
399 to make sure VRP iteration terminates, otherwise we can get into | 680 to make sure VRP iteration terminates, otherwise we can get into |
400 oscillations. */ | 681 oscillations. */ |
401 | 682 |
402 set_value_range (vr, t, min, max, equiv); | 683 set_value_range (this, kind, min, max, equiv); |
403 } | |
404 | |
405 /* Copy value range FROM into value range TO. */ | |
406 | |
407 static inline void | |
408 copy_value_range (value_range *to, value_range *from) | |
409 { | |
410 set_value_range (to, from->type, from->min, from->max, from->equiv); | |
411 } | 684 } |
412 | 685 |
413 /* Set value range VR to a single value. This function is only called | 686 /* Set value range VR to a single value. This function is only called |
414 with values we get from statements, and exists to clear the | 687 with values we get from statements, and exists to clear the |
415 TREE_OVERFLOW flag. */ | 688 TREE_OVERFLOW flag. */ |
416 | 689 |
417 static inline void | 690 void |
418 set_value_range_to_value (value_range *vr, tree val, bitmap equiv) | 691 set_value_range_to_value (value_range *vr, tree val, bitmap equiv) |
419 { | 692 { |
420 gcc_assert (is_gimple_min_invariant (val)); | 693 gcc_assert (is_gimple_min_invariant (val)); |
421 if (TREE_OVERFLOW_P (val)) | 694 if (TREE_OVERFLOW_P (val)) |
422 val = drop_tree_overflow (val); | 695 val = drop_tree_overflow (val); |
423 set_value_range (vr, VR_RANGE, val, val, equiv); | 696 set_value_range (vr, VR_RANGE, val, val, equiv); |
424 } | 697 } |
425 | 698 |
426 /* Set value range VR to a non-negative range of type TYPE. */ | 699 /* Set value range VR to a non-NULL range of type TYPE. */ |
427 | 700 |
428 static inline void | 701 void |
429 set_value_range_to_nonnegative (value_range *vr, tree type) | 702 set_value_range_to_nonnull (value_range *vr, tree type) |
430 { | 703 { |
431 tree zero = build_int_cst (type, 0); | 704 tree zero = build_int_cst (type, 0); |
432 set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv); | 705 vr->update (VR_ANTI_RANGE, zero, zero); |
433 } | |
434 | |
435 /* Set value range VR to a non-NULL range of type TYPE. */ | |
436 | |
437 static inline void | |
438 set_value_range_to_nonnull (value_range *vr, tree type) | |
439 { | |
440 tree zero = build_int_cst (type, 0); | |
441 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); | |
442 } | 706 } |
443 | 707 |
444 | 708 |
445 /* Set value range VR to a NULL range of type TYPE. */ | 709 /* Set value range VR to a NULL range of type TYPE. */ |
446 | 710 |
447 static inline void | 711 void |
448 set_value_range_to_null (value_range *vr, tree type) | 712 set_value_range_to_null (value_range *vr, tree type) |
449 { | 713 { |
450 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); | 714 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv ()); |
451 } | 715 } |
452 | |
453 | |
454 /* Set value range VR to a range of a truthvalue of type TYPE. */ | |
455 | |
456 static inline void | |
457 set_value_range_to_truthvalue (value_range *vr, tree type) | |
458 { | |
459 if (TYPE_PRECISION (type) == 1) | |
460 set_value_range_to_varying (vr); | |
461 else | |
462 set_value_range (vr, VR_RANGE, | |
463 build_int_cst (type, 0), build_int_cst (type, 1), | |
464 vr->equiv); | |
465 } | |
466 | |
467 | |
468 /* If abs (min) < abs (max), set VR to [-max, max], if | |
469 abs (min) >= abs (max), set VR to [-min, min]. */ | |
470 | |
471 static void | |
472 abs_extent_range (value_range *vr, tree min, tree max) | |
473 { | |
474 int cmp; | |
475 | |
476 gcc_assert (TREE_CODE (min) == INTEGER_CST); | |
477 gcc_assert (TREE_CODE (max) == INTEGER_CST); | |
478 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); | |
479 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); | |
480 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); | |
481 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); | |
482 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) | |
483 { | |
484 set_value_range_to_varying (vr); | |
485 return; | |
486 } | |
487 cmp = compare_values (min, max); | |
488 if (cmp == -1) | |
489 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); | |
490 else if (cmp == 0 || cmp == 1) | |
491 { | |
492 max = min; | |
493 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); | |
494 } | |
495 else | |
496 { | |
497 set_value_range_to_varying (vr); | |
498 return; | |
499 } | |
500 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); | |
501 } | |
502 | |
503 | |
504 /* Return value range information for VAR. | |
505 | |
506 If we have no values ranges recorded (ie, VRP is not running), then | |
507 return NULL. Otherwise create an empty range if none existed for VAR. */ | |
508 | |
509 static value_range * | |
510 get_value_range (const_tree var) | |
511 { | |
512 static const value_range vr_const_varying | |
513 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL }; | |
514 value_range *vr; | |
515 tree sym; | |
516 unsigned ver = SSA_NAME_VERSION (var); | |
517 | |
518 /* If we have no recorded ranges, then return NULL. */ | |
519 if (! vr_value) | |
520 return NULL; | |
521 | |
522 /* If we query the range for a new SSA name return an unmodifiable VARYING. | |
523 We should get here at most from the substitute-and-fold stage which | |
524 will never try to change values. */ | |
525 if (ver >= num_vr_values) | |
526 return CONST_CAST (value_range *, &vr_const_varying); | |
527 | |
528 vr = vr_value[ver]; | |
529 if (vr) | |
530 return vr; | |
531 | |
532 /* After propagation finished do not allocate new value-ranges. */ | |
533 if (values_propagated) | |
534 return CONST_CAST (value_range *, &vr_const_varying); | |
535 | |
536 /* Create a default value range. */ | |
537 vr_value[ver] = vr = vrp_value_range_pool.allocate (); | |
538 memset (vr, 0, sizeof (*vr)); | |
539 | |
540 /* Defer allocating the equivalence set. */ | |
541 vr->equiv = NULL; | |
542 | |
543 /* If VAR is a default definition of a parameter, the variable can | |
544 take any value in VAR's type. */ | |
545 if (SSA_NAME_IS_DEFAULT_DEF (var)) | |
546 { | |
547 sym = SSA_NAME_VAR (var); | |
548 if (TREE_CODE (sym) == PARM_DECL) | |
549 { | |
550 /* Try to use the "nonnull" attribute to create ~[0, 0] | |
551 anti-ranges for pointers. Note that this is only valid with | |
552 default definitions of PARM_DECLs. */ | |
553 if (POINTER_TYPE_P (TREE_TYPE (sym)) | |
554 && (nonnull_arg_p (sym) | |
555 || get_ptr_nonnull (var))) | |
556 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); | |
557 else if (INTEGRAL_TYPE_P (TREE_TYPE (sym))) | |
558 { | |
559 wide_int min, max; | |
560 value_range_type rtype = get_range_info (var, &min, &max); | |
561 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE) | |
562 set_value_range (vr, rtype, | |
563 wide_int_to_tree (TREE_TYPE (var), min), | |
564 wide_int_to_tree (TREE_TYPE (var), max), | |
565 NULL); | |
566 else | |
567 set_value_range_to_varying (vr); | |
568 } | |
569 else | |
570 set_value_range_to_varying (vr); | |
571 } | |
572 else if (TREE_CODE (sym) == RESULT_DECL | |
573 && DECL_BY_REFERENCE (sym)) | |
574 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); | |
575 } | |
576 | |
577 return vr; | |
578 } | |
579 | |
580 /* Set value-ranges of all SSA names defined by STMT to varying. */ | |
581 | |
582 static void | |
583 set_defs_to_varying (gimple *stmt) | |
584 { | |
585 ssa_op_iter i; | |
586 tree def; | |
587 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF) | |
588 { | |
589 value_range *vr = get_value_range (def); | |
590 /* Avoid writing to vr_const_varying get_value_range may return. */ | |
591 if (vr->type != VR_VARYING) | |
592 set_value_range_to_varying (vr); | |
593 } | |
594 } | |
595 | |
596 | 716 |
597 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ | 717 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
598 | 718 |
599 static inline bool | 719 bool |
600 vrp_operand_equal_p (const_tree val1, const_tree val2) | 720 vrp_operand_equal_p (const_tree val1, const_tree val2) |
601 { | 721 { |
602 if (val1 == val2) | 722 if (val1 == val2) |
603 return true; | 723 return true; |
604 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) | 724 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) |
606 return true; | 726 return true; |
607 } | 727 } |
608 | 728 |
609 /* Return true, if the bitmaps B1 and B2 are equal. */ | 729 /* Return true, if the bitmaps B1 and B2 are equal. */ |
610 | 730 |
611 static inline bool | 731 bool |
612 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) | 732 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
613 { | 733 { |
614 return (b1 == b2 | 734 return (b1 == b2 |
615 || ((!b1 || bitmap_empty_p (b1)) | 735 || ((!b1 || bitmap_empty_p (b1)) |
616 && (!b2 || bitmap_empty_p (b2))) | 736 && (!b2 || bitmap_empty_p (b2))) |
617 || (b1 && b2 | 737 || (b1 && b2 |
618 && bitmap_equal_p (b1, b2))); | 738 && bitmap_equal_p (b1, b2))); |
619 } | 739 } |
620 | 740 |
621 /* Update the value range and equivalence set for variable VAR to | 741 /* Return true if VR is [0, 0]. */ |
622 NEW_VR. Return true if NEW_VR is different from VAR's previous | |
623 value. | |
624 | |
625 NOTE: This function assumes that NEW_VR is a temporary value range | |
626 object created for the sole purpose of updating VAR's range. The | |
627 storage used by the equivalence set from NEW_VR will be freed by | |
628 this function. Do not call update_value_range when NEW_VR | |
629 is the range object associated with another SSA name. */ | |
630 | 742 |
631 static inline bool | 743 static inline bool |
632 update_value_range (const_tree var, value_range *new_vr) | 744 range_is_null (const value_range *vr) |
633 { | 745 { |
634 value_range *old_vr; | 746 return vr->null_p (); |
635 bool is_new; | 747 } |
636 | |
637 /* If there is a value-range on the SSA name from earlier analysis | |
638 factor that in. */ | |
639 if (INTEGRAL_TYPE_P (TREE_TYPE (var))) | |
640 { | |
641 wide_int min, max; | |
642 value_range_type rtype = get_range_info (var, &min, &max); | |
643 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE) | |
644 { | |
645 tree nr_min, nr_max; | |
646 nr_min = wide_int_to_tree (TREE_TYPE (var), min); | |
647 nr_max = wide_int_to_tree (TREE_TYPE (var), max); | |
648 value_range nr = VR_INITIALIZER; | |
649 set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL); | |
650 vrp_intersect_ranges (new_vr, &nr); | |
651 } | |
652 } | |
653 | |
654 /* Update the value range, if necessary. */ | |
655 old_vr = get_value_range (var); | |
656 is_new = old_vr->type != new_vr->type | |
657 || !vrp_operand_equal_p (old_vr->min, new_vr->min) | |
658 || !vrp_operand_equal_p (old_vr->max, new_vr->max) | |
659 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv); | |
660 | |
661 if (is_new) | |
662 { | |
663 /* Do not allow transitions up the lattice. The following | |
664 is slightly more awkward than just new_vr->type < old_vr->type | |
665 because VR_RANGE and VR_ANTI_RANGE need to be considered | |
666 the same. We may not have is_new when transitioning to | |
667 UNDEFINED. If old_vr->type is VARYING, we shouldn't be | |
668 called. */ | |
669 if (new_vr->type == VR_UNDEFINED) | |
670 { | |
671 BITMAP_FREE (new_vr->equiv); | |
672 set_value_range_to_varying (old_vr); | |
673 set_value_range_to_varying (new_vr); | |
674 return true; | |
675 } | |
676 else | |
677 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max, | |
678 new_vr->equiv); | |
679 } | |
680 | |
681 BITMAP_FREE (new_vr->equiv); | |
682 | |
683 return is_new; | |
684 } | |
685 | |
686 | |
687 /* Add VAR and VAR's equivalence set to EQUIV. This is the central | |
688 point where equivalence processing can be turned on/off. */ | |
689 | |
690 static void | |
691 add_equivalence (bitmap *equiv, const_tree var) | |
692 { | |
693 unsigned ver = SSA_NAME_VERSION (var); | |
694 value_range *vr = get_value_range (var); | |
695 | |
696 if (*equiv == NULL) | |
697 *equiv = BITMAP_ALLOC (&vrp_equiv_obstack); | |
698 bitmap_set_bit (*equiv, ver); | |
699 if (vr && vr->equiv) | |
700 bitmap_ior_into (*equiv, vr->equiv); | |
701 } | |
702 | |
703 | |
704 /* Return true if VR is ~[0, 0]. */ | |
705 | 748 |
706 static inline bool | 749 static inline bool |
707 range_is_nonnull (value_range *vr) | 750 range_is_nonnull (const value_range *vr) |
708 { | 751 { |
709 return vr->type == VR_ANTI_RANGE | 752 return (vr->kind () == VR_ANTI_RANGE |
710 && integer_zerop (vr->min) | 753 && vr->min () == vr->max () |
711 && integer_zerop (vr->max); | 754 && integer_zerop (vr->min ())); |
712 } | |
713 | |
714 | |
715 /* Return true if VR is [0, 0]. */ | |
716 | |
717 static inline bool | |
718 range_is_null (value_range *vr) | |
719 { | |
720 return vr->type == VR_RANGE | |
721 && integer_zerop (vr->min) | |
722 && integer_zerop (vr->max); | |
723 } | 755 } |
724 | 756 |
725 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily | 757 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily |
726 a singleton. */ | 758 a singleton. */ |
727 | 759 |
728 static inline bool | 760 bool |
729 range_int_cst_p (value_range *vr) | 761 range_int_cst_p (const value_range *vr) |
730 { | 762 { |
731 return (vr->type == VR_RANGE | 763 return (vr->kind () == VR_RANGE |
732 && TREE_CODE (vr->max) == INTEGER_CST | 764 && TREE_CODE (vr->min ()) == INTEGER_CST |
733 && TREE_CODE (vr->min) == INTEGER_CST); | 765 && TREE_CODE (vr->max ()) == INTEGER_CST); |
734 } | 766 } |
735 | 767 |
736 /* Return true if VR is a INTEGER_CST singleton. */ | 768 /* Return true if VR is a INTEGER_CST singleton. */ |
737 | 769 |
738 static inline bool | 770 bool |
739 range_int_cst_singleton_p (value_range *vr) | 771 range_int_cst_singleton_p (const value_range *vr) |
740 { | 772 { |
741 return (range_int_cst_p (vr) | 773 return (range_int_cst_p (vr) |
742 && tree_int_cst_equal (vr->min, vr->max)); | 774 && tree_int_cst_equal (vr->min (), vr->max ())); |
743 } | |
744 | |
745 /* Return true if value range VR involves at least one symbol. */ | |
746 | |
747 static inline bool | |
748 symbolic_range_p (value_range *vr) | |
749 { | |
750 return (!is_gimple_min_invariant (vr->min) | |
751 || !is_gimple_min_invariant (vr->max)); | |
752 } | 775 } |
753 | 776 |
754 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE | 777 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE |
755 otherwise. We only handle additive operations and set NEG to true if the | 778 otherwise. We only handle additive operations and set NEG to true if the |
756 symbol is negated and INV to the invariant part, if any. */ | 779 symbol is negated and INV to the invariant part, if any. */ |
757 | 780 |
758 static tree | 781 tree |
759 get_single_symbol (tree t, bool *neg, tree *inv) | 782 get_single_symbol (tree t, bool *neg, tree *inv) |
760 { | 783 { |
761 bool neg_; | 784 bool neg_; |
762 tree inv_; | 785 tree inv_; |
763 | 786 |
820 | 843 |
821 if (integer_zerop (inv)) | 844 if (integer_zerop (inv)) |
822 return t; | 845 return t; |
823 | 846 |
824 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); | 847 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); |
825 } | |
826 | |
827 /* Return true if value range VR involves exactly one symbol SYM. */ | |
828 | |
829 static bool | |
830 symbolic_range_based_on_p (value_range *vr, const_tree sym) | |
831 { | |
832 bool neg, min_has_symbol, max_has_symbol; | |
833 tree inv; | |
834 | |
835 if (is_gimple_min_invariant (vr->min)) | |
836 min_has_symbol = false; | |
837 else if (get_single_symbol (vr->min, &neg, &inv) == sym) | |
838 min_has_symbol = true; | |
839 else | |
840 return false; | |
841 | |
842 if (is_gimple_min_invariant (vr->max)) | |
843 max_has_symbol = false; | |
844 else if (get_single_symbol (vr->max, &neg, &inv) == sym) | |
845 max_has_symbol = true; | |
846 else | |
847 return false; | |
848 | |
849 return (min_has_symbol || max_has_symbol); | |
850 } | |
851 | |
852 /* Return true if the result of assignment STMT is known to be non-zero. */ |
853 | |
854 static bool | |
855 gimple_assign_nonzero_p (gimple *stmt) | |
856 { | |
857 enum tree_code code = gimple_assign_rhs_code (stmt); | |
858 bool strict_overflow_p; | |
859 switch (get_gimple_rhs_class (code)) | |
860 { | |
861 case GIMPLE_UNARY_RHS: | |
862 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
863 gimple_expr_type (stmt), | |
864 gimple_assign_rhs1 (stmt), | |
865 &strict_overflow_p); | |
866 case GIMPLE_BINARY_RHS: | |
867 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
868 gimple_expr_type (stmt), | |
869 gimple_assign_rhs1 (stmt), | |
870 gimple_assign_rhs2 (stmt), | |
871 &strict_overflow_p); | |
872 case GIMPLE_TERNARY_RHS: | |
873 return false; | |
874 case GIMPLE_SINGLE_RHS: | |
875 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt), | |
876 &strict_overflow_p); | |
877 case GIMPLE_INVALID_RHS: | |
878 gcc_unreachable (); | |
879 default: | |
880 gcc_unreachable (); | |
881 } | |
882 } | |
883 | |
884 /* Return true if STMT is known to compute a non-zero value. */ | |
885 | |
886 static bool | |
887 gimple_stmt_nonzero_p (gimple *stmt) | |
888 { | |
889 switch (gimple_code (stmt)) | |
890 { | |
891 case GIMPLE_ASSIGN: | |
892 return gimple_assign_nonzero_p (stmt); | |
893 case GIMPLE_CALL: | |
894 { | |
895 tree fndecl = gimple_call_fndecl (stmt); | |
896 if (!fndecl) return false; | |
897 if (flag_delete_null_pointer_checks && !flag_check_new | |
898 && DECL_IS_OPERATOR_NEW (fndecl) | |
899 && !TREE_NOTHROW (fndecl)) | |
900 return true; | |
901 /* References are always non-NULL. */ | |
902 if (flag_delete_null_pointer_checks | |
903 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE) | |
904 return true; | |
905 if (flag_delete_null_pointer_checks && | |
906 lookup_attribute ("returns_nonnull", | |
907 TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))) | |
908 return true; | |
909 | |
910 gcall *call_stmt = as_a<gcall *> (stmt); | |
911 unsigned rf = gimple_call_return_flags (call_stmt); | |
912 if (rf & ERF_RETURNS_ARG) | |
913 { | |
914 unsigned argnum = rf & ERF_RETURN_ARG_MASK; | |
915 if (argnum < gimple_call_num_args (call_stmt)) | |
916 { | |
917 tree arg = gimple_call_arg (call_stmt, argnum); | |
918 if (SSA_VAR_P (arg) | |
919 && infer_nonnull_range_by_attribute (stmt, arg)) | |
920 return true; | |
921 } | |
922 } | |
923 return gimple_alloca_call_p (stmt); | |
924 } | |
925 default: | |
926 gcc_unreachable (); | |
927 } | |
928 } | |
929 | |
930 /* Like tree_expr_nonzero_p, but this function uses value ranges | |
931 obtained so far. */ | |
932 | |
933 static bool | |
934 vrp_stmt_computes_nonzero (gimple *stmt) | |
935 { | |
936 if (gimple_stmt_nonzero_p (stmt)) | |
937 return true; | |
938 | |
939 /* If we have an expression of the form &X->a, then the expression | |
940 is nonnull if X is nonnull. */ | |
941 if (is_gimple_assign (stmt) | |
942 && gimple_assign_rhs_code (stmt) == ADDR_EXPR) | |
943 { | |
944 tree expr = gimple_assign_rhs1 (stmt); | |
945 tree base = get_base_address (TREE_OPERAND (expr, 0)); | |
946 | |
947 if (base != NULL_TREE | |
948 && TREE_CODE (base) == MEM_REF | |
949 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) | |
950 { | |
951 value_range *vr = get_value_range (TREE_OPERAND (base, 0)); | |
952 if (range_is_nonnull (vr)) | |
953 return true; | |
954 } | |
955 } | |
956 | |
957 return false; | |
958 } | |
959 | |
960 /* Returns true if EXPR is a valid value (as expected by compare_values) -- | |
961 a gimple invariant, or SSA_NAME +- CST. */ | |
962 | |
963 static bool | |
964 valid_value_p (tree expr) | |
965 { | |
966 if (TREE_CODE (expr) == SSA_NAME) | |
967 return true; | |
968 | |
969 if (TREE_CODE (expr) == PLUS_EXPR | |
970 || TREE_CODE (expr) == MINUS_EXPR) | |
971 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME | |
972 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); | |
973 | |
974 return is_gimple_min_invariant (expr); | |
975 } | 848 } |
976 | 849 |
977 /* Return | 850 /* Return |
978 1 if VAL < VAL2 | 851 1 if VAL < VAL2 |
979 0 if !(VAL < VAL2) | 852 0 if !(VAL < VAL2) |
980 -2 if those are incomparable. */ | 853 -2 if those are incomparable. */ |
981 static inline int | 854 int |
982 operand_less_p (tree val, tree val2) | 855 operand_less_p (tree val, tree val2) |
983 { | 856 { |
984 /* LT is folded faster than GE and others. Inline the common case. */ | 857 /* LT is folded faster than GE and others. Inline the common case. */ |
985 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) | 858 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) |
986 return tree_int_cst_lt (val, val2); | 859 return tree_int_cst_lt (val, val2); |
1018 | 891 |
1019 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to | 892 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to |
1020 true if the return value is only valid if we assume that signed | 893 true if the return value is only valid if we assume that signed |
1021 overflow is undefined. */ | 894 overflow is undefined. */ |
1022 | 895 |
1023 static int | 896 int |
1024 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) | 897 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) |
1025 { | 898 { |
1026 if (val1 == val2) | 899 if (val1 == val2) |
1027 return 0; | 900 return 0; |
1028 | 901 |
1119 { | 992 { |
1120 /* We cannot compare overflowed values. */ | 993 /* We cannot compare overflowed values. */ |
1121 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) | 994 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) |
1122 return -2; | 995 return -2; |
1123 | 996 |
1124 return tree_int_cst_compare (val1, val2); | 997 if (TREE_CODE (val1) == INTEGER_CST |
998 && TREE_CODE (val2) == INTEGER_CST) | |
999 return tree_int_cst_compare (val1, val2); | |
1000 | |
1001 if (poly_int_tree_p (val1) && poly_int_tree_p (val2)) | |
1002 { | |
1003 if (known_eq (wi::to_poly_widest (val1), | |
1004 wi::to_poly_widest (val2))) | |
1005 return 0; | |
1006 if (known_lt (wi::to_poly_widest (val1), | |
1007 wi::to_poly_widest (val2))) | |
1008 return -1; | |
1009 if (known_gt (wi::to_poly_widest (val1), | |
1010 wi::to_poly_widest (val2))) | |
1011 return 1; | |
1012 } | |
1013 | |
1014 return -2; | |
1125 } | 1015 } |
1126 else | 1016 else |
1127 { | 1017 { |
1128 tree t; | 1018 tree t; |
1129 | 1019 |
1155 } | 1045 } |
1156 } | 1046 } |
1157 | 1047 |
1158 /* Compare values like compare_values_warnv. */ | 1048 /* Compare values like compare_values_warnv. */ |
1159 | 1049 |
1160 static int | 1050 int |
1161 compare_values (tree val1, tree val2) | 1051 compare_values (tree val1, tree val2) |
1162 { | 1052 { |
1163 bool sop; | 1053 bool sop; |
1164 return compare_values_warnv (val1, val2, &sop); | 1054 return compare_values_warnv (val1, val2, &sop); |
1165 } | 1055 } |
1170 -2 if we cannot tell either way. | 1060 -2 if we cannot tell either way. |
1171 | 1061 |
1172 Benchmark compile/20001226-1.c compilation time after changing this | 1062 Benchmark compile/20001226-1.c compilation time after changing this |
1173 function. */ | 1063 function. */ |
1174 | 1064 |
1175 static inline int | 1065 int |
1176 value_inside_range (tree val, tree min, tree max) | 1066 value_inside_range (tree val, tree min, tree max) |
1177 { | 1067 { |
1178 int cmp1, cmp2; | 1068 int cmp1, cmp2; |
1179 | 1069 |
1180 cmp1 = operand_less_p (val, min); | 1070 cmp1 = operand_less_p (val, min); |
1189 | 1079 |
1190 return !cmp2; | 1080 return !cmp2; |
1191 } | 1081 } |
1192 | 1082 |
1193 | 1083 |
1194 /* Return true if value ranges VR0 and VR1 have a non-empty | 1084 /* Return TRUE if *VR includes the value zero. */ |
1195 intersection. | 1085 |
1196 | 1086 bool |
1197 Benchmark compile/20001226-1.c compilation time after changing this | 1087 range_includes_zero_p (const value_range *vr) |
1198 function. | 1088 { |
1199 */ | 1089 if (vr->varying_p () || vr->undefined_p ()) |
1200 | |
1201 static inline bool | |
1202 value_ranges_intersect_p (value_range *vr0, value_range *vr1) | |
1203 { | |
1204 /* The value ranges do not intersect if the maximum of the first range is | |
1205 less than the minimum of the second range or vice versa. | |
1206 When those relations are unknown, we can't do any better. */ | |
1207 if (operand_less_p (vr0->max, vr1->min) != 0) | |
1208 return false; | |
1209 if (operand_less_p (vr1->max, vr0->min) != 0) | |
1210 return false; | |
1211 return true; | |
1212 } | |
1213 | |
1214 | |
1215 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not | |
1216 include the value zero, -2 if we cannot tell. */ | |
1217 | |
1218 static inline int | |
1219 range_includes_zero_p (tree min, tree max) | |
1220 { | |
1221 tree zero = build_int_cst (TREE_TYPE (min), 0); | |
1222 return value_inside_range (zero, min, max); | |
1223 } | |
1224 | |
1225 /* Return true if *VR is known to only contain nonnegative values. */ |
1226 | |
1227 static inline bool | |
1228 value_range_nonnegative_p (value_range *vr) | |
1229 { | |
1230 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range | |
1231 which would return a useful value should be encoded as a | |
1232 VR_RANGE. */ | |
1233 if (vr->type == VR_RANGE) | |
1234 { | |
1235 int result = compare_values (vr->min, integer_zero_node); | |
1236 return (result == 0 || result == 1); | |
1237 } | |
1238 | |
1239 return false; | |
1240 } | |
1241 | |
1242 /* If *VR has a value range that is a single constant value return that, |
1243 otherwise return NULL_TREE. */ | |
1244 | |
1245 static tree | |
1246 value_range_constant_singleton (value_range *vr) | |
1247 { | |
1248 if (vr->type == VR_RANGE | |
1249 && vrp_operand_equal_p (vr->min, vr->max) | |
1250 && is_gimple_min_invariant (vr->min)) | |
1251 return vr->min; | |
1252 | |
1253 return NULL_TREE; | |
1254 } | |
1255 | |
1256 /* If OP has a value range with a single constant value return that, | |
1257 otherwise return NULL_TREE. This returns OP itself if OP is a | |
1258 constant. */ | |
1259 | |
1260 static tree | |
1261 op_with_constant_singleton_value_range (tree op) | |
1262 { | |
1263 if (is_gimple_min_invariant (op)) | |
1264 return op; | |
1265 | |
1266 if (TREE_CODE (op) != SSA_NAME) | |
1267 return NULL_TREE; | |
1268 | |
1269 return value_range_constant_singleton (get_value_range (op)); | |
1270 } | |
1271 | |
1272 /* Return true if op is in a boolean [0, 1] value-range. */ | |
1273 | |
1274 static bool | |
1275 op_with_boolean_value_range_p (tree op) | |
1276 { | |
1277 value_range *vr; | |
1278 | |
1279 if (TYPE_PRECISION (TREE_TYPE (op)) == 1) | |
1280 return true; | 1090 return true; |
1281 | 1091 tree zero = build_int_cst (vr->type (), 0); |
1282 if (integer_zerop (op) | 1092 return vr->may_contain_p (zero); |
1283 || integer_onep (op)) | 1093 } |
1284 return true; | 1094 |
1285 | 1095 /* If *VR has a value range that is a single constant value return that, |
1286 if (TREE_CODE (op) != SSA_NAME) | 1096 otherwise return NULL_TREE. |
1287 return false; | 1097 |
1288 | 1098 ?? This actually returns TRUE for [&x, &x], so perhaps "constant" |
1289 vr = get_value_range (op); | 1099 is not the best name. */ |
1290 return (vr->type == VR_RANGE | 1100 |
1291 && integer_zerop (vr->min) | 1101 tree |
1292 && integer_onep (vr->max)); | 1102 value_range_constant_singleton (const value_range *vr) |
1293 } | 1103 { |
1294 | 1104 tree result = NULL; |
1295 /* Extract value range information for VAR when (OP COND_CODE LIMIT) is | 1105 if (vr->singleton_p (&result)) |
1296 true and store it in *VR_P. */ | 1106 return result; |
1297 | 1107 return NULL; |
1298 static void | 1108 } |
1299 extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code, | 1109 |
1300 tree op, tree limit, | 1110 /* Value range wrapper for wide_int_range_set_zero_nonzero_bits. |
1301 value_range *vr_p) | 1111 |
1302 { | 1112 Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR. |
1303 tree min, max, type; | 1113 |
1304 value_range *limit_vr; | 1114 Return TRUE if VR was a constant range and we were able to compute |
1305 type = TREE_TYPE (var); | 1115 the bit masks. */ |
1306 gcc_assert (limit != var); | 1116 |
1307 | 1117 bool |
1308 /* For pointer arithmetic, we only keep track of pointer equality | 1118 vrp_set_zero_nonzero_bits (const tree expr_type, |
1309 and inequality. */ | 1119 const value_range *vr, |
1310 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) | |
1311 { | |
1312 set_value_range_to_varying (vr_p); | |
1313 return; | |
1314 } | |
1315 | |
1316 /* If LIMIT is another SSA name and LIMIT has a range of its own, | |
1317 try to use LIMIT's range to avoid creating symbolic ranges | |
1318 unnecessarily. */ | |
1319 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; | |
1320 | |
1321 /* LIMIT's range is only interesting if it has any useful information. */ | |
1322 if (! limit_vr | |
1323 || limit_vr->type == VR_UNDEFINED | |
1324 || limit_vr->type == VR_VARYING | |
1325 || (symbolic_range_p (limit_vr) | |
1326 && ! (limit_vr->type == VR_RANGE | |
1327 && (limit_vr->min == limit_vr->max | |
1328 || operand_equal_p (limit_vr->min, limit_vr->max, 0))))) | |
1329 limit_vr = NULL; | |
1330 | |
1331 /* Initially, the new range has the same set of equivalences of | |
1332 VAR's range. This will be revised before returning the final | |
1333 value. Since assertions may be chained via mutually exclusive | |
1334 predicates, we will need to trim the set of equivalences before | |
1335 we are done. */ | |
1336 gcc_assert (vr_p->equiv == NULL); | |
1337 add_equivalence (&vr_p->equiv, var); | |
1338 | |
1339 /* Extract a new range based on the asserted comparison for VAR and | |
1340 LIMIT's value range. Notice that if LIMIT has an anti-range, we | |
1341 will only use it for equality comparisons (EQ_EXPR). For any | |
1342 other kind of assertion, we cannot derive a range from LIMIT's | |
1343 anti-range that can be used to describe the new range. For | |
1344 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], | |
1345 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is | |
1346 no single range for x_2 that could describe LE_EXPR, so we might | |
1347 as well build the range [b_4, +INF] for it. | |
1348 One special case we handle is extracting a range from a | |
1349 range test encoded as (unsigned)var + CST <= limit. */ | |
1350 if (TREE_CODE (op) == NOP_EXPR | |
1351 || TREE_CODE (op) == PLUS_EXPR) | |
1352 { | |
1353 if (TREE_CODE (op) == PLUS_EXPR) | |
1354 { | |
1355 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)), | |
1356 TREE_OPERAND (op, 1)); | |
1357 max = int_const_binop (PLUS_EXPR, limit, min); | |
1358 op = TREE_OPERAND (op, 0); | |
1359 } | |
1360 else | |
1361 { | |
1362 min = build_int_cst (TREE_TYPE (var), 0); | |
1363 max = limit; | |
1364 } | |
1365 | |
1366 /* Make sure to not set TREE_OVERFLOW on the final type | |
1367 conversion. We are willingly interpreting large positive | |
1368 unsigned values as negative signed values here. */ | |
1369 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false); | |
1370 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false); | |
1371 | |
1372 /* We can transform a max, min range to an anti-range or | |
1373 vice-versa. Use set_and_canonicalize_value_range which does | |
1374 this for us. */ | |
1375 if (cond_code == LE_EXPR) | |
1376 set_and_canonicalize_value_range (vr_p, VR_RANGE, | |
1377 min, max, vr_p->equiv); | |
1378 else if (cond_code == GT_EXPR) | |
1379 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, | |
1380 min, max, vr_p->equiv); | |
1381 else | |
1382 gcc_unreachable (); | |
1383 } | |
1384 else if (cond_code == EQ_EXPR) | |
1385 { | |
1386 enum value_range_type range_type; | |
1387 | |
1388 if (limit_vr) | |
1389 { | |
1390 range_type = limit_vr->type; | |
1391 min = limit_vr->min; | |
1392 max = limit_vr->max; | |
1393 } | |
1394 else | |
1395 { | |
1396 range_type = VR_RANGE; | |
1397 min = limit; | |
1398 max = limit; | |
1399 } | |
1400 | |
1401 set_value_range (vr_p, range_type, min, max, vr_p->equiv); | |
1402 | |
1403 /* When asserting the equality VAR == LIMIT and LIMIT is another | |
1404 SSA name, the new range will also inherit the equivalence set | |
1405 from LIMIT. */ | |
1406 if (TREE_CODE (limit) == SSA_NAME) | |
1407 add_equivalence (&vr_p->equiv, limit); | |
1408 } | |
1409 else if (cond_code == NE_EXPR) | |
1410 { | |
1411 /* As described above, when LIMIT's range is an anti-range and | |
1412 this assertion is an inequality (NE_EXPR), then we cannot | |
1413 derive anything from the anti-range. For instance, if | |
1414 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does | |
1415 not imply that VAR's range is [0, 0]. So, in the case of | |
1416 anti-ranges, we just assert the inequality using LIMIT and | |
1417 not its anti-range. | |
1418 | |
1419 If LIMIT_VR is a range, we can only use it to build a new | |
1420 anti-range if LIMIT_VR is a single-valued range. For | |
1421 instance, if LIMIT_VR is [0, 1], the predicate | |
1422 VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. | |
1423 Rather, it means that for value 0 VAR should be ~[0, 0] | |
1424 and for value 1, VAR should be ~[1, 1]. We cannot | |
1425 represent these ranges. | |
1426 | |
1427 The only situation in which we can build a valid | |
1428 anti-range is when LIMIT_VR is a single-valued range | |
1429 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, | |
1430 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ | |
1431 if (limit_vr | |
1432 && limit_vr->type == VR_RANGE | |
1433 && compare_values (limit_vr->min, limit_vr->max) == 0) | |
1434 { | |
1435 min = limit_vr->min; | |
1436 max = limit_vr->max; | |
1437 } | |
1438 else | |
1439 { | |
1440 /* In any other case, we cannot use LIMIT's range to build a | |
1441 valid anti-range. */ | |
1442 min = max = limit; | |
1443 } | |
1444 | |
1445 /* If MIN and MAX cover the whole range for their type, then | |
1446 just use the original LIMIT. */ | |
1447 if (INTEGRAL_TYPE_P (type) | |
1448 && vrp_val_is_min (min) | |
1449 && vrp_val_is_max (max)) | |
1450 min = max = limit; | |
1451 | |
1452 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, | |
1453 min, max, vr_p->equiv); | |
1454 } | |
1455 else if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
1456 { | |
1457 min = TYPE_MIN_VALUE (type); | |
1458 | |
1459 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1460 max = limit; | |
1461 else | |
1462 { | |
1463 /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1464 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for | |
1465 LT_EXPR. */ | |
1466 max = limit_vr->max; | |
1467 } | |
1468 | |
1469 /* If the maximum value forces us to be out of bounds, simply punt. | |
1470 It would be pointless to try and do anything more since this | |
1471 all should be optimized away above us. */ | |
1472 if (cond_code == LT_EXPR | |
1473 && compare_values (max, min) == 0) | |
1474 set_value_range_to_varying (vr_p); | |
1475 else | |
1476 { | |
1477 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ | |
1478 if (cond_code == LT_EXPR) | |
1479 { | |
1480 if (TYPE_PRECISION (TREE_TYPE (max)) == 1 | |
1481 && !TYPE_UNSIGNED (TREE_TYPE (max))) | |
1482 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max, | |
1483 build_int_cst (TREE_TYPE (max), -1)); | |
1484 else | |
1485 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, | |
1486 build_int_cst (TREE_TYPE (max), 1)); | |
1487 /* Signal to compare_values_warnv this expr doesn't overflow. */ | |
1488 if (EXPR_P (max)) | |
1489 TREE_NO_WARNING (max) = 1; | |
1490 } | |
1491 | |
1492 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); | |
1493 } | |
1494 } | |
1495 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) | |
1496 { | |
1497 max = TYPE_MAX_VALUE (type); | |
1498 | |
1499 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1500 min = limit; | |
1501 else | |
1502 { | |
1503 /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1504 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for | |
1505 GT_EXPR. */ | |
1506 min = limit_vr->min; | |
1507 } | |
1508 | |
1509 /* If the minimum value forces us to be out of bounds, simply punt. | |
1510 It would be pointless to try and do anything more since this | |
1511 all should be optimized away above us. */ | |
1512 if (cond_code == GT_EXPR | |
1513 && compare_values (min, max) == 0) | |
1514 set_value_range_to_varying (vr_p); | |
1515 else | |
1516 { | |
1517 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ | |
1518 if (cond_code == GT_EXPR) | |
1519 { | |
1520 if (TYPE_PRECISION (TREE_TYPE (min)) == 1 | |
1521 && !TYPE_UNSIGNED (TREE_TYPE (min))) | |
1522 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min, | |
1523 build_int_cst (TREE_TYPE (min), -1)); | |
1524 else | |
1525 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, | |
1526 build_int_cst (TREE_TYPE (min), 1)); | |
1527 /* Signal to compare_values_warnv this expr doesn't overflow. */ | |
1528 if (EXPR_P (min)) | |
1529 TREE_NO_WARNING (min) = 1; | |
1530 } | |
1531 | |
1532 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); | |
1533 } | |
1534 } | |
1535 else | |
1536 gcc_unreachable (); | |
1537 | |
1538 /* Finally intersect the new range with what we already know about var. */ | |
1539 vrp_intersect_ranges (vr_p, get_value_range (var)); | |
1540 } | |
1541 | |
1542 /* Extract value range information from an ASSERT_EXPR EXPR and store | |
1543 it in *VR_P. */ | |
1544 | |
1545 static void | |
1546 extract_range_from_assert (value_range *vr_p, tree expr) | |
1547 { | |
1548 tree var = ASSERT_EXPR_VAR (expr); | |
1549 tree cond = ASSERT_EXPR_COND (expr); | |
1550 tree limit, op; | |
1551 enum tree_code cond_code; | |
1552 gcc_assert (COMPARISON_CLASS_P (cond)); | |
1553 | |
1554 /* Find VAR in the ASSERT_EXPR conditional. */ | |
1555 if (var == TREE_OPERAND (cond, 0) | |
1556 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR | |
1557 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) | |
1558 { | |
1559 /* If the predicate is of the form VAR COMP LIMIT, then we just | |
1560 take LIMIT from the RHS and use the same comparison code. */ | |
1561 cond_code = TREE_CODE (cond); | |
1562 limit = TREE_OPERAND (cond, 1); | |
1563 op = TREE_OPERAND (cond, 0); | |
1564 } | |
1565 else | |
1566 { | |
1567 /* If the predicate is of the form LIMIT COMP VAR, then we need | |
1568 to flip around the comparison code to create the proper range | |
1569 for VAR. */ | |
1570 cond_code = swap_tree_comparison (TREE_CODE (cond)); | |
1571 limit = TREE_OPERAND (cond, 0); | |
1572 op = TREE_OPERAND (cond, 1); | |
1573 } | |
1574 extract_range_for_var_from_comparison_expr (var, cond_code, op, | |
1575 limit, vr_p); | |
1576 } | |
1577 | |
1578 /* Extract range information from SSA name VAR and store it in VR. If | |
1579 VAR has an interesting range, use it. Otherwise, create the | |
1580 range [VAR, VAR] and return it. This is useful in situations where | |
1581 we may have conditionals testing values of VARYING names. For | |
1582 instance, | |
1583 | |
1584 x_3 = y_5; | |
1585 if (x_3 > y_5) | |
1586 ... | |
1587 | |
1588 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is | |
1589 always false. */ | |
1590 | |
1591 static void | |
1592 extract_range_from_ssa_name (value_range *vr, tree var) | |
1593 { | |
1594 value_range *var_vr = get_value_range (var); | |
1595 | |
1596 if (var_vr->type != VR_VARYING) | |
1597 copy_value_range (vr, var_vr); | |
1598 else | |
1599 set_value_range (vr, VR_RANGE, var, var, NULL); | |
1600 | |
1601 add_equivalence (&vr->equiv, var); | |
1602 } | |
1603 | |
1604 | |
1605 /* Wrapper around int_const_binop. If the operation overflows and | |
1606 overflow is undefined, then adjust the result to be | |
1607 -INF or +INF depending on CODE, VAL1 and VAL2. Sets *OVERFLOW_P | |
1608 to whether the operation overflowed. For division by zero | |
1609 the result is indeterminate but *OVERFLOW_P is set. */ | |
1610 | |
1611 static wide_int | |
1612 vrp_int_const_binop (enum tree_code code, tree val1, tree val2, | |
1613 bool *overflow_p) | |
1614 { | |
1615 bool overflow = false; | |
1616 signop sign = TYPE_SIGN (TREE_TYPE (val1)); | |
1617 wide_int res; | |
1618 | |
1619 *overflow_p = false; | |
1620 | |
1621 switch (code) | |
1622 { | |
1623 case RSHIFT_EXPR: | |
1624 case LSHIFT_EXPR: | |
1625 { | |
1626 wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1))); | |
1627 if (wi::neg_p (wval2)) | |
1628 { | |
1629 wval2 = -wval2; | |
1630 if (code == RSHIFT_EXPR) | |
1631 code = LSHIFT_EXPR; | |
1632 else | |
1633 code = RSHIFT_EXPR; | |
1634 } | |
1635 | |
1636 if (code == RSHIFT_EXPR) | |
1637 /* It's unclear from the C standard whether shifts can overflow. | |
1638 The following code ignores overflow; perhaps a C standard | |
1639 interpretation ruling is needed. */ | |
1640 res = wi::rshift (wi::to_wide (val1), wval2, sign); | |
1641 else | |
1642 res = wi::lshift (wi::to_wide (val1), wval2); | |
1643 break; | |
1644 } | |
1645 | |
1646 case MULT_EXPR: | |
1647 res = wi::mul (wi::to_wide (val1), | |
1648 wi::to_wide (val2), sign, &overflow); | |
1649 break; | |
1650 | |
1651 case TRUNC_DIV_EXPR: | |
1652 case EXACT_DIV_EXPR: | |
1653 if (val2 == 0) | |
1654 { | |
1655 *overflow_p = true; | |
1656 return res; | |
1657 } | |
1658 else | |
1659 res = wi::div_trunc (wi::to_wide (val1), | |
1660 wi::to_wide (val2), sign, &overflow); | |
1661 break; | |
1662 | |
1663 case FLOOR_DIV_EXPR: | |
1664 if (val2 == 0) | |
1665 { | |
1666 *overflow_p = true; | |
1667 return res; | |
1668 } | |
1669 res = wi::div_floor (wi::to_wide (val1), | |
1670 wi::to_wide (val2), sign, &overflow); | |
1671 break; | |
1672 | |
1673 case CEIL_DIV_EXPR: | |
1674 if (val2 == 0) | |
1675 { | |
1676 *overflow_p = true; | |
1677 return res; | |
1678 } | |
1679 res = wi::div_ceil (wi::to_wide (val1), | |
1680 wi::to_wide (val2), sign, &overflow); | |
1681 break; | |
1682 | |
1683 case ROUND_DIV_EXPR: | |
1684 if (val2 == 0) | |
1685 { | |
1686 *overflow_p = 0; | |
1687 return res; | |
1688 } | |
1689 res = wi::div_round (wi::to_wide (val1), | |
1690 wi::to_wide (val2), sign, &overflow); | |
1691 break; | |
1692 | |
1693 default: | |
1694 gcc_unreachable (); | |
1695 } | |
1696 | |
1697 if (overflow | |
1698 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) | |
1699 { | |
1700 /* If the operation overflowed return -INF or +INF depending | |
1701 on the operation and the combination of signs of the operands. */ | |
1702 int sgn1 = tree_int_cst_sgn (val1); | |
1703 int sgn2 = tree_int_cst_sgn (val2); | |
1704 | |
1705 /* Notice that we only need to handle the restricted set of | |
1706 operations handled by extract_range_from_binary_expr. | |
1707 Among them, only multiplication, addition and subtraction | |
1708 can yield overflow without overflown operands because we | |
1709 are working with integral types only... except in the | |
1710 case VAL1 = -INF and VAL2 = -1 which overflows to +INF | |
1711 for division too. */ | |
1712 | |
1713 /* For multiplication, the sign of the overflow is given | |
1714 by the comparison of the signs of the operands. */ | |
1715 if ((code == MULT_EXPR && sgn1 == sgn2) | |
1716 /* For addition, the operands must be of the same sign | |
1717 to yield an overflow. Its sign is therefore that | |
1718 of one of the operands, for example the first. */ | |
1719 || (code == PLUS_EXPR && sgn1 >= 0) | |
1720 /* For subtraction, operands must be of | |
1721 different signs to yield an overflow. Its sign is | |
1722 therefore that of the first operand or the opposite of | |
1723 that of the second operand. A first operand of 0 counts | |
1724 as positive here, for the corner case 0 - (-INF), which | |
1725 overflows, but must yield +INF. */ | |
1726 || (code == MINUS_EXPR && sgn1 >= 0) | |
1727 /* For division, the only case is -INF / -1 = +INF. */ | |
1728 || code == TRUNC_DIV_EXPR | |
1729 || code == FLOOR_DIV_EXPR | |
1730 || code == CEIL_DIV_EXPR | |
1731 || code == EXACT_DIV_EXPR | |
1732 || code == ROUND_DIV_EXPR) | |
1733 return wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)), | |
1734 TYPE_SIGN (TREE_TYPE (val1))); | |
1735 else | |
1736 return wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)), | |
1737 TYPE_SIGN (TREE_TYPE (val1))); | |
1738 } | |
1739 | |
1740 *overflow_p = overflow; | |
1741 | |
1742 return res; | |
1743 } | |
1744 | |
1745 | |
1746 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO | |
1747 bitmask if some bit is unset, it means for all numbers in the range | |
1748 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO | |
1749 bitmask if some bit is set, it means for all numbers in the range | |
1750 the bit is 1, otherwise it might be 0 or 1. */ | |
1751 | |
1752 static bool | |
1753 zero_nonzero_bits_from_vr (const tree expr_type, | |
1754 value_range *vr, | |
1755 wide_int *may_be_nonzero, | 1120 wide_int *may_be_nonzero, |
1756 wide_int *must_be_nonzero) | 1121 wide_int *must_be_nonzero) |
1757 { | 1122 { |
1758 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); | |
1759 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); | |
1760 if (!range_int_cst_p (vr)) | 1123 if (!range_int_cst_p (vr)) |
1761 return false; | 1124 { |
1762 | 1125 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |
1763 if (range_int_cst_singleton_p (vr)) | 1126 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); |
1764 { | 1127 return false; |
1765 *may_be_nonzero = wi::to_wide (vr->min); | 1128 } |
1766 *must_be_nonzero = *may_be_nonzero; | 1129 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type), |
1767 } | 1130 wi::to_wide (vr->min ()), |
1768 else if (tree_int_cst_sgn (vr->min) >= 0 | 1131 wi::to_wide (vr->max ()), |
1769 || tree_int_cst_sgn (vr->max) < 0) | 1132 *may_be_nonzero, *must_be_nonzero); |
1770 { | |
1771 wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max); | |
1772 *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max); | |
1773 *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max); | |
1774 if (xor_mask != 0) | |
1775 { | |
1776 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, | |
1777 may_be_nonzero->get_precision ()); | |
1778 *may_be_nonzero = *may_be_nonzero | mask; | |
1779 *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask); | |
1780 } | |
1781 } | |
1782 | |
1783 return true; | 1133 return true; |
1784 } | 1134 } |
1785 | 1135 |
1786 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR | 1136 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |
1787 so that *VR0 U *VR1 == *AR. Returns true if that is possible, | 1137 so that *VR0 U *VR1 == *AR. Returns true if that is possible, |
1788 false otherwise. If *AR can be represented with a single range | 1138 false otherwise. If *AR can be represented with a single range |
1789 *VR1 will be VR_UNDEFINED. */ | 1139 *VR1 will be VR_UNDEFINED. */ |
1790 | 1140 |
1791 static bool | 1141 static bool |
1792 ranges_from_anti_range (value_range *ar, | 1142 ranges_from_anti_range (const value_range *ar, |
1793 value_range *vr0, value_range *vr1) | 1143 value_range *vr0, value_range *vr1) |
1794 { | 1144 { |
1795 tree type = TREE_TYPE (ar->min); | 1145 tree type = ar->type (); |
1796 | 1146 |
1797 vr0->type = VR_UNDEFINED; | 1147 vr0->set_undefined (); |
1798 vr1->type = VR_UNDEFINED; | 1148 vr1->set_undefined (); |
1799 | 1149 |
1800 if (ar->type != VR_ANTI_RANGE | 1150 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U |
1801 || TREE_CODE (ar->min) != INTEGER_CST | 1151 [A+1, +INF]. Not sure if this helps in practice, though. */ |
1802 || TREE_CODE (ar->max) != INTEGER_CST | 1152 |
1153 if (ar->kind () != VR_ANTI_RANGE | |
1154 || TREE_CODE (ar->min ()) != INTEGER_CST | |
1155 || TREE_CODE (ar->max ()) != INTEGER_CST | |
1803 || !vrp_val_min (type) | 1156 || !vrp_val_min (type) |
1804 || !vrp_val_max (type)) | 1157 || !vrp_val_max (type)) |
1805 return false; | 1158 return false; |
1806 | 1159 |
1807 if (!vrp_val_is_min (ar->min)) | 1160 if (!vrp_val_is_min (ar->min ())) |
1808 { | 1161 *vr0 = value_range (VR_RANGE, |
1809 vr0->type = VR_RANGE; | 1162 vrp_val_min (type), |
1810 vr0->min = vrp_val_min (type); | 1163 wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1)); |
1811 vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1); | 1164 if (!vrp_val_is_max (ar->max ())) |
1812 } | 1165 *vr1 = value_range (VR_RANGE, |
1813 if (!vrp_val_is_max (ar->max)) | 1166 wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1), |
1814 { | 1167 vrp_val_max (type)); |
1815 vr1->type = VR_RANGE; | 1168 if (vr0->undefined_p ()) |
1816 vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1); | |
1817 vr1->max = vrp_val_max (type); | |
1818 } | |
1819 if (vr0->type == VR_UNDEFINED) | |
1820 { | 1169 { |
1821 *vr0 = *vr1; | 1170 *vr0 = *vr1; |
1822 vr1->type = VR_UNDEFINED; | 1171 vr1->set_undefined (); |
1823 } | 1172 } |
1824 | 1173 |
1825 return vr0->type != VR_UNDEFINED; | 1174 return !vr0->undefined_p (); |
1826 } | 1175 } |
1827 | 1176 |
1828 /* Helper to extract a value-range *VR for a multiplicative operation | 1177 /* Extract the components of a value range into a pair of wide ints in |
1829 *VR0 CODE *VR1. */ | 1178 [WMIN, WMAX]. |
1179 | |
1180 If the value range is anything but a VR_*RANGE of constants, the | |
1181 resulting wide ints are set to [-MIN, +MAX] for the type. */ | |
1182 | |
1183 static void inline | |
1184 extract_range_into_wide_ints (const value_range *vr, | |
1185 signop sign, unsigned prec, | |
1186 wide_int &wmin, wide_int &wmax) | |
1187 { | |
1188 gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ()); | |
1189 if (range_int_cst_p (vr)) | |
1190 { | |
1191 wmin = wi::to_wide (vr->min ()); | |
1192 wmax = wi::to_wide (vr->max ()); | |
1193 } | |
1194 else | |
1195 { | |
1196 wmin = wi::min_value (prec, sign); | |
1197 wmax = wi::max_value (prec, sign); | |
1198 } | |
1199 } | |
1200 | |
1201 /* Value range wrapper for wide_int_range_multiplicative_op: | |
1202 | |
1203 *VR = *VR0 .CODE. *VR1. */ | |
1830 | 1204 |
1831 static void | 1205 static void |
1832 extract_range_from_multiplicative_op_1 (value_range *vr, | 1206 extract_range_from_multiplicative_op (value_range *vr, |
1833 enum tree_code code, | 1207 enum tree_code code, |
1834 value_range *vr0, value_range *vr1) | 1208 const value_range *vr0, |
1835 { | 1209 const value_range *vr1) |
1836 enum value_range_type rtype; | 1210 { |
1837 wide_int val, min, max; | |
1838 bool sop; | |
1839 tree type; | |
1840 | |
1841 /* Multiplications, divisions and shifts are a bit tricky to handle, | |
1842 depending on the mix of signs we have in the two ranges, we | |
1843 need to operate on different values to get the minimum and | |
1844 maximum values for the new range. One approach is to figure | |
1845 out all the variations of range combinations and do the | |
1846 operations. | |
1847 | |
1848 However, this involves several calls to compare_values and it | |
1849 is pretty convoluted. It's simpler to do the 4 operations | |
1850 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP | |
1851 MAX1) and then figure the smallest and largest values to form | |
1852 the new range. */ | |
1853 gcc_assert (code == MULT_EXPR | 1211 gcc_assert (code == MULT_EXPR |
1854 || code == TRUNC_DIV_EXPR | 1212 || code == TRUNC_DIV_EXPR |
1855 || code == FLOOR_DIV_EXPR | 1213 || code == FLOOR_DIV_EXPR |
1856 || code == CEIL_DIV_EXPR | 1214 || code == CEIL_DIV_EXPR |
1857 || code == EXACT_DIV_EXPR | 1215 || code == EXACT_DIV_EXPR |
1858 || code == ROUND_DIV_EXPR | 1216 || code == ROUND_DIV_EXPR |
1859 || code == RSHIFT_EXPR | 1217 || code == RSHIFT_EXPR |
1860 || code == LSHIFT_EXPR); | 1218 || code == LSHIFT_EXPR); |
1861 gcc_assert (vr0->type == VR_RANGE | 1219 gcc_assert (vr0->kind () == VR_RANGE |
1862 && vr0->type == vr1->type); | 1220 && vr0->kind () == vr1->kind ()); |
1863 | 1221 |
1864 rtype = vr0->type; | 1222 tree type = vr0->type (); |
1865 type = TREE_TYPE (vr0->min); | 1223 wide_int res_lb, res_ub; |
1866 signop sgn = TYPE_SIGN (type); | 1224 wide_int vr0_lb = wi::to_wide (vr0->min ()); |
1867 | 1225 wide_int vr0_ub = wi::to_wide (vr0->max ()); |
1868 /* Compute the 4 cross operations and their minimum and maximum value. */ | 1226 wide_int vr1_lb = wi::to_wide (vr1->min ()); |
1869 sop = false; | 1227 wide_int vr1_ub = wi::to_wide (vr1->max ()); |
1870 val = vrp_int_const_binop (code, vr0->min, vr1->min, &sop); | 1228 bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type); |
1871 if (! sop) | 1229 unsigned prec = TYPE_PRECISION (type); |
1872 min = max = val; | 1230 |
1873 | 1231 if (wide_int_range_multiplicative_op (res_lb, res_ub, |
1874 if (vr1->max == vr1->min) | 1232 code, TYPE_SIGN (type), prec, |
1233 vr0_lb, vr0_ub, vr1_lb, vr1_ub, | |
1234 overflow_undefined)) | |
1235 vr->set_and_canonicalize (VR_RANGE, | |
1236 wide_int_to_tree (type, res_lb), | |
1237 wide_int_to_tree (type, res_ub), NULL); | |
1238 else | |
1239 set_value_range_to_varying (vr); | |
1240 } | |
1241 | |
1242 /* If BOUND will include a symbolic bound, adjust it accordingly, | |
1243 otherwise leave it as is. | |
1244 | |
1245 CODE is the original operation that combined the bounds (PLUS_EXPR | |
1246 or MINUS_EXPR). | |
1247 | |
1248 TYPE is the type of the original operation. | |
1249 | |
1250 SYM_OPn is the symbolic for OPn if it has a symbolic. | |
1251 | |
1252 NEG_OPn is TRUE if the OPn was negated. */ | |
1253 | |
1254 static void | |
1255 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type, | |
1256 tree sym_op0, tree sym_op1, | |
1257 bool neg_op0, bool neg_op1) | |
1258 { | |
1259 bool minus_p = (code == MINUS_EXPR); | |
1260 /* If the result bound is constant, we're done; otherwise, build the | |
1261 symbolic lower bound. */ | |
1262 if (sym_op0 == sym_op1) | |
1875 ; | 1263 ; |
1876 else if (! sop) | 1264 else if (sym_op0) |
1877 { | 1265 bound = build_symbolic_expr (type, sym_op0, |
1878 val = vrp_int_const_binop (code, vr0->min, vr1->max, &sop); | 1266 neg_op0, bound); |
1879 if (! sop) | 1267 else if (sym_op1) |
1880 { | 1268 { |
1881 if (wi::lt_p (val, min, sgn)) | 1269 /* We may not negate if that might introduce |
1882 min = val; | 1270 undefined overflow. */ |
1883 else if (wi::gt_p (val, max, sgn)) | 1271 if (!minus_p |
1884 max = val; | 1272 || neg_op1 |
1885 } | 1273 || TYPE_OVERFLOW_WRAPS (type)) |
1886 } | 1274 bound = build_symbolic_expr (type, sym_op1, |
1887 | 1275 neg_op1 ^ minus_p, bound); |
1888 if (vr0->max == vr0->min) | 1276 else |
1889 ; | 1277 bound = NULL_TREE; |
1890 else if (! sop) | 1278 } |
1891 { | 1279 } |
1892 val = vrp_int_const_binop (code, vr0->max, vr1->min, &sop); | 1280 |
1893 if (! sop) | 1281 /* Combine OP1 and OP1, which are two parts of a bound, into one wide |
1894 { | 1282 int bound according to CODE. CODE is the operation combining the |
1895 if (wi::lt_p (val, min, sgn)) | 1283 bound (either a PLUS_EXPR or a MINUS_EXPR). |
1896 min = val; | 1284 |
1897 else if (wi::gt_p (val, max, sgn)) | 1285 TYPE is the type of the combine operation. |
1898 max = val; | 1286 |
1899 } | 1287 WI is the wide int to store the result. |
1900 } | 1288 |
1901 | 1289 OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0 |
1902 if (vr0->min == vr0->max || vr1->min == vr1->max) | 1290 if over/underflow occurred. */ |
1903 ; | 1291 |
1904 else if (! sop) | 1292 static void |
1905 { | 1293 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf, |
1906 val = vrp_int_const_binop (code, vr0->max, vr1->max, &sop); | 1294 tree type, tree op0, tree op1) |
1907 if (! sop) | 1295 { |
1908 { | 1296 bool minus_p = (code == MINUS_EXPR); |
1909 if (wi::lt_p (val, min, sgn)) | 1297 const signop sgn = TYPE_SIGN (type); |
1910 min = val; | 1298 const unsigned int prec = TYPE_PRECISION (type); |
1911 else if (wi::gt_p (val, max, sgn)) | 1299 |
1912 max = val; | 1300 /* Combine the bounds, if any. */ |
1913 } | 1301 if (op0 && op1) |
1914 } | 1302 { |
1915 | 1303 if (minus_p) |
1916 /* If either operation overflowed, drop to VARYING. */ | 1304 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf); |
1917 if (sop) | 1305 else |
1918 { | 1306 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf); |
1919 set_value_range_to_varying (vr); | 1307 } |
1308 else if (op0) | |
1309 wi = wi::to_wide (op0); | |
1310 else if (op1) | |
1311 { | |
1312 if (minus_p) | |
1313 wi = wi::neg (wi::to_wide (op1), &ovf); | |
1314 else | |
1315 wi = wi::to_wide (op1); | |
1316 } | |
1317 else | |
1318 wi = wi::shwi (0, prec); | |
1319 } | |
1320 | |
1321 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and | |
1322 put the result in VR. | |
1323 | |
1324 TYPE is the type of the range. | |
1325 | |
1326 MIN_OVF and MAX_OVF indicate what type of overflow, if any, | |
1327 occurred while originally calculating WMIN or WMAX. -1 indicates | |
1328 underflow. +1 indicates overflow. 0 indicates neither. */ | |
1329 | |
1330 static void | |
1331 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max, | |
1332 tree type, | |
1333 const wide_int &wmin, const wide_int &wmax, | |
1334 wi::overflow_type min_ovf, | |
1335 wi::overflow_type max_ovf) | |
1336 { | |
1337 const signop sgn = TYPE_SIGN (type); | |
1338 const unsigned int prec = TYPE_PRECISION (type); | |
1339 | |
1340 /* For one bit precision if max < min, then the swapped | |
1341 range covers all values. */ | |
1342 if (prec == 1 && wi::lt_p (wmax, wmin, sgn)) | |
1343 { | |
1344 kind = VR_VARYING; | |
1920 return; | 1345 return; |
1921 } | 1346 } |
1922 | 1347 |
1923 /* If the new range has its limits swapped around (MIN > MAX), | 1348 if (TYPE_OVERFLOW_WRAPS (type)) |
1924 then the operation caused one of them to wrap around, mark | 1349 { |
1925 the new range VARYING. */ | 1350 /* If overflow wraps, truncate the values and adjust the |
1926 if (wi::gt_p (min, max, sgn)) | 1351 range kind and bounds appropriately. */ |
1927 { | 1352 wide_int tmin = wide_int::from (wmin, prec, sgn); |
1928 set_value_range_to_varying (vr); | 1353 wide_int tmax = wide_int::from (wmax, prec, sgn); |
1929 return; | 1354 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE)) |
1930 } | 1355 { |
1931 | 1356 /* If the limits are swapped, we wrapped around and cover |
1932 /* We punt for [-INF, +INF]. | 1357 the entire range. We have a similar check at the end of |
1933 We learn nothing when we have INF on both sides. | 1358 extract_range_from_binary_expr_1. */ |
1934 Note that we do accept [-INF, -INF] and [+INF, +INF]. */ | 1359 if (wi::gt_p (tmin, tmax, sgn)) |
1935 if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn)) | 1360 kind = VR_VARYING; |
1936 && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn))) | 1361 else |
1937 { | 1362 { |
1938 set_value_range_to_varying (vr); | 1363 kind = VR_RANGE; |
1939 return; | 1364 /* No overflow or both overflow or underflow. The |
1940 } | 1365 range kind stays VR_RANGE. */ |
1941 | 1366 min = wide_int_to_tree (type, tmin); |
1942 set_value_range (vr, rtype, | 1367 max = wide_int_to_tree (type, tmax); |
1943 wide_int_to_tree (type, min), | 1368 } |
1944 wide_int_to_tree (type, max), NULL); | 1369 return; |
1370 } | |
1371 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE) | |
1372 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE)) | |
1373 { | |
1374 /* Min underflow or max overflow. The range kind | |
1375 changes to VR_ANTI_RANGE. */ | |
1376 bool covers = false; | |
1377 wide_int tem = tmin; | |
1378 tmin = tmax + 1; | |
1379 if (wi::cmp (tmin, tmax, sgn) < 0) | |
1380 covers = true; | |
1381 tmax = tem - 1; | |
1382 if (wi::cmp (tmax, tem, sgn) > 0) | |
1383 covers = true; | |
1384 /* If the anti-range would cover nothing, drop to varying. | |
1385 Likewise if the anti-range bounds are outside of the | |
1386 types values. */ | |
1387 if (covers || wi::cmp (tmin, tmax, sgn) > 0) | |
1388 { | |
1389 kind = VR_VARYING; | |
1390 return; | |
1391 } | |
1392 kind = VR_ANTI_RANGE; | |
1393 min = wide_int_to_tree (type, tmin); | |
1394 max = wide_int_to_tree (type, tmax); | |
1395 return; | |
1396 } | |
1397 else | |
1398 { | |
1399 /* Other underflow and/or overflow, drop to VR_VARYING. */ | |
1400 kind = VR_VARYING; | |
1401 return; | |
1402 } | |
1403 } | |
1404 else | |
1405 { | |
1406 /* If overflow does not wrap, saturate to the types min/max | |
1407 value. */ | |
1408 wide_int type_min = wi::min_value (prec, sgn); | |
1409 wide_int type_max = wi::max_value (prec, sgn); | |
1410 kind = VR_RANGE; | |
1411 if (min_ovf == wi::OVF_UNDERFLOW) | |
1412 min = wide_int_to_tree (type, type_min); | |
1413 else if (min_ovf == wi::OVF_OVERFLOW) | |
1414 min = wide_int_to_tree (type, type_max); | |
1415 else | |
1416 min = wide_int_to_tree (type, wmin); | |
1417 | |
1418 if (max_ovf == wi::OVF_UNDERFLOW) | |
1419 max = wide_int_to_tree (type, type_min); | |
1420 else if (max_ovf == wi::OVF_OVERFLOW) | |
1421 max = wide_int_to_tree (type, type_max); | |
1422 else | |
1423 max = wide_int_to_tree (type, wmax); | |
1424 } | |
1945 } | 1425 } |
1946 | 1426 |
1947 /* Extract range information from a binary operation CODE based on | 1427 /* Extract range information from a binary operation CODE based on |
1948 the ranges of each of its operands *VR0 and *VR1 with resulting | 1428 the ranges of each of its operands *VR0 and *VR1 with resulting |
1949 type EXPR_TYPE. The resulting range is stored in *VR. */ | 1429 type EXPR_TYPE. The resulting range is stored in *VR. */ |
1950 | 1430 |
1951 static void | 1431 void |
1952 extract_range_from_binary_expr_1 (value_range *vr, | 1432 extract_range_from_binary_expr_1 (value_range *vr, |
1953 enum tree_code code, tree expr_type, | 1433 enum tree_code code, tree expr_type, |
1954 value_range *vr0_, value_range *vr1_) | 1434 const value_range *vr0_, |
1955 { | 1435 const value_range *vr1_) |
1436 { | |
1437 signop sign = TYPE_SIGN (expr_type); | |
1438 unsigned int prec = TYPE_PRECISION (expr_type); | |
1956 value_range vr0 = *vr0_, vr1 = *vr1_; | 1439 value_range vr0 = *vr0_, vr1 = *vr1_; |
1957 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; | 1440 value_range vrtem0, vrtem1; |
1958 enum value_range_type type; | 1441 enum value_range_kind type; |
1959 tree min = NULL_TREE, max = NULL_TREE; | 1442 tree min = NULL_TREE, max = NULL_TREE; |
1960 int cmp; | 1443 int cmp; |
1961 | 1444 |
1962 if (!INTEGRAL_TYPE_P (expr_type) | 1445 if (!INTEGRAL_TYPE_P (expr_type) |
1963 && !POINTER_TYPE_P (expr_type)) | 1446 && !POINTER_TYPE_P (expr_type)) |
1989 set_value_range_to_varying (vr); | 1472 set_value_range_to_varying (vr); |
1990 return; | 1473 return; |
1991 } | 1474 } |
1992 | 1475 |
1993 /* If both ranges are UNDEFINED, so is the result. */ | 1476 /* If both ranges are UNDEFINED, so is the result. */ |
1994 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) | 1477 if (vr0.undefined_p () && vr1.undefined_p ()) |
1995 { | 1478 { |
1996 set_value_range_to_undefined (vr); | 1479 set_value_range_to_undefined (vr); |
1997 return; | 1480 return; |
1998 } | 1481 } |
1999 /* If one of the ranges is UNDEFINED drop it to VARYING for the following | 1482 /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
2000 code. At some point we may want to special-case operations that | 1483 code. At some point we may want to special-case operations that |
2001 have UNDEFINED result for all or some value-ranges of the not UNDEFINED | 1484 have UNDEFINED result for all or some value-ranges of the not UNDEFINED |
2002 operand. */ | 1485 operand. */ |
2003 else if (vr0.type == VR_UNDEFINED) | 1486 else if (vr0.undefined_p ()) |
2004 set_value_range_to_varying (&vr0); | 1487 set_value_range_to_varying (&vr0); |
2005 else if (vr1.type == VR_UNDEFINED) | 1488 else if (vr1.undefined_p ()) |
2006 set_value_range_to_varying (&vr1); | 1489 set_value_range_to_varying (&vr1); |
2007 | 1490 |
2008 /* We get imprecise results from ranges_from_anti_range when | 1491 /* We get imprecise results from ranges_from_anti_range when |
2009 code is EXACT_DIV_EXPR. We could mask out bits in the resulting | 1492 code is EXACT_DIV_EXPR. We could mask out bits in the resulting |
2010 range, but then we also need to hack up vrp_meet. It's just | 1493 range, but then we also need to hack up vrp_union. It's just |
2011 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */ | 1494 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */ |
2012 if (code == EXACT_DIV_EXPR | 1495 if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0)) |
2013 && vr0.type == VR_ANTI_RANGE | |
2014 && vr0.min == vr0.max | |
2015 && integer_zerop (vr0.min)) | |
2016 { | 1496 { |
2017 set_value_range_to_nonnull (vr, expr_type); | 1497 set_value_range_to_nonnull (vr, expr_type); |
2018 return; | 1498 return; |
2019 } | 1499 } |
2020 | 1500 |
2021 /* Now canonicalize anti-ranges to ranges when they are not symbolic | 1501 /* Now canonicalize anti-ranges to ranges when they are not symbolic |
2022 and express ~[] op X as ([]' op X) U ([]'' op X). */ | 1502 and express ~[] op X as ([]' op X) U ([]'' op X). */ |
2023 if (vr0.type == VR_ANTI_RANGE | 1503 if (vr0.kind () == VR_ANTI_RANGE |
2024 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | 1504 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
2025 { | 1505 { |
2026 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); | 1506 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); |
2027 if (vrtem1.type != VR_UNDEFINED) | 1507 if (!vrtem1.undefined_p ()) |
2028 { | 1508 { |
2029 value_range vrres = VR_INITIALIZER; | 1509 value_range vrres; |
2030 extract_range_from_binary_expr_1 (&vrres, code, expr_type, | 1510 extract_range_from_binary_expr_1 (&vrres, code, expr_type, &vrtem1, vr1_); |
2031 &vrtem1, vr1_); | 1511 vr->union_ (&vrres); |
2032 vrp_meet (vr, &vrres); | |
2033 } | 1512 } |
2034 return; | 1513 return; |
2035 } | 1514 } |
2036 /* Likewise for X op ~[]. */ | 1515 /* Likewise for X op ~[]. */ |
2037 if (vr1.type == VR_ANTI_RANGE | 1516 if (vr1.kind () == VR_ANTI_RANGE |
2038 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) | 1517 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) |
2039 { | 1518 { |
2040 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); | 1519 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); |
2041 if (vrtem1.type != VR_UNDEFINED) | 1520 if (!vrtem1.undefined_p ()) |
2042 { | 1521 { |
2043 value_range vrres = VR_INITIALIZER; | 1522 value_range vrres; |
2044 extract_range_from_binary_expr_1 (&vrres, code, expr_type, | 1523 extract_range_from_binary_expr_1 (&vrres, code, expr_type, |
2045 vr0_, &vrtem1); | 1524 vr0_, &vrtem1); |
2046 vrp_meet (vr, &vrres); | 1525 vr->union_ (&vrres); |
2047 } | 1526 } |
2048 return; | 1527 return; |
2049 } | 1528 } |
2050 | 1529 |
2051 /* The type of the resulting value range defaults to VR0.TYPE. */ | 1530 /* The type of the resulting value range defaults to VR0.TYPE. */ |
2052 type = vr0.type; | 1531 type = vr0.kind (); |
2053 | 1532 |
2054 /* Refuse to operate on VARYING ranges, ranges of different kinds | 1533 /* Refuse to operate on VARYING ranges, ranges of different kinds |
2055 and symbolic ranges. As an exception, we allow BIT_{AND,IOR} | 1534 and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |
2056 because we may be able to derive a useful range even if one of | 1535 because we may be able to derive a useful range even if one of |
2057 the operands is VR_VARYING or symbolic range. Similarly for | 1536 the operands is VR_VARYING or symbolic range. Similarly for |
2069 && code != MIN_EXPR | 1548 && code != MIN_EXPR |
2070 && code != MAX_EXPR | 1549 && code != MAX_EXPR |
2071 && code != PLUS_EXPR | 1550 && code != PLUS_EXPR |
2072 && code != MINUS_EXPR | 1551 && code != MINUS_EXPR |
2073 && code != RSHIFT_EXPR | 1552 && code != RSHIFT_EXPR |
2074 && (vr0.type == VR_VARYING | 1553 && code != POINTER_PLUS_EXPR |
2075 || vr1.type == VR_VARYING | 1554 && (vr0.varying_p () |
2076 || vr0.type != vr1.type | 1555 || vr1.varying_p () |
2077 || symbolic_range_p (&vr0) | 1556 || vr0.kind () != vr1.kind () |
2078 || symbolic_range_p (&vr1))) | 1557 || vr0.symbolic_p () |
1558 || vr1.symbolic_p ())) | |
2079 { | 1559 { |
2080 set_value_range_to_varying (vr); | 1560 set_value_range_to_varying (vr); |
2081 return; | 1561 return; |
2082 } | 1562 } |
2083 | 1563 |
2088 { | 1568 { |
2089 /* For MIN/MAX expressions with pointers, we only care about | 1569 /* For MIN/MAX expressions with pointers, we only care about |
2090 nullness, if both are non null, then the result is nonnull. | 1570 nullness, if both are non null, then the result is nonnull. |
2091 If both are null, then the result is null. Otherwise they | 1571 If both are null, then the result is null. Otherwise they |
2092 are varying. */ | 1572 are varying. */ |
2093 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | 1573 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1)) |
2094 set_value_range_to_nonnull (vr, expr_type); | 1574 set_value_range_to_nonnull (vr, expr_type); |
2095 else if (range_is_null (&vr0) && range_is_null (&vr1)) | 1575 else if (range_is_null (&vr0) && range_is_null (&vr1)) |
2096 set_value_range_to_null (vr, expr_type); | 1576 set_value_range_to_null (vr, expr_type); |
2097 else | 1577 else |
2098 set_value_range_to_varying (vr); | 1578 set_value_range_to_varying (vr); |
2099 } | 1579 } |
2100 else if (code == POINTER_PLUS_EXPR) | 1580 else if (code == POINTER_PLUS_EXPR) |
2101 { | 1581 { |
2102 /* For pointer types, we are really only interested in asserting | 1582 /* For pointer types, we are really only interested in asserting |
2103 whether the expression evaluates to non-NULL. */ | 1583 whether the expression evaluates to non-NULL. */ |
2104 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) | 1584 if (!range_includes_zero_p (&vr0) |
1585 || !range_includes_zero_p (&vr1)) | |
2105 set_value_range_to_nonnull (vr, expr_type); | 1586 set_value_range_to_nonnull (vr, expr_type); |
2106 else if (range_is_null (&vr0) && range_is_null (&vr1)) | 1587 else if (range_is_null (&vr0) && range_is_null (&vr1)) |
2107 set_value_range_to_null (vr, expr_type); | 1588 set_value_range_to_null (vr, expr_type); |
2108 else | 1589 else |
2109 set_value_range_to_varying (vr); | 1590 set_value_range_to_varying (vr); |
2110 } | 1591 } |
2111 else if (code == BIT_AND_EXPR) | 1592 else if (code == BIT_AND_EXPR) |
2112 { | 1593 { |
2113 /* For pointer types, we are really only interested in asserting | 1594 /* For pointer types, we are really only interested in asserting |
2114 whether the expression evaluates to non-NULL. */ | 1595 whether the expression evaluates to non-NULL. */ |
2115 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | 1596 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1)) |
2116 set_value_range_to_nonnull (vr, expr_type); | 1597 set_value_range_to_nonnull (vr, expr_type); |
2117 else if (range_is_null (&vr0) || range_is_null (&vr1)) | 1598 else if (range_is_null (&vr0) || range_is_null (&vr1)) |
2118 set_value_range_to_null (vr, expr_type); | 1599 set_value_range_to_null (vr, expr_type); |
2119 else | 1600 else |
2120 set_value_range_to_varying (vr); | 1601 set_value_range_to_varying (vr); |
2127 | 1608 |
2128 /* For integer ranges, apply the operation to each end of the | 1609 /* For integer ranges, apply the operation to each end of the |
2129 range and see what we end up with. */ | 1610 range and see what we end up with. */ |
2130 if (code == PLUS_EXPR || code == MINUS_EXPR) | 1611 if (code == PLUS_EXPR || code == MINUS_EXPR) |
2131 { | 1612 { |
1613 /* This will normalize things such that calculating | |
1614 [0,0] - VR_VARYING is not dropped to varying, but is | |
1615 calculated as [MIN+1, MAX]. */ | |
1616 if (vr0.varying_p ()) | |
1617 vr0.update (VR_RANGE, | |
1618 vrp_val_min (expr_type), | |
1619 vrp_val_max (expr_type)); | |
1620 if (vr1.varying_p ()) | |
1621 vr1.update (VR_RANGE, | |
1622 vrp_val_min (expr_type), | |
1623 vrp_val_max (expr_type)); | |
1624 | |
2132 const bool minus_p = (code == MINUS_EXPR); | 1625 const bool minus_p = (code == MINUS_EXPR); |
2133 tree min_op0 = vr0.min; | 1626 tree min_op0 = vr0.min (); |
2134 tree min_op1 = minus_p ? vr1.max : vr1.min; | 1627 tree min_op1 = minus_p ? vr1.max () : vr1.min (); |
2135 tree max_op0 = vr0.max; | 1628 tree max_op0 = vr0.max (); |
2136 tree max_op1 = minus_p ? vr1.min : vr1.max; | 1629 tree max_op1 = minus_p ? vr1.min () : vr1.max (); |
2137 tree sym_min_op0 = NULL_TREE; | 1630 tree sym_min_op0 = NULL_TREE; |
2138 tree sym_min_op1 = NULL_TREE; | 1631 tree sym_min_op1 = NULL_TREE; |
2139 tree sym_max_op0 = NULL_TREE; | 1632 tree sym_max_op0 = NULL_TREE; |
2140 tree sym_max_op1 = NULL_TREE; | 1633 tree sym_max_op1 = NULL_TREE; |
2141 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; | 1634 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; |
2142 | 1635 |
1636 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false; | |
1637 | |
2143 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or | 1638 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or |
2144 single-symbolic ranges, try to compute the precise resulting range, | 1639 single-symbolic ranges, try to compute the precise resulting range, |
2145 but only if we know that this resulting range will also be constant | 1640 but only if we know that this resulting range will also be constant |
2146 or single-symbolic. */ | 1641 or single-symbolic. */ |
2147 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE | 1642 if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE |
2148 && (TREE_CODE (min_op0) == INTEGER_CST | 1643 && (TREE_CODE (min_op0) == INTEGER_CST |
2149 || (sym_min_op0 | 1644 || (sym_min_op0 |
2150 = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) | 1645 = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) |
2151 && (TREE_CODE (min_op1) == INTEGER_CST | 1646 && (TREE_CODE (min_op1) == INTEGER_CST |
2152 || (sym_min_op1 | 1647 || (sym_min_op1 |
2162 = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) | 1657 = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) |
2163 && (!(sym_max_op0 && sym_max_op1) | 1658 && (!(sym_max_op0 && sym_max_op1) |
2164 || (sym_max_op0 == sym_max_op1 | 1659 || (sym_max_op0 == sym_max_op1 |
2165 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) | 1660 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) |
2166 { | 1661 { |
2167 const signop sgn = TYPE_SIGN (expr_type); | 1662 wide_int wmin, wmax; |
2168 const unsigned int prec = TYPE_PRECISION (expr_type); | 1663 wi::overflow_type min_ovf = wi::OVF_NONE; |
2169 wide_int type_min, type_max, wmin, wmax; | 1664 wi::overflow_type max_ovf = wi::OVF_NONE; |
2170 int min_ovf = 0; | 1665 |
2171 int max_ovf = 0; | 1666 /* Build the bounds. */ |
2172 | 1667 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1); |
2173 /* Get the lower and upper bounds of the type. */ | 1668 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1); |
2174 if (TYPE_OVERFLOW_WRAPS (expr_type)) | |
2175 { | |
2176 type_min = wi::min_value (prec, sgn); | |
2177 type_max = wi::max_value (prec, sgn); | |
2178 } | |
2179 else | |
2180 { | |
2181 type_min = wi::to_wide (vrp_val_min (expr_type)); | |
2182 type_max = wi::to_wide (vrp_val_max (expr_type)); | |
2183 } | |
2184 | |
2185 /* Combine the lower bounds, if any. */ | |
2186 if (min_op0 && min_op1) | |
2187 { | |
2188 if (minus_p) | |
2189 { | |
2190 wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1); | |
2191 | |
2192 /* Check for overflow. */ | |
2193 if (wi::cmp (0, wi::to_wide (min_op1), sgn) | |
2194 != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) | |
2195 min_ovf = wi::cmp (wi::to_wide (min_op0), | |
2196 wi::to_wide (min_op1), sgn); | |
2197 } | |
2198 else | |
2199 { | |
2200 wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1); | |
2201 | |
2202 /* Check for overflow. */ | |
2203 if (wi::cmp (wi::to_wide (min_op1), 0, sgn) | |
2204 != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) | |
2205 min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn); | |
2206 } | |
2207 } | |
2208 else if (min_op0) | |
2209 wmin = wi::to_wide (min_op0); | |
2210 else if (min_op1) | |
2211 { | |
2212 if (minus_p) | |
2213 { | |
2214 wmin = -wi::to_wide (min_op1); | |
2215 | |
2216 /* Check for overflow. */ | |
2217 if (sgn == SIGNED | |
2218 && wi::neg_p (wi::to_wide (min_op1)) | |
2219 && wi::neg_p (wmin)) | |
2220 min_ovf = 1; | |
2221 else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0) | |
2222 min_ovf = -1; | |
2223 } | |
2224 else | |
2225 wmin = wi::to_wide (min_op1); | |
2226 } | |
2227 else | |
2228 wmin = wi::shwi (0, prec); | |
2229 | |
2230 /* Combine the upper bounds, if any. */ | |
2231 if (max_op0 && max_op1) | |
2232 { | |
2233 if (minus_p) | |
2234 { | |
2235 wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1); | |
2236 | |
2237 /* Check for overflow. */ | |
2238 if (wi::cmp (0, wi::to_wide (max_op1), sgn) | |
2239 != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) | |
2240 max_ovf = wi::cmp (wi::to_wide (max_op0), | |
2241 wi::to_wide (max_op1), sgn); | |
2242 } | |
2243 else | |
2244 { | |
2245 wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1); | |
2246 | |
2247 if (wi::cmp (wi::to_wide (max_op1), 0, sgn) | |
2248 != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) | |
2249 max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn); | |
2250 } | |
2251 } | |
2252 else if (max_op0) | |
2253 wmax = wi::to_wide (max_op0); | |
2254 else if (max_op1) | |
2255 { | |
2256 if (minus_p) | |
2257 { | |
2258 wmax = -wi::to_wide (max_op1); | |
2259 | |
2260 /* Check for overflow. */ | |
2261 if (sgn == SIGNED | |
2262 && wi::neg_p (wi::to_wide (max_op1)) | |
2263 && wi::neg_p (wmax)) | |
2264 max_ovf = 1; | |
2265 else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0) | |
2266 max_ovf = -1; | |
2267 } | |
2268 else | |
2269 wmax = wi::to_wide (max_op1); | |
2270 } | |
2271 else | |
2272 wmax = wi::shwi (0, prec); | |
2273 | |
2274 /* Check for type overflow. */ | |
2275 if (min_ovf == 0) | |
2276 { | |
2277 if (wi::cmp (wmin, type_min, sgn) == -1) | |
2278 min_ovf = -1; | |
2279 else if (wi::cmp (wmin, type_max, sgn) == 1) | |
2280 min_ovf = 1; | |
2281 } | |
2282 if (max_ovf == 0) | |
2283 { | |
2284 if (wi::cmp (wmax, type_min, sgn) == -1) | |
2285 max_ovf = -1; | |
2286 else if (wi::cmp (wmax, type_max, sgn) == 1) | |
2287 max_ovf = 1; | |
2288 } | |
2289 | 1669 |
2290 /* If we have overflow for the constant part and the resulting | 1670 /* If we have overflow for the constant part and the resulting |
2291 range will be symbolic, drop to VR_VARYING. */ | 1671 range will be symbolic, drop to VR_VARYING. */ |
2292 if ((min_ovf && sym_min_op0 != sym_min_op1) | 1672 if (((bool)min_ovf && sym_min_op0 != sym_min_op1) |
2293 || (max_ovf && sym_max_op0 != sym_max_op1)) | 1673 || ((bool)max_ovf && sym_max_op0 != sym_max_op1)) |
2294 { | 1674 { |
2295 set_value_range_to_varying (vr); | 1675 set_value_range_to_varying (vr); |
2296 return; | 1676 return; |
2297 } | 1677 } |
2298 | 1678 |
2299 if (TYPE_OVERFLOW_WRAPS (expr_type)) | 1679 /* Adjust the range for possible overflow. */ |
1680 min = NULL_TREE; | |
1681 max = NULL_TREE; | |
1682 set_value_range_with_overflow (type, min, max, expr_type, | |
1683 wmin, wmax, min_ovf, max_ovf); | |
1684 if (type == VR_VARYING) | |
2300 { | 1685 { |
2301 /* If overflow wraps, truncate the values and adjust the | 1686 set_value_range_to_varying (vr); |
2302 range kind and bounds appropriately. */ | 1687 return; |
2303 wide_int tmin = wide_int::from (wmin, prec, sgn); | |
2304 wide_int tmax = wide_int::from (wmax, prec, sgn); | |
2305 if (min_ovf == max_ovf) | |
2306 { | |
2307 /* No overflow or both overflow or underflow. The | |
2308 range kind stays VR_RANGE. */ | |
2309 min = wide_int_to_tree (expr_type, tmin); | |
2310 max = wide_int_to_tree (expr_type, tmax); | |
2311 } | |
2312 else if ((min_ovf == -1 && max_ovf == 0) | |
2313 || (max_ovf == 1 && min_ovf == 0)) | |
2314 { | |
2315 /* Min underflow or max overflow. The range kind | |
2316 changes to VR_ANTI_RANGE. */ | |
2317 bool covers = false; | |
2318 wide_int tem = tmin; | |
2319 type = VR_ANTI_RANGE; | |
2320 tmin = tmax + 1; | |
2321 if (wi::cmp (tmin, tmax, sgn) < 0) | |
2322 covers = true; | |
2323 tmax = tem - 1; | |
2324 if (wi::cmp (tmax, tem, sgn) > 0) | |
2325 covers = true; | |
2326 /* If the anti-range would cover nothing, drop to varying. | |
2327 Likewise if the anti-range bounds are outside of the | |
2328 types values. */ | |
2329 if (covers || wi::cmp (tmin, tmax, sgn) > 0) | |
2330 { | |
2331 set_value_range_to_varying (vr); | |
2332 return; | |
2333 } | |
2334 min = wide_int_to_tree (expr_type, tmin); | |
2335 max = wide_int_to_tree (expr_type, tmax); | |
2336 } | |
2337 else | |
2338 { | |
2339 /* Other underflow and/or overflow, drop to VR_VARYING. */ | |
2340 set_value_range_to_varying (vr); | |
2341 return; | |
2342 } | |
2343 } | 1688 } |
2344 else | 1689 |
2345 { | 1690 /* Build the symbolic bounds if needed. */ |
2346 /* If overflow does not wrap, saturate to the types min/max | 1691 adjust_symbolic_bound (min, code, expr_type, |
2347 value. */ | 1692 sym_min_op0, sym_min_op1, |
2348 if (min_ovf == -1) | 1693 neg_min_op0, neg_min_op1); |
2349 min = wide_int_to_tree (expr_type, type_min); | 1694 adjust_symbolic_bound (max, code, expr_type, |
2350 else if (min_ovf == 1) | 1695 sym_max_op0, sym_max_op1, |
2351 min = wide_int_to_tree (expr_type, type_max); | 1696 neg_max_op0, neg_max_op1); |
2352 else | |
2353 min = wide_int_to_tree (expr_type, wmin); | |
2354 | |
2355 if (max_ovf == -1) | |
2356 max = wide_int_to_tree (expr_type, type_min); | |
2357 else if (max_ovf == 1) | |
2358 max = wide_int_to_tree (expr_type, type_max); | |
2359 else | |
2360 max = wide_int_to_tree (expr_type, wmax); | |
2361 } | |
2362 | |
2363 /* If the result lower bound is constant, we're done; | |
2364 otherwise, build the symbolic lower bound. */ | |
2365 if (sym_min_op0 == sym_min_op1) | |
2366 ; | |
2367 else if (sym_min_op0) | |
2368 min = build_symbolic_expr (expr_type, sym_min_op0, | |
2369 neg_min_op0, min); | |
2370 else if (sym_min_op1) | |
2371 { | |
2372 /* We may not negate if that might introduce | |
2373 undefined overflow. */ | |
2374 if (! minus_p | |
2375 || neg_min_op1 | |
2376 || TYPE_OVERFLOW_WRAPS (expr_type)) | |
2377 min = build_symbolic_expr (expr_type, sym_min_op1, | |
2378 neg_min_op1 ^ minus_p, min); | |
2379 else | |
2380 min = NULL_TREE; | |
2381 } | |
2382 | |
2383 /* Likewise for the upper bound. */ | |
2384 if (sym_max_op0 == sym_max_op1) | |
2385 ; | |
2386 else if (sym_max_op0) | |
2387 max = build_symbolic_expr (expr_type, sym_max_op0, | |
2388 neg_max_op0, max); | |
2389 else if (sym_max_op1) | |
2390 { | |
2391 /* We may not negate if that might introduce | |
2392 undefined overflow. */ | |
2393 if (! minus_p | |
2394 || neg_max_op1 | |
2395 || TYPE_OVERFLOW_WRAPS (expr_type)) | |
2396 max = build_symbolic_expr (expr_type, sym_max_op1, | |
2397 neg_max_op1 ^ minus_p, max); | |
2398 else | |
2399 max = NULL_TREE; | |
2400 } | |
2401 } | 1697 } |
2402 else | 1698 else |
2403 { | 1699 { |
2404 /* For other cases, for example if we have a PLUS_EXPR with two | 1700 /* For other cases, for example if we have a PLUS_EXPR with two |
2405 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort | 1701 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort |
2418 } | 1714 } |
2419 } | 1715 } |
2420 else if (code == MIN_EXPR | 1716 else if (code == MIN_EXPR |
2421 || code == MAX_EXPR) | 1717 || code == MAX_EXPR) |
2422 { | 1718 { |
2423 if (vr0.type == VR_RANGE | 1719 wide_int wmin, wmax; |
2424 && !symbolic_range_p (&vr0)) | 1720 wide_int vr0_min, vr0_max; |
2425 { | 1721 wide_int vr1_min, vr1_max; |
2426 type = VR_RANGE; | 1722 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
2427 if (vr1.type == VR_RANGE | 1723 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); |
2428 && !symbolic_range_p (&vr1)) | 1724 if (wide_int_range_min_max (wmin, wmax, code, sign, prec, |
2429 { | 1725 vr0_min, vr0_max, vr1_min, vr1_max)) |
2430 /* For operations that make the resulting range directly | 1726 vr->update (VR_RANGE, wide_int_to_tree (expr_type, wmin), |
2431 proportional to the original ranges, apply the operation to | 1727 wide_int_to_tree (expr_type, wmax)); |
2432 the same end of each range. */ | |
2433 min = int_const_binop (code, vr0.min, vr1.min); | |
2434 max = int_const_binop (code, vr0.max, vr1.max); | |
2435 } | |
2436 else if (code == MIN_EXPR) | |
2437 { | |
2438 min = vrp_val_min (expr_type); | |
2439 max = vr0.max; | |
2440 } | |
2441 else if (code == MAX_EXPR) | |
2442 { | |
2443 min = vr0.min; | |
2444 max = vrp_val_max (expr_type); | |
2445 } | |
2446 } | |
2447 else if (vr1.type == VR_RANGE | |
2448 && !symbolic_range_p (&vr1)) | |
2449 { | |
2450 type = VR_RANGE; | |
2451 if (code == MIN_EXPR) | |
2452 { | |
2453 min = vrp_val_min (expr_type); | |
2454 max = vr1.max; | |
2455 } | |
2456 else if (code == MAX_EXPR) | |
2457 { | |
2458 min = vr1.min; | |
2459 max = vrp_val_max (expr_type); | |
2460 } | |
2461 } | |
2462 else | 1728 else |
1729 set_value_range_to_varying (vr); | |
1730 return; | |
1731 } | |
1732 else if (code == MULT_EXPR) | |
1733 { | |
1734 if (!range_int_cst_p (&vr0) | |
1735 || !range_int_cst_p (&vr1)) | |
2463 { | 1736 { |
2464 set_value_range_to_varying (vr); | 1737 set_value_range_to_varying (vr); |
2465 return; | 1738 return; |
2466 } | 1739 } |
2467 } | 1740 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1); |
2468 else if (code == MULT_EXPR) | |
2469 { | |
2470 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not | |
2471 drop to varying. This test requires 2*prec bits if both | |
2472 operands are signed and 2*prec + 2 bits if either is not. */ | |
2473 | |
2474 signop sign = TYPE_SIGN (expr_type); | |
2475 unsigned int prec = TYPE_PRECISION (expr_type); | |
2476 | |
2477 if (!range_int_cst_p (&vr0) | |
2478 || !range_int_cst_p (&vr1)) | |
2479 { | |
2480 set_value_range_to_varying (vr); | |
2481 return; | |
2482 } | |
2483 | |
2484 if (TYPE_OVERFLOW_WRAPS (expr_type)) | |
2485 { | |
2486 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; | |
2487 typedef generic_wide_int | |
2488 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; | |
2489 vrp_int sizem1 = wi::mask <vrp_int> (prec, false); | |
2490 vrp_int size = sizem1 + 1; | |
2491 | |
2492 /* Extend the values using the sign of the result to PREC2. | |
2493 From here on out, everthing is just signed math no matter | |
2494 what the input types were. */ | |
2495 vrp_int min0 = vrp_int_cst (vr0.min); | |
2496 vrp_int max0 = vrp_int_cst (vr0.max); | |
2497 vrp_int min1 = vrp_int_cst (vr1.min); | |
2498 vrp_int max1 = vrp_int_cst (vr1.max); | |
2499 /* Canonicalize the intervals. */ | |
2500 if (sign == UNSIGNED) | |
2501 { | |
2502 if (wi::ltu_p (size, min0 + max0)) | |
2503 { | |
2504 min0 -= size; | |
2505 max0 -= size; | |
2506 } | |
2507 | |
2508 if (wi::ltu_p (size, min1 + max1)) | |
2509 { | |
2510 min1 -= size; | |
2511 max1 -= size; | |
2512 } | |
2513 } | |
2514 | |
2515 vrp_int prod0 = min0 * min1; | |
2516 vrp_int prod1 = min0 * max1; | |
2517 vrp_int prod2 = max0 * min1; | |
2518 vrp_int prod3 = max0 * max1; | |
2519 | |
2520 /* Sort the 4 products so that min is in prod0 and max is in | |
2521 prod3. */ | |
2522 /* min0min1 > max0max1 */ | |
2523 if (prod0 > prod3) | |
2524 std::swap (prod0, prod3); | |
2525 | |
2526 /* min0max1 > max0min1 */ | |
2527 if (prod1 > prod2) | |
2528 std::swap (prod1, prod2); | |
2529 | |
2530 if (prod0 > prod1) | |
2531 std::swap (prod0, prod1); | |
2532 | |
2533 if (prod2 > prod3) | |
2534 std::swap (prod2, prod3); | |
2535 | |
2536 /* diff = max - min. */ | |
2537 prod2 = prod3 - prod0; | |
2538 if (wi::geu_p (prod2, sizem1)) | |
2539 { | |
2540 /* the range covers all values. */ | |
2541 set_value_range_to_varying (vr); | |
2542 return; | |
2543 } | |
2544 | |
2545 /* The following should handle the wrapping and selecting | |
2546 VR_ANTI_RANGE for us. */ | |
2547 min = wide_int_to_tree (expr_type, prod0); | |
2548 max = wide_int_to_tree (expr_type, prod3); | |
2549 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); | |
2550 return; | |
2551 } | |
2552 | |
2553 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, | |
2554 drop to VR_VARYING. It would take more effort to compute a | |
2555 precise range for such a case. For example, if we have | |
2556 op0 == 65536 and op1 == 65536 with their ranges both being | |
2557 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so | |
2558 we cannot claim that the product is in ~[0,0]. Note that we | |
2559 are guaranteed to have vr0.type == vr1.type at this | |
2560 point. */ | |
2561 if (vr0.type == VR_ANTI_RANGE | |
2562 && !TYPE_OVERFLOW_UNDEFINED (expr_type)) | |
2563 { | |
2564 set_value_range_to_varying (vr); | |
2565 return; | |
2566 } | |
2567 | |
2568 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2569 return; | 1741 return; |
2570 } | 1742 } |
2571 else if (code == RSHIFT_EXPR | 1743 else if (code == RSHIFT_EXPR |
2572 || code == LSHIFT_EXPR) | 1744 || code == LSHIFT_EXPR) |
2573 { | 1745 { |
2574 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], | |
2575 then drop to VR_VARYING. Outside of this range we get undefined | |
2576 behavior from the shift operation. We cannot even trust | |
2577 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl | |
2578 shifts, and the operation at the tree level may be widened. */ | |
2579 if (range_int_cst_p (&vr1) | 1746 if (range_int_cst_p (&vr1) |
2580 && compare_tree_int (vr1.min, 0) >= 0 | 1747 && !wide_int_range_shift_undefined_p |
2581 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) | 1748 (TYPE_SIGN (TREE_TYPE (vr1.min ())), |
1749 prec, | |
1750 wi::to_wide (vr1.min ()), | |
1751 wi::to_wide (vr1.max ()))) | |
2582 { | 1752 { |
2583 if (code == RSHIFT_EXPR) | 1753 if (code == RSHIFT_EXPR) |
2584 { | 1754 { |
2585 /* Even if vr0 is VARYING or otherwise not usable, we can derive | 1755 /* Even if vr0 is VARYING or otherwise not usable, we can derive |
2586 useful ranges just from the shift count. E.g. | 1756 useful ranges just from the shift count. E.g. |
2587 x >> 63 for signed 64-bit x is always [-1, 0]. */ | 1757 x >> 63 for signed 64-bit x is always [-1, 0]. */ |
2588 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | 1758 if (vr0.kind () != VR_RANGE || vr0.symbolic_p ()) |
2589 { | 1759 vr0.update (VR_RANGE, |
2590 vr0.type = type = VR_RANGE; | 1760 vrp_val_min (expr_type), |
2591 vr0.min = vrp_val_min (expr_type); | 1761 vrp_val_max (expr_type)); |
2592 vr0.max = vrp_val_max (expr_type); | 1762 extract_range_from_multiplicative_op (vr, code, &vr0, &vr1); |
2593 } | |
2594 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2595 return; | |
2596 } | |
2597 /* We can map lshifts by constants to MULT_EXPR handling. */ | |
2598 else if (code == LSHIFT_EXPR | |
2599 && range_int_cst_singleton_p (&vr1)) | |
2600 { | |
2601 bool saved_flag_wrapv; | |
2602 value_range vr1p = VR_INITIALIZER; | |
2603 vr1p.type = VR_RANGE; | |
2604 vr1p.min = (wide_int_to_tree | |
2605 (expr_type, | |
2606 wi::set_bit_in_zero (tree_to_shwi (vr1.min), | |
2607 TYPE_PRECISION (expr_type)))); | |
2608 vr1p.max = vr1p.min; | |
2609 /* We have to use a wrapping multiply though as signed overflow | |
2610 on lshifts is implementation defined in C89. */ | |
2611 saved_flag_wrapv = flag_wrapv; | |
2612 flag_wrapv = 1; | |
2613 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, | |
2614 &vr0, &vr1p); | |
2615 flag_wrapv = saved_flag_wrapv; | |
2616 return; | 1763 return; |
2617 } | 1764 } |
2618 else if (code == LSHIFT_EXPR | 1765 else if (code == LSHIFT_EXPR |
2619 && range_int_cst_p (&vr0)) | 1766 && range_int_cst_p (&vr0)) |
2620 { | 1767 { |
2621 int prec = TYPE_PRECISION (expr_type); | 1768 wide_int res_lb, res_ub; |
2622 int overflow_pos = prec; | 1769 if (wide_int_range_lshift (res_lb, res_ub, sign, prec, |
2623 int bound_shift; | 1770 wi::to_wide (vr0.min ()), |
2624 wide_int low_bound, high_bound; | 1771 wi::to_wide (vr0.max ()), |
2625 bool uns = TYPE_UNSIGNED (expr_type); | 1772 wi::to_wide (vr1.min ()), |
2626 bool in_bounds = false; | 1773 wi::to_wide (vr1.max ()), |
2627 | 1774 TYPE_OVERFLOW_UNDEFINED (expr_type))) |
2628 if (!uns) | |
2629 overflow_pos -= 1; | |
2630 | |
2631 bound_shift = overflow_pos - tree_to_shwi (vr1.max); | |
2632 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can | |
2633 overflow. However, for that to happen, vr1.max needs to be | |
2634 zero, which means vr1 is a singleton range of zero, which | |
2635 means it should be handled by the previous LSHIFT_EXPR | |
2636 if-clause. */ | |
2637 wide_int bound = wi::set_bit_in_zero (bound_shift, prec); | |
2638 wide_int complement = ~(bound - 1); | |
2639 | |
2640 if (uns) | |
2641 { | 1775 { |
2642 low_bound = bound; | 1776 min = wide_int_to_tree (expr_type, res_lb); |
2643 high_bound = complement; | 1777 max = wide_int_to_tree (expr_type, res_ub); |
2644 if (wi::ltu_p (wi::to_wide (vr0.max), low_bound)) | 1778 vr->set_and_canonicalize (VR_RANGE, min, max, NULL); |
2645 { | |
2646 /* [5, 6] << [1, 2] == [10, 24]. */ | |
2647 /* We're shifting out only zeroes, the value increases | |
2648 monotonically. */ | |
2649 in_bounds = true; | |
2650 } | |
2651 else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min))) | |
2652 { | |
2653 /* [0xffffff00, 0xffffffff] << [1, 2] | |
2654 == [0xfffffc00, 0xfffffffe]. */ | |
2655 /* We're shifting out only ones, the value decreases | |
2656 monotonically. */ | |
2657 in_bounds = true; | |
2658 } | |
2659 } | |
2660 else | |
2661 { | |
2662 /* [-1, 1] << [1, 2] == [-4, 4]. */ | |
2663 low_bound = complement; | |
2664 high_bound = bound; | |
2665 if (wi::lts_p (wi::to_wide (vr0.max), high_bound) | |
2666 && wi::lts_p (low_bound, wi::to_wide (vr0.min))) | |
2667 { | |
2668 /* For non-negative numbers, we're shifting out only | |
2669 zeroes, the value increases monotonically. | |
2670 For negative numbers, we're shifting out only ones, the | |
2671 value decreases monotomically. */ | |
2672 in_bounds = true; | |
2673 } | |
2674 } | |
2675 | |
2676 if (in_bounds) | |
2677 { | |
2678 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2679 return; | 1779 return; |
2680 } | 1780 } |
2681 } | 1781 } |
2682 } | 1782 } |
2683 set_value_range_to_varying (vr); | 1783 set_value_range_to_varying (vr); |
2687 || code == FLOOR_DIV_EXPR | 1787 || code == FLOOR_DIV_EXPR |
2688 || code == CEIL_DIV_EXPR | 1788 || code == CEIL_DIV_EXPR |
2689 || code == EXACT_DIV_EXPR | 1789 || code == EXACT_DIV_EXPR |
2690 || code == ROUND_DIV_EXPR) | 1790 || code == ROUND_DIV_EXPR) |
2691 { | 1791 { |
2692 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | 1792 wide_int dividend_min, dividend_max, divisor_min, divisor_max; |
2693 { | 1793 wide_int wmin, wmax, extra_min, extra_max; |
2694 /* For division, if op1 has VR_RANGE but op0 does not, something | 1794 bool extra_range_p; |
2695 can be deduced just from that range. Say [min, max] / [4, max] | 1795 |
2696 gives [min / 4, max / 4] range. */ | 1796 /* Special case explicit division by zero as undefined. */ |
2697 if (vr1.type == VR_RANGE | 1797 if (range_is_null (&vr1)) |
2698 && !symbolic_range_p (&vr1) | 1798 { |
2699 && range_includes_zero_p (vr1.min, vr1.max) == 0) | 1799 set_value_range_to_undefined (vr); |
1800 return; | |
1801 } | |
1802 | |
1803 /* First, normalize ranges into constants we can handle. Note | |
1804 that VR_ANTI_RANGE's of constants were already normalized | |
1805 before arriving here. | |
1806 | |
1807 NOTE: As a future improvement, we may be able to do better | |
1808 with mixed symbolic (anti-)ranges like [0, A]. See note in | |
1809 ranges_from_anti_range. */ | |
1810 extract_range_into_wide_ints (&vr0, sign, prec, | |
1811 dividend_min, dividend_max); | |
1812 extract_range_into_wide_ints (&vr1, sign, prec, | |
1813 divisor_min, divisor_max); | |
1814 if (!wide_int_range_div (wmin, wmax, code, sign, prec, | |
1815 dividend_min, dividend_max, | |
1816 divisor_min, divisor_max, | |
1817 TYPE_OVERFLOW_UNDEFINED (expr_type), | |
1818 extra_range_p, extra_min, extra_max)) | |
1819 { | |
1820 set_value_range_to_varying (vr); | |
1821 return; | |
1822 } | |
1823 set_value_range (vr, VR_RANGE, | |
1824 wide_int_to_tree (expr_type, wmin), | |
1825 wide_int_to_tree (expr_type, wmax), NULL); | |
1826 if (extra_range_p) | |
1827 { | |
1828 value_range extra_range; | |
1829 set_value_range (&extra_range, VR_RANGE, | |
1830 wide_int_to_tree (expr_type, extra_min), | |
1831 wide_int_to_tree (expr_type, extra_max), NULL); | |
1832 vr->union_ (&extra_range); | |
1833 } | |
1834 return; | |
1835 } | |
1836 else if (code == TRUNC_MOD_EXPR) | |
1837 { | |
1838 if (range_is_null (&vr1)) | |
1839 { | |
1840 set_value_range_to_undefined (vr); | |
1841 return; | |
1842 } | |
1843 wide_int wmin, wmax, tmp; | |
1844 wide_int vr0_min, vr0_max, vr1_min, vr1_max; | |
1845 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); | |
1846 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); | |
1847 wide_int_range_trunc_mod (wmin, wmax, sign, prec, | |
1848 vr0_min, vr0_max, vr1_min, vr1_max); | |
1849 min = wide_int_to_tree (expr_type, wmin); | |
1850 max = wide_int_to_tree (expr_type, wmax); | |
1851 set_value_range (vr, VR_RANGE, min, max, NULL); | |
1852 return; | |
1853 } | |
1854 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) | |
1855 { | |
1856 wide_int may_be_nonzero0, may_be_nonzero1; | |
1857 wide_int must_be_nonzero0, must_be_nonzero1; | |
1858 wide_int wmin, wmax; | |
1859 wide_int vr0_min, vr0_max, vr1_min, vr1_max; | |
1860 vrp_set_zero_nonzero_bits (expr_type, &vr0, | |
1861 &may_be_nonzero0, &must_be_nonzero0); | |
1862 vrp_set_zero_nonzero_bits (expr_type, &vr1, | |
1863 &may_be_nonzero1, &must_be_nonzero1); | |
1864 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); | |
1865 extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max); | |
1866 if (code == BIT_AND_EXPR) | |
1867 { | |
1868 if (wide_int_range_bit_and (wmin, wmax, sign, prec, | |
1869 vr0_min, vr0_max, | |
1870 vr1_min, vr1_max, | |
1871 must_be_nonzero0, | |
1872 may_be_nonzero0, | |
1873 must_be_nonzero1, | |
1874 may_be_nonzero1)) | |
2700 { | 1875 { |
2701 vr0.type = type = VR_RANGE; | 1876 min = wide_int_to_tree (expr_type, wmin); |
2702 vr0.min = vrp_val_min (expr_type); | 1877 max = wide_int_to_tree (expr_type, wmax); |
2703 vr0.max = vrp_val_max (expr_type); | 1878 set_value_range (vr, VR_RANGE, min, max, NULL); |
2704 } | 1879 } |
2705 else | 1880 else |
1881 set_value_range_to_varying (vr); | |
1882 return; | |
1883 } | |
1884 else if (code == BIT_IOR_EXPR) | |
1885 { | |
1886 if (wide_int_range_bit_ior (wmin, wmax, sign, | |
1887 vr0_min, vr0_max, | |
1888 vr1_min, vr1_max, | |
1889 must_be_nonzero0, | |
1890 may_be_nonzero0, | |
1891 must_be_nonzero1, | |
1892 may_be_nonzero1)) | |
2706 { | 1893 { |
2707 set_value_range_to_varying (vr); | 1894 min = wide_int_to_tree (expr_type, wmin); |
2708 return; | 1895 max = wide_int_to_tree (expr_type, wmax); |
2709 } | 1896 set_value_range (vr, VR_RANGE, min, max, NULL); |
2710 } | |
2711 | |
2712 /* For divisions, if flag_non_call_exceptions is true, we must | |
2713 not eliminate a division by zero. */ | |
2714 if (cfun->can_throw_non_call_exceptions | |
2715 && (vr1.type != VR_RANGE | |
2716 || range_includes_zero_p (vr1.min, vr1.max) != 0)) | |
2717 { | |
2718 set_value_range_to_varying (vr); | |
2719 return; | |
2720 } | |
2721 | |
2722 /* For divisions, if op0 is VR_RANGE, we can deduce a range | |
2723 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can | |
2724 include 0. */ | |
2725 if (vr0.type == VR_RANGE | |
2726 && (vr1.type != VR_RANGE | |
2727 || range_includes_zero_p (vr1.min, vr1.max) != 0)) | |
2728 { | |
2729 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); | |
2730 int cmp; | |
2731 | |
2732 min = NULL_TREE; | |
2733 max = NULL_TREE; | |
2734 if (TYPE_UNSIGNED (expr_type) | |
2735 || value_range_nonnegative_p (&vr1)) | |
2736 { | |
2737 /* For unsigned division or when divisor is known | |
2738 to be non-negative, the range has to cover | |
2739 all numbers from 0 to max for positive max | |
2740 and all numbers from min to 0 for negative min. */ | |
2741 cmp = compare_values (vr0.max, zero); | |
2742 if (cmp == -1) | |
2743 { | |
2744 /* When vr0.max < 0, vr1.min != 0 and value | |
2745 ranges for dividend and divisor are available. */ | |
2746 if (vr1.type == VR_RANGE | |
2747 && !symbolic_range_p (&vr0) | |
2748 && !symbolic_range_p (&vr1) | |
2749 && compare_values (vr1.min, zero) != 0) | |
2750 max = int_const_binop (code, vr0.max, vr1.min); | |
2751 else | |
2752 max = zero; | |
2753 } | |
2754 else if (cmp == 0 || cmp == 1) | |
2755 max = vr0.max; | |
2756 else | |
2757 type = VR_VARYING; | |
2758 cmp = compare_values (vr0.min, zero); | |
2759 if (cmp == 1) | |
2760 { | |
2761 /* For unsigned division when value ranges for dividend | |
2762 and divisor are available. */ | |
2763 if (vr1.type == VR_RANGE | |
2764 && !symbolic_range_p (&vr0) | |
2765 && !symbolic_range_p (&vr1) | |
2766 && compare_values (vr1.max, zero) != 0) | |
2767 min = int_const_binop (code, vr0.min, vr1.max); | |
2768 else | |
2769 min = zero; | |
2770 } | |
2771 else if (cmp == 0 || cmp == -1) | |
2772 min = vr0.min; | |
2773 else | |
2774 type = VR_VARYING; | |
2775 } | 1897 } |
2776 else | 1898 else |
1899 set_value_range_to_varying (vr); | |
1900 return; | |
1901 } | |
1902 else if (code == BIT_XOR_EXPR) | |
1903 { | |
1904 if (wide_int_range_bit_xor (wmin, wmax, sign, prec, | |
1905 must_be_nonzero0, | |
1906 may_be_nonzero0, | |
1907 must_be_nonzero1, | |
1908 may_be_nonzero1)) | |
2777 { | 1909 { |
2778 /* Otherwise the range is -max .. max or min .. -min | 1910 min = wide_int_to_tree (expr_type, wmin); |
2779 depending on which bound is bigger in absolute value, | 1911 max = wide_int_to_tree (expr_type, wmax); |
2780 as the division can change the sign. */ | 1912 set_value_range (vr, VR_RANGE, min, max, NULL); |
2781 abs_extent_range (vr, vr0.min, vr0.max); | |
2782 return; | |
2783 } | 1913 } |
2784 if (type == VR_VARYING) | 1914 else |
2785 { | 1915 set_value_range_to_varying (vr); |
2786 set_value_range_to_varying (vr); | |
2787 return; | |
2788 } | |
2789 } | |
2790 else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1)) | |
2791 { | |
2792 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2793 return; | 1916 return; |
2794 } | |
2795 } | |
2796 else if (code == TRUNC_MOD_EXPR) | |
2797 { | |
2798 if (range_is_null (&vr1)) | |
2799 { | |
2800 set_value_range_to_undefined (vr); | |
2801 return; | |
2802 } | |
2803 /* ABS (A % B) < ABS (B) and either | |
2804 0 <= A % B <= A or A <= A % B <= 0. */ | |
2805 type = VR_RANGE; | |
2806 signop sgn = TYPE_SIGN (expr_type); | |
2807 unsigned int prec = TYPE_PRECISION (expr_type); | |
2808 wide_int wmin, wmax, tmp; | |
2809 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1)) | |
2810 { | |
2811 wmax = wi::to_wide (vr1.max) - 1; | |
2812 if (sgn == SIGNED) | |
2813 { | |
2814 tmp = -1 - wi::to_wide (vr1.min); | |
2815 wmax = wi::smax (wmax, tmp); | |
2816 } | |
2817 } | |
2818 else | |
2819 { | |
2820 wmax = wi::max_value (prec, sgn); | |
2821 /* X % INT_MIN may be INT_MAX. */ | |
2822 if (sgn == UNSIGNED) | |
2823 wmax = wmax - 1; | |
2824 } | |
2825 | |
2826 if (sgn == UNSIGNED) | |
2827 wmin = wi::zero (prec); | |
2828 else | |
2829 { | |
2830 wmin = -wmax; | |
2831 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST) | |
2832 { | |
2833 tmp = wi::to_wide (vr0.min); | |
2834 if (wi::gts_p (tmp, 0)) | |
2835 tmp = wi::zero (prec); | |
2836 wmin = wi::smax (wmin, tmp); | |
2837 } | |
2838 } | |
2839 | |
2840 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST) | |
2841 { | |
2842 tmp = wi::to_wide (vr0.max); | |
2843 if (sgn == SIGNED && wi::neg_p (tmp)) | |
2844 tmp = wi::zero (prec); | |
2845 wmax = wi::min (wmax, tmp, sgn); | |
2846 } | |
2847 | |
2848 min = wide_int_to_tree (expr_type, wmin); | |
2849 max = wide_int_to_tree (expr_type, wmax); | |
2850 } | |
2851 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) | |
2852 { | |
2853 bool int_cst_range0, int_cst_range1; | |
2854 wide_int may_be_nonzero0, may_be_nonzero1; | |
2855 wide_int must_be_nonzero0, must_be_nonzero1; | |
2856 | |
2857 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, | |
2858 &may_be_nonzero0, | |
2859 &must_be_nonzero0); | |
2860 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, | |
2861 &may_be_nonzero1, | |
2862 &must_be_nonzero1); | |
2863 | |
2864 if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR) | |
2865 { | |
2866 value_range *vr0p = NULL, *vr1p = NULL; | |
2867 if (range_int_cst_singleton_p (&vr1)) | |
2868 { | |
2869 vr0p = &vr0; | |
2870 vr1p = &vr1; | |
2871 } | |
2872 else if (range_int_cst_singleton_p (&vr0)) | |
2873 { | |
2874 vr0p = &vr1; | |
2875 vr1p = &vr0; | |
2876 } | |
2877 /* For op & or | attempt to optimize: | |
2878 [x, y] op z into [x op z, y op z] | |
2879 if z is a constant which (for op | its bitwise not) has n | |
2880 consecutive least significant bits cleared followed by m 1 | |
2881 consecutive bits set immediately above it and either | |
2882 m + n == precision, or (x >> (m + n)) == (y >> (m + n)). | |
2883 The least significant n bits of all the values in the range are | |
2884 cleared or set, the m bits above it are preserved and any bits | |
2885 above these are required to be the same for all values in the | |
2886 range. */ | |
2887 if (vr0p && range_int_cst_p (vr0p)) | |
2888 { | |
2889 wide_int w = wi::to_wide (vr1p->min); | |
2890 int m = 0, n = 0; | |
2891 if (code == BIT_IOR_EXPR) | |
2892 w = ~w; | |
2893 if (wi::eq_p (w, 0)) | |
2894 n = TYPE_PRECISION (expr_type); | |
2895 else | |
2896 { | |
2897 n = wi::ctz (w); | |
2898 w = ~(w | wi::mask (n, false, w.get_precision ())); | |
2899 if (wi::eq_p (w, 0)) | |
2900 m = TYPE_PRECISION (expr_type) - n; | |
2901 else | |
2902 m = wi::ctz (w) - n; | |
2903 } | |
2904 wide_int mask = wi::mask (m + n, true, w.get_precision ()); | |
2905 if ((mask & wi::to_wide (vr0p->min)) | |
2906 == (mask & wi::to_wide (vr0p->max))) | |
2907 { | |
2908 min = int_const_binop (code, vr0p->min, vr1p->min); | |
2909 max = int_const_binop (code, vr0p->max, vr1p->min); | |
2910 } | |
2911 } | |
2912 } | |
2913 | |
2914 type = VR_RANGE; | |
2915 if (min && max) | |
2916 /* Optimized above already. */; | |
2917 else if (code == BIT_AND_EXPR) | |
2918 { | |
2919 min = wide_int_to_tree (expr_type, | |
2920 must_be_nonzero0 & must_be_nonzero1); | |
2921 wide_int wmax = may_be_nonzero0 & may_be_nonzero1; | |
2922 /* If both input ranges contain only negative values we can | |
2923 truncate the result range maximum to the minimum of the | |
2924 input range maxima. */ | |
2925 if (int_cst_range0 && int_cst_range1 | |
2926 && tree_int_cst_sgn (vr0.max) < 0 | |
2927 && tree_int_cst_sgn (vr1.max) < 0) | |
2928 { | |
2929 wmax = wi::min (wmax, wi::to_wide (vr0.max), | |
2930 TYPE_SIGN (expr_type)); | |
2931 wmax = wi::min (wmax, wi::to_wide (vr1.max), | |
2932 TYPE_SIGN (expr_type)); | |
2933 } | |
2934 /* If either input range contains only non-negative values | |
2935 we can truncate the result range maximum to the respective | |
2936 maximum of the input range. */ | |
2937 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) | |
2938 wmax = wi::min (wmax, wi::to_wide (vr0.max), | |
2939 TYPE_SIGN (expr_type)); | |
2940 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) | |
2941 wmax = wi::min (wmax, wi::to_wide (vr1.max), | |
2942 TYPE_SIGN (expr_type)); | |
2943 max = wide_int_to_tree (expr_type, wmax); | |
2944 cmp = compare_values (min, max); | |
2945 /* PR68217: In case of signed & sign-bit-CST should | |
2946 result in [-INF, 0] instead of [-INF, INF]. */ | |
2947 if (cmp == -2 || cmp == 1) | |
2948 { | |
2949 wide_int sign_bit | |
2950 = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1, | |
2951 TYPE_PRECISION (expr_type)); | |
2952 if (!TYPE_UNSIGNED (expr_type) | |
2953 && ((int_cst_range0 | |
2954 && value_range_constant_singleton (&vr0) | |
2955 && !wi::cmps (wi::to_wide (vr0.min), sign_bit)) | |
2956 || (int_cst_range1 | |
2957 && value_range_constant_singleton (&vr1) | |
2958 && !wi::cmps (wi::to_wide (vr1.min), sign_bit)))) | |
2959 { | |
2960 min = TYPE_MIN_VALUE (expr_type); | |
2961 max = build_int_cst (expr_type, 0); | |
2962 } | |
2963 } | |
2964 } | |
2965 else if (code == BIT_IOR_EXPR) | |
2966 { | |
2967 max = wide_int_to_tree (expr_type, | |
2968 may_be_nonzero0 | may_be_nonzero1); | |
2969 wide_int wmin = must_be_nonzero0 | must_be_nonzero1; | |
2970 /* If the input ranges contain only positive values we can | |
2971 truncate the minimum of the result range to the maximum | |
2972 of the input range minima. */ | |
2973 if (int_cst_range0 && int_cst_range1 | |
2974 && tree_int_cst_sgn (vr0.min) >= 0 | |
2975 && tree_int_cst_sgn (vr1.min) >= 0) | |
2976 { | |
2977 wmin = wi::max (wmin, wi::to_wide (vr0.min), | |
2978 TYPE_SIGN (expr_type)); | |
2979 wmin = wi::max (wmin, wi::to_wide (vr1.min), | |
2980 TYPE_SIGN (expr_type)); | |
2981 } | |
2982 /* If either input range contains only negative values | |
2983 we can truncate the minimum of the result range to the | |
2984 respective minimum range. */ | |
2985 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) | |
2986 wmin = wi::max (wmin, wi::to_wide (vr0.min), | |
2987 TYPE_SIGN (expr_type)); | |
2988 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) | |
2989 wmin = wi::max (wmin, wi::to_wide (vr1.min), | |
2990 TYPE_SIGN (expr_type)); | |
2991 min = wide_int_to_tree (expr_type, wmin); | |
2992 } | |
2993 else if (code == BIT_XOR_EXPR) | |
2994 { | |
2995 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) | |
2996 | ~(may_be_nonzero0 | may_be_nonzero1)); | |
2997 wide_int result_one_bits | |
2998 = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1) | |
2999 | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0)); | |
3000 max = wide_int_to_tree (expr_type, ~result_zero_bits); | |
3001 min = wide_int_to_tree (expr_type, result_one_bits); | |
3002 /* If the range has all positive or all negative values the | |
3003 result is better than VARYING. */ | |
3004 if (tree_int_cst_sgn (min) < 0 | |
3005 || tree_int_cst_sgn (max) >= 0) | |
3006 ; | |
3007 else | |
3008 max = min = NULL_TREE; | |
3009 } | 1917 } |
3010 } | 1918 } |
3011 else | 1919 else |
3012 gcc_unreachable (); | 1920 gcc_unreachable (); |
3013 | 1921 |
3041 } | 1949 } |
3042 else | 1950 else |
3043 set_value_range (vr, type, min, max, NULL); | 1951 set_value_range (vr, type, min, max, NULL); |
3044 } | 1952 } |
3045 | 1953 |
3046 /* Extract range information from a binary expression OP0 CODE OP1 based on | |
3047 the ranges of each of its operands with resulting type EXPR_TYPE. | |
3048 The resulting range is stored in *VR. */ | |
3049 | |
3050 static void | |
3051 extract_range_from_binary_expr (value_range *vr, | |
3052 enum tree_code code, | |
3053 tree expr_type, tree op0, tree op1) | |
3054 { | |
3055 value_range vr0 = VR_INITIALIZER; | |
3056 value_range vr1 = VR_INITIALIZER; | |
3057 | |
3058 /* Get value ranges for each operand. For constant operands, create | |
3059 a new value range with the operand to simplify processing. */ | |
3060 if (TREE_CODE (op0) == SSA_NAME) | |
3061 vr0 = *(get_value_range (op0)); | |
3062 else if (is_gimple_min_invariant (op0)) | |
3063 set_value_range_to_value (&vr0, op0, NULL); | |
3064 else | |
3065 set_value_range_to_varying (&vr0); | |
3066 | |
3067 if (TREE_CODE (op1) == SSA_NAME) | |
3068 vr1 = *(get_value_range (op1)); | |
3069 else if (is_gimple_min_invariant (op1)) | |
3070 set_value_range_to_value (&vr1, op1, NULL); | |
3071 else | |
3072 set_value_range_to_varying (&vr1); | |
3073 | |
3074 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1); | |
3075 | |
3076 /* Try harder for PLUS and MINUS if the range of one operand is symbolic | |
3077 and based on the other operand, for example if it was deduced from a | |
3078 symbolic comparison. When a bound of the range of the first operand | |
3079 is invariant, we set the corresponding bound of the new range to INF | |
3080 in order to avoid recursing on the range of the second operand. */ | |
3081 if (vr->type == VR_VARYING | |
3082 && (code == PLUS_EXPR || code == MINUS_EXPR) | |
3083 && TREE_CODE (op1) == SSA_NAME | |
3084 && vr0.type == VR_RANGE | |
3085 && symbolic_range_based_on_p (&vr0, op1)) | |
3086 { | |
3087 const bool minus_p = (code == MINUS_EXPR); | |
3088 value_range n_vr1 = VR_INITIALIZER; | |
3089 | |
3090 /* Try with VR0 and [-INF, OP1]. */ | |
3091 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min)) | |
3092 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL); | |
3093 | |
3094 /* Try with VR0 and [OP1, +INF]. */ | |
3095 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max)) | |
3096 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL); | |
3097 | |
3098 /* Try with VR0 and [OP1, OP1]. */ | |
3099 else | |
3100 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL); | |
3101 | |
3102 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1); | |
3103 } | |
3104 | |
3105 if (vr->type == VR_VARYING | |
3106 && (code == PLUS_EXPR || code == MINUS_EXPR) | |
3107 && TREE_CODE (op0) == SSA_NAME | |
3108 && vr1.type == VR_RANGE | |
3109 && symbolic_range_based_on_p (&vr1, op0)) | |
3110 { | |
3111 const bool minus_p = (code == MINUS_EXPR); | |
3112 value_range n_vr0 = VR_INITIALIZER; | |
3113 | |
3114 /* Try with [-INF, OP0] and VR1. */ | |
3115 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min)) | |
3116 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL); | |
3117 | |
3118 /* Try with [OP0, +INF] and VR1. */ | |
3119 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max)) | |
3120 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL); | |
3121 | |
3122 /* Try with [OP0, OP0] and VR1. */ | |
3123 else | |
3124 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL); | |
3125 | |
3126 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1); | |
3127 } | |
3128 | |
3129 /* If we didn't derive a range for MINUS_EXPR, and | |
3130 op1's range is ~[op0,op0] or vice-versa, then we | |
3131 can derive a non-null range. This happens often for | |
3132 pointer subtraction. */ | |
3133 if (vr->type == VR_VARYING | |
3134 && code == MINUS_EXPR | |
3135 && TREE_CODE (op0) == SSA_NAME | |
3136 && ((vr0.type == VR_ANTI_RANGE | |
3137 && vr0.min == op1 | |
3138 && vr0.min == vr0.max) | |
3139 || (vr1.type == VR_ANTI_RANGE | |
3140 && vr1.min == op0 | |
3141 && vr1.min == vr1.max))) | |
3142 set_value_range_to_nonnull (vr, TREE_TYPE (op0)); | |
3143 } | |
3144 | |
3145 /* Extract range information from a unary operation CODE based on | 1954 /* Extract range information from a unary operation CODE based on |
3146 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE. | 1955 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE. |
3147 The resulting range is stored in *VR. */ | 1956 The resulting range is stored in *VR. */ |
3148 | 1957 |
3149 void | 1958 void |
3150 extract_range_from_unary_expr (value_range *vr, | 1959 extract_range_from_unary_expr (value_range *vr, |
3151 enum tree_code code, tree type, | 1960 enum tree_code code, tree type, |
3152 value_range *vr0_, tree op0_type) | 1961 const value_range *vr0_, tree op0_type) |
3153 { | 1962 { |
3154 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; | 1963 signop sign = TYPE_SIGN (type); |
1964 unsigned int prec = TYPE_PRECISION (type); | |
1965 value_range vr0 = *vr0_; | |
1966 value_range vrtem0, vrtem1; | |
3155 | 1967 |
3156 /* VRP only operates on integral and pointer types. */ | 1968 /* VRP only operates on integral and pointer types. */ |
3157 if (!(INTEGRAL_TYPE_P (op0_type) | 1969 if (!(INTEGRAL_TYPE_P (op0_type) |
3158 || POINTER_TYPE_P (op0_type)) | 1970 || POINTER_TYPE_P (op0_type)) |
3159 || !(INTEGRAL_TYPE_P (type) | 1971 || !(INTEGRAL_TYPE_P (type) |
3162 set_value_range_to_varying (vr); | 1974 set_value_range_to_varying (vr); |
3163 return; | 1975 return; |
3164 } | 1976 } |
3165 | 1977 |
3166 /* If VR0 is UNDEFINED, so is the result. */ | 1978 /* If VR0 is UNDEFINED, so is the result. */ |
3167 if (vr0.type == VR_UNDEFINED) | 1979 if (vr0.undefined_p ()) |
3168 { | 1980 { |
3169 set_value_range_to_undefined (vr); | 1981 set_value_range_to_undefined (vr); |
3170 return; | 1982 return; |
3171 } | 1983 } |
3172 | 1984 |
3173 /* Handle operations that we express in terms of others. */ | 1985 /* Handle operations that we express in terms of others. */ |
3174 if (code == PAREN_EXPR || code == OBJ_TYPE_REF) | 1986 if (code == PAREN_EXPR || code == OBJ_TYPE_REF) |
3175 { | 1987 { |
3176 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ | 1988 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ |
3177 copy_value_range (vr, &vr0); | 1989 vr->deep_copy (&vr0); |
3178 return; | 1990 return; |
3179 } | 1991 } |
3180 else if (code == NEGATE_EXPR) | 1992 else if (code == NEGATE_EXPR) |
3181 { | 1993 { |
3182 /* -X is simply 0 - X, so re-use existing code that also handles | 1994 /* -X is simply 0 - X, so re-use existing code that also handles |
3183 anti-ranges fine. */ | 1995 anti-ranges fine. */ |
3184 value_range zero = VR_INITIALIZER; | 1996 value_range zero; |
3185 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); | 1997 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); |
3186 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); | 1998 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); |
3187 return; | 1999 return; |
3188 } | 2000 } |
3189 else if (code == BIT_NOT_EXPR) | 2001 else if (code == BIT_NOT_EXPR) |
3190 { | 2002 { |
3191 /* ~X is simply -1 - X, so re-use existing code that also handles | 2003 /* ~X is simply -1 - X, so re-use existing code that also handles |
3192 anti-ranges fine. */ | 2004 anti-ranges fine. */ |
3193 value_range minusone = VR_INITIALIZER; | 2005 value_range minusone; |
3194 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); | 2006 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); |
3195 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, | 2007 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, |
3196 type, &minusone, &vr0); | 2008 type, &minusone, &vr0); |
3197 return; | 2009 return; |
3198 } | 2010 } |
3199 | 2011 |
3200 /* Now canonicalize anti-ranges to ranges when they are not symbolic | 2012 /* Now canonicalize anti-ranges to ranges when they are not symbolic |
3201 and express op ~[] as (op []') U (op []''). */ | 2013 and express op ~[] as (op []') U (op []''). */ |
3202 if (vr0.type == VR_ANTI_RANGE | 2014 if (vr0.kind () == VR_ANTI_RANGE |
3203 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | 2015 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |
3204 { | 2016 { |
3205 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type); | 2017 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type); |
3206 if (vrtem1.type != VR_UNDEFINED) | 2018 if (!vrtem1.undefined_p ()) |
3207 { | 2019 { |
3208 value_range vrres = VR_INITIALIZER; | 2020 value_range vrres; |
3209 extract_range_from_unary_expr (&vrres, code, type, | 2021 extract_range_from_unary_expr (&vrres, code, type, |
3210 &vrtem1, op0_type); | 2022 &vrtem1, op0_type); |
3211 vrp_meet (vr, &vrres); | 2023 vr->union_ (&vrres); |
3212 } | 2024 } |
3213 return; | 2025 return; |
3214 } | 2026 } |
3215 | 2027 |
3216 if (CONVERT_EXPR_CODE_P (code)) | 2028 if (CONVERT_EXPR_CODE_P (code)) |
3217 { | 2029 { |
3218 tree inner_type = op0_type; | 2030 tree inner_type = op0_type; |
3219 tree outer_type = type; | 2031 tree outer_type = type; |
3220 | 2032 |
3221 /* If the expression evaluates to a pointer, we are only interested in | 2033 /* If the expression involves a pointer, we are only interested in |
3222 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ | 2034 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). |
3223 if (POINTER_TYPE_P (type)) | 2035 |
3224 { | 2036 This may lose precision when converting (char *)~[0,2] to |
3225 if (range_is_nonnull (&vr0)) | 2037 int, because we'll forget that the pointer can also not be 1 |
2038 or 2. In practice we don't care, as this is some idiot | |
2039 storing a magic constant to a pointer. */ | |
2040 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type)) | |
2041 { | |
2042 if (!range_includes_zero_p (&vr0)) | |
3226 set_value_range_to_nonnull (vr, type); | 2043 set_value_range_to_nonnull (vr, type); |
3227 else if (range_is_null (&vr0)) | 2044 else if (range_is_null (&vr0)) |
3228 set_value_range_to_null (vr, type); | 2045 set_value_range_to_null (vr, type); |
3229 else | 2046 else |
3230 set_value_range_to_varying (vr); | 2047 set_value_range_to_varying (vr); |
3231 return; | 2048 return; |
3232 } | 2049 } |
3233 | 2050 |
3234 /* If VR0 is varying and we increase the type precision, assume | 2051 /* The POINTER_TYPE_P code above will have dealt with all |
3235 a full range for the following transformation. */ | 2052 pointer anti-ranges. Any remaining anti-ranges at this point |
3236 if (vr0.type == VR_VARYING | 2053 will be integer conversions from SSA names that will be |
3237 && INTEGRAL_TYPE_P (inner_type) | 2054 normalized into VARYING. For instance: ~[x_55, x_55]. */ |
3238 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) | 2055 gcc_assert (vr0.kind () != VR_ANTI_RANGE |
3239 { | 2056 || TREE_CODE (vr0.min ()) != INTEGER_CST); |
3240 vr0.type = VR_RANGE; | 2057 |
3241 vr0.min = TYPE_MIN_VALUE (inner_type); | 2058 /* NOTES: Previously we were returning VARYING for all symbolics, but |
3242 vr0.max = TYPE_MAX_VALUE (inner_type); | 2059 we can do better by treating them as [-MIN, +MAX]. For |
3243 } | 2060 example, converting [SYM, SYM] from INT to LONG UNSIGNED, |
3244 | 2061 we can return: ~[0x8000000, 0xffffffff7fffffff]. |
3245 /* If VR0 is a constant range or anti-range and the conversion is | 2062 |
3246 not truncating we can convert the min and max values and | 2063 We were also failing to convert ~[0,0] from char* to unsigned, |
3247 canonicalize the resulting range. Otherwise we can do the | 2064 instead choosing to return VR_VARYING. Now we return ~[0,0]. */ |
3248 conversion if the size of the range is less than what the | 2065 wide_int vr0_min, vr0_max, wmin, wmax; |
3249 precision of the target type can represent and the range is | 2066 signop inner_sign = TYPE_SIGN (inner_type); |
3250 not an anti-range. */ | 2067 signop outer_sign = TYPE_SIGN (outer_type); |
3251 if ((vr0.type == VR_RANGE | 2068 unsigned inner_prec = TYPE_PRECISION (inner_type); |
3252 || vr0.type == VR_ANTI_RANGE) | 2069 unsigned outer_prec = TYPE_PRECISION (outer_type); |
3253 && TREE_CODE (vr0.min) == INTEGER_CST | 2070 extract_range_into_wide_ints (&vr0, inner_sign, inner_prec, |
3254 && TREE_CODE (vr0.max) == INTEGER_CST | 2071 vr0_min, vr0_max); |
3255 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) | 2072 if (wide_int_range_convert (wmin, wmax, |
3256 || (vr0.type == VR_RANGE | 2073 inner_sign, inner_prec, |
3257 && integer_zerop (int_const_binop (RSHIFT_EXPR, | 2074 outer_sign, outer_prec, |
3258 int_const_binop (MINUS_EXPR, vr0.max, vr0.min), | 2075 vr0_min, vr0_max)) |
3259 size_int (TYPE_PRECISION (outer_type))))))) | 2076 { |
3260 { | 2077 tree min = wide_int_to_tree (outer_type, wmin); |
3261 tree new_min, new_max; | 2078 tree max = wide_int_to_tree (outer_type, wmax); |
3262 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min), | 2079 vr->set_and_canonicalize (VR_RANGE, min, max, NULL); |
3263 0, false); | 2080 } |
3264 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max), | 2081 else |
3265 0, false); | 2082 set_value_range_to_varying (vr); |
3266 set_and_canonicalize_value_range (vr, vr0.type, | |
3267 new_min, new_max, NULL); | |
3268 return; | |
3269 } | |
3270 | |
3271 set_value_range_to_varying (vr); | |
3272 return; | 2083 return; |
3273 } | 2084 } |
3274 else if (code == ABS_EXPR) | 2085 else if (code == ABS_EXPR) |
3275 { | 2086 { |
3276 tree min, max; | 2087 wide_int wmin, wmax; |
3277 int cmp; | 2088 wide_int vr0_min, vr0_max; |
3278 | 2089 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max); |
3279 /* Pass through vr0 in the easy cases. */ | 2090 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max, |
3280 if (TYPE_UNSIGNED (type) | 2091 TYPE_OVERFLOW_UNDEFINED (type))) |
3281 || value_range_nonnegative_p (&vr0)) | 2092 set_value_range (vr, VR_RANGE, |
3282 { | 2093 wide_int_to_tree (type, wmin), |
3283 copy_value_range (vr, &vr0); | 2094 wide_int_to_tree (type, wmax), NULL); |
3284 return; | |
3285 } | |
3286 | |
3287 /* For the remaining varying or symbolic ranges we can't do anything | |
3288 useful. */ | |
3289 if (vr0.type == VR_VARYING | |
3290 || symbolic_range_p (&vr0)) | |
3291 { | |
3292 set_value_range_to_varying (vr); | |
3293 return; | |
3294 } | |
3295 | |
3296 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a | |
3297 useful range. */ | |
3298 if (!TYPE_OVERFLOW_UNDEFINED (type) | |
3299 && ((vr0.type == VR_RANGE | |
3300 && vrp_val_is_min (vr0.min)) | |
3301 || (vr0.type == VR_ANTI_RANGE | |
3302 && !vrp_val_is_min (vr0.min)))) | |
3303 { | |
3304 set_value_range_to_varying (vr); | |
3305 return; | |
3306 } | |
3307 | |
3308 /* ABS_EXPR may flip the range around, if the original range | |
3309 included negative values. */ | |
3310 if (!vrp_val_is_min (vr0.min)) | |
3311 min = fold_unary_to_constant (code, type, vr0.min); | |
3312 else | 2095 else |
3313 min = TYPE_MAX_VALUE (type); | 2096 set_value_range_to_varying (vr); |
3314 | |
3315 if (!vrp_val_is_min (vr0.max)) | |
3316 max = fold_unary_to_constant (code, type, vr0.max); | |
3317 else | |
3318 max = TYPE_MAX_VALUE (type); | |
3319 | |
3320 cmp = compare_values (min, max); | |
3321 | |
3322 /* If a VR_ANTI_RANGEs contains zero, then we have | |
3323 ~[-INF, min(MIN, MAX)]. */ | |
3324 if (vr0.type == VR_ANTI_RANGE) | |
3325 { | |
3326 if (range_includes_zero_p (vr0.min, vr0.max) == 1) | |
3327 { | |
3328 /* Take the lower of the two values. */ | |
3329 if (cmp != 1) | |
3330 max = min; | |
3331 | |
3332 /* Create ~[-INF, min (abs(MIN), abs(MAX))] | |
3333 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when | |
3334 flag_wrapv is set and the original anti-range doesn't include | |
3335 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ | |
3336 if (TYPE_OVERFLOW_WRAPS (type)) | |
3337 { | |
3338 tree type_min_value = TYPE_MIN_VALUE (type); | |
3339 | |
3340 min = (vr0.min != type_min_value | |
3341 ? int_const_binop (PLUS_EXPR, type_min_value, | |
3342 build_int_cst (TREE_TYPE (type_min_value), 1)) | |
3343 : type_min_value); | |
3344 } | |
3345 else | |
3346 min = TYPE_MIN_VALUE (type); | |
3347 } | |
3348 else | |
3349 { | |
3350 /* All else has failed, so create the range [0, INF], even for | |
3351 flag_wrapv since TYPE_MIN_VALUE is in the original | |
3352 anti-range. */ | |
3353 vr0.type = VR_RANGE; | |
3354 min = build_int_cst (type, 0); | |
3355 max = TYPE_MAX_VALUE (type); | |
3356 } | |
3357 } | |
3358 | |
3359 /* If the range contains zero then we know that the minimum value in the | |
3360 range will be zero. */ | |
3361 else if (range_includes_zero_p (vr0.min, vr0.max) == 1) | |
3362 { | |
3363 if (cmp == 1) | |
3364 max = min; | |
3365 min = build_int_cst (type, 0); | |
3366 } | |
3367 else | |
3368 { | |
3369 /* If the range was reversed, swap MIN and MAX. */ | |
3370 if (cmp == 1) | |
3371 std::swap (min, max); | |
3372 } | |
3373 | |
3374 cmp = compare_values (min, max); | |
3375 if (cmp == -2 || cmp == 1) | |
3376 { | |
3377 /* If the new range has its limits swapped around (MIN > MAX), | |
3378 then the operation caused one of them to wrap around, mark | |
3379 the new range VARYING. */ | |
3380 set_value_range_to_varying (vr); | |
3381 } | |
3382 else | |
3383 set_value_range (vr, vr0.type, min, max, NULL); | |
3384 return; | 2097 return; |
3385 } | 2098 } |
3386 | 2099 |
3387 /* For unhandled operations fall back to varying. */ | 2100 /* For unhandled operations fall back to varying. */ |
3388 set_value_range_to_varying (vr); | 2101 set_value_range_to_varying (vr); |
3389 return; | 2102 return; |
3390 } | 2103 } |
3391 | 2104 |
3392 | |
3393 /* Extract range information from a unary expression CODE OP0 based on | |
3394 the range of its operand with resulting type TYPE. | |
3395 The resulting range is stored in *VR. */ | |
3396 | |
3397 static void | |
3398 extract_range_from_unary_expr (value_range *vr, enum tree_code code, | |
3399 tree type, tree op0) | |
3400 { | |
3401 value_range vr0 = VR_INITIALIZER; | |
3402 | |
3403 /* Get value ranges for the operand. For constant operands, create | |
3404 a new value range with the operand to simplify processing. */ | |
3405 if (TREE_CODE (op0) == SSA_NAME) | |
3406 vr0 = *(get_value_range (op0)); | |
3407 else if (is_gimple_min_invariant (op0)) | |
3408 set_value_range_to_value (&vr0, op0, NULL); | |
3409 else | |
3410 set_value_range_to_varying (&vr0); | |
3411 | |
3412 extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0)); | |
3413 } | |
3414 | |
3415 | |
3416 /* Extract range information from a conditional expression STMT based on | |
3417 the ranges of each of its operands and the expression code. */ | |
3418 | |
3419 static void | |
3420 extract_range_from_cond_expr (value_range *vr, gassign *stmt) | |
3421 { | |
3422 tree op0, op1; | |
3423 value_range vr0 = VR_INITIALIZER; | |
3424 value_range vr1 = VR_INITIALIZER; | |
3425 | |
3426 /* Get value ranges for each operand. For constant operands, create | |
3427 a new value range with the operand to simplify processing. */ | |
3428 op0 = gimple_assign_rhs2 (stmt); | |
3429 if (TREE_CODE (op0) == SSA_NAME) | |
3430 vr0 = *(get_value_range (op0)); | |
3431 else if (is_gimple_min_invariant (op0)) | |
3432 set_value_range_to_value (&vr0, op0, NULL); | |
3433 else | |
3434 set_value_range_to_varying (&vr0); | |
3435 | |
3436 op1 = gimple_assign_rhs3 (stmt); | |
3437 if (TREE_CODE (op1) == SSA_NAME) | |
3438 vr1 = *(get_value_range (op1)); | |
3439 else if (is_gimple_min_invariant (op1)) | |
3440 set_value_range_to_value (&vr1, op1, NULL); | |
3441 else | |
3442 set_value_range_to_varying (&vr1); | |
3443 | |
3444 /* The resulting value range is the union of the operand ranges */ | |
3445 copy_value_range (vr, &vr0); | |
3446 vrp_meet (vr, &vr1); | |
3447 } | |
3448 | |
3449 | |
3450 /* Extract range information from a comparison expression EXPR based | |
3451 on the range of its operand and the expression code. */ | |
3452 | |
3453 static void | |
3454 extract_range_from_comparison (value_range *vr, enum tree_code code, | |
3455 tree type, tree op0, tree op1) | |
3456 { | |
3457 bool sop; | |
3458 tree val; | |
3459 | |
3460 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, | |
3461 NULL); | |
3462 if (val) | |
3463 { | |
3464 /* Since this expression was found on the RHS of an assignment, | |
3465 its type may be different from _Bool. Convert VAL to EXPR's | |
3466 type. */ | |
3467 val = fold_convert (type, val); | |
3468 if (is_gimple_min_invariant (val)) | |
3469 set_value_range_to_value (vr, val, vr->equiv); | |
3470 else | |
3471 set_value_range (vr, VR_RANGE, val, val, vr->equiv); | |
3472 } | |
3473 else | |
3474 /* The result of a comparison is always true or false. */ | |
3475 set_value_range_to_truthvalue (vr, type); | |
3476 } | |
3477 | |
/* Helper function for simplify_internal_call_using_ranges and
   extract_range_basic.  Return true if OP0 SUBCODE OP1 for
   SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
   always overflow.  Set *OVF to true if it is known to always
   overflow.  */

static bool
check_for_binary_op_overflow (enum tree_code subcode, tree type,
			      tree op0, tree op1, bool *ovf)
{
  /* Fetch (or synthesize) value ranges for both operands.  */
  value_range vr0 = VR_INITIALIZER;
  value_range vr1 = VR_INITIALIZER;
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *get_value_range (op0);
  else if (TREE_CODE (op0) == INTEGER_CST)
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *get_value_range (op1);
  else if (TREE_CODE (op1) == INTEGER_CST)
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* If a range is not a pair of clean INTEGER_CST bounds, widen it to
     the full span of the operand's type.  */
  if (!range_int_cst_p (&vr0)
      || TREE_OVERFLOW (vr0.min)
      || TREE_OVERFLOW (vr0.max))
    {
      vr0.min = vrp_val_min (TREE_TYPE (op0));
      vr0.max = vrp_val_max (TREE_TYPE (op0));
    }
  if (!range_int_cst_p (&vr1)
      || TREE_OVERFLOW (vr1.min)
      || TREE_OVERFLOW (vr1.max))
    {
      vr1.min = vrp_val_min (TREE_TYPE (op1));
      vr1.max = vrp_val_max (TREE_TYPE (op1));
    }

  /* Probe the extreme corners of the operand ranges.  For MINUS_EXPR
     the second operand's bounds are swapped since the result decreases
     as op1 grows.  If the corners disagree on overflow, give up.  */
  *ovf = arith_overflowed_p (subcode, type, vr0.min,
			     subcode == MINUS_EXPR ? vr1.max : vr1.min);
  if (arith_overflowed_p (subcode, type, vr0.max,
			  subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
    return false;
  /* Multiplication is not monotonic in either operand, so all four
     corners must agree.  */
  if (subcode == MULT_EXPR)
    {
      if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
	  || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
	return false;
    }
  if (*ovf)
    {
      /* So far we found that there is an overflow on the boundaries.
	 That doesn't prove that there is an overflow even for all values
	 in between the boundaries.  For that compute widest_int range
	 of the result and see if it doesn't overlap the range of
	 type.  */
      widest_int wmin, wmax;
      widest_int w[4];
      int i;
      w[0] = wi::to_widest (vr0.min);
      w[1] = wi::to_widest (vr0.max);
      w[2] = wi::to_widest (vr1.min);
      w[3] = wi::to_widest (vr1.max);
      /* Evaluate the operation at all four corners in infinite
	 precision; i&1 selects the op0 bound, (i&2)/2 the op1 bound.  */
      for (i = 0; i < 4; i++)
	{
	  widest_int wt;
	  switch (subcode)
	    {
	    case PLUS_EXPR:
	      wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MINUS_EXPR:
	      wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MULT_EXPR:
	      wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  if (i == 0)
	    {
	      wmin = wt;
	      wmax = wt;
	    }
	  else
	    {
	      wmin = wi::smin (wmin, wt);
	      wmax = wi::smax (wmax, wt);
	    }
	}
      /* The result of op0 CODE op1 is known to be in range
	 [wmin, wmax].  */
      widest_int wtmin = wi::to_widest (vrp_val_min (type));
      widest_int wtmax = wi::to_widest (vrp_val_max (type));
      /* If all values in [wmin, wmax] are smaller than
	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
	 the arithmetic operation will always overflow.  */
      if (wmax < wtmin || wmin > wtmax)
	return true;
      return false;
    }
  return true;
}
3584 | |
/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   Store the result in *VR.  */

static void
extract_range_basic (value_range *vr, gimple *stmt)
{
  bool sop;
  tree type = gimple_expr_type (stmt);

  if (is_gimple_call (stmt))
    {
      tree arg;
      int mini, maxi, zerov = 0, prec;
      enum tree_code subcode = ERROR_MARK;
      combined_fn cfn = gimple_call_combined_fn (stmt);
      scalar_int_mode mode;

      switch (cfn)
	{
	case CFN_BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
	      && cfun->after_inlining)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_CFN_FFS:
	CASE_CFN_POPCOUNT:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if ((vr0->type == VR_RANGE
		   && range_includes_zero_p (vr0->min, vr0->max) == 0)
		  || (vr0->type == VR_ANTI_RANGE
		      && range_includes_zero_p (vr0->min, vr0->max) == 1))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !operand_less_p (vr0->min,
				      build_zero_cst (TREE_TYPE (vr0->min))))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_CFN_PARITY:
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_CFN_CLZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
	  if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST)
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST)
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_CFN_CTZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
	  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if ((vr0->type == VR_RANGE
		   && integer_nonzerop (vr0->min))
		  || (vr0->type == VR_ANTI_RANGE
		      && integer_zerop (vr0->min)))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST)
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_CFN_CLRSB:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for all the bit-operation builtins above:
	     the result is known to be in [mini, maxi].  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	case CFN_UBSAN_CHECK_ADD:
	  subcode = PLUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_SUB:
	  subcode = MINUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_MUL:
	  subcode = MULT_EXPR;
	  break;
	case CFN_GOACC_DIM_SIZE:
	case CFN_GOACC_DIM_POS:
	  /* Optimizing these two internal functions helps the loop
	     optimizer eliminate outer comparisons.  Size is [1,N]
	     and pos is [0,N-1].  */
	  {
	    bool is_pos = cfn == CFN_GOACC_DIM_POS;
	    int axis = oacc_get_ifn_dim_arg (stmt);
	    int size = oacc_get_fn_dim_size (current_function_decl, axis);

	    if (!size)
	      /* If it's dynamic, the backend might know a hardware
		 limitation.  */
	      size = targetm.goacc.dim_limit (axis);

	    tree type = TREE_TYPE (gimple_call_lhs (stmt));
	    set_value_range (vr, VR_RANGE,
			     build_int_cst (type, is_pos ? 0 : 1),
			     size ? build_int_cst (type, size - is_pos)
				  : vrp_val_max (type), NULL);
	  }
	  return;
	case CFN_BUILT_IN_STRLEN:
	  /* strlen results fit in ptrdiff_t; when the lhs has the same
	     precision, the result is [0, PTRDIFF_MAX - 1].  */
	  if (tree lhs = gimple_call_lhs (stmt))
	    if (ptrdiff_type_node
		&& (TYPE_PRECISION (ptrdiff_type_node)
		    == TYPE_PRECISION (TREE_TYPE (lhs))))
	      {
		tree type = TREE_TYPE (lhs);
		tree max = vrp_val_max (ptrdiff_type_node);
		wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
		tree range_min = build_zero_cst (type);
		tree range_max = wide_int_to_tree (type, wmax - 1);
		set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
		return;
	      }
	  break;
	default:
	  break;
	}
      /* The UBSAN_CHECK_* cases above fall through to here with
	 SUBCODE set to the wrapped arithmetic operation.  */
      if (subcode != ERROR_MARK)
	{
	  bool saved_flag_wrapv = flag_wrapv;
	  /* Pretend the arithmetics is wrapping.  If there is
	     any overflow, we'll complain, but will actually do
	     wrapping operation.  */
	  flag_wrapv = 1;
	  extract_range_from_binary_expr (vr, subcode, type,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 1));
	  flag_wrapv = saved_flag_wrapv;

	  /* If for both arguments vrp_valueize returned non-NULL,
	     this should have been already folded and if not, it
	     wasn't folded because of overflow.  Avoid removing the
	     UBSAN_CHECK_* calls in that case.  */
	  if (vr->type == VR_RANGE
	      && (vr->min == vr->max
		  || operand_equal_p (vr->min, vr->max, 0)))
	    set_value_range_to_varying (vr);
	  return;
	}
    }
  /* Handle extraction of the two results (result of arithmetics and
     a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
     internal function.  Similarly from ATOMIC_COMPARE_EXCHANGE.  */
  else if (is_gimple_assign (stmt)
	   && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
	       || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
	   && INTEGRAL_TYPE_P (type))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      tree op = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
	{
	  gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
	  if (is_gimple_call (g) && gimple_call_internal_p (g))
	    {
	      enum tree_code subcode = ERROR_MARK;
	      switch (gimple_call_internal_fn (g))
		{
		case IFN_ADD_OVERFLOW:
		  subcode = PLUS_EXPR;
		  break;
		case IFN_SUB_OVERFLOW:
		  subcode = MINUS_EXPR;
		  break;
		case IFN_MUL_OVERFLOW:
		  subcode = MULT_EXPR;
		  break;
		case IFN_ATOMIC_COMPARE_EXCHANGE:
		  if (code == IMAGPART_EXPR)
		    {
		      /* This is the boolean return value whether compare and
			 exchange changed anything or not.  */
		      set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
				       build_int_cst (type, 1), NULL);
		      return;
		    }
		  break;
		default:
		  break;
		}
	      if (subcode != ERROR_MARK)
		{
		  tree op0 = gimple_call_arg (g, 0);
		  tree op1 = gimple_call_arg (g, 1);
		  if (code == IMAGPART_EXPR)
		    {
		      /* IMAGPART_EXPR is the overflow flag: either a
			 compile-time-known 0/1, or [0, 1].  */
		      bool ovf = false;
		      if (check_for_binary_op_overflow (subcode, type,
							op0, op1, &ovf))
			set_value_range_to_value (vr,
						  build_int_cst (type, ovf),
						  NULL);
		      else if (TYPE_PRECISION (type) == 1
			       && !TYPE_UNSIGNED (type))
			set_value_range_to_varying (vr);
		      else
			set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
					 build_int_cst (type, 1), NULL);
		    }
		  else if (types_compatible_p (type, TREE_TYPE (op0))
			   && types_compatible_p (type, TREE_TYPE (op1)))
		    {
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_binary_expr (vr, subcode, type,
						      op0, op1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  else
		    {
		      /* Operand types differ from the result type:
			 convert their ranges to TYPE first.  */
		      value_range vr0 = VR_INITIALIZER;
		      value_range vr1 = VR_INITIALIZER;
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_unary_expr (&vr0, NOP_EXPR,
						     type, op0);
		      extract_range_from_unary_expr (&vr1, NOP_EXPR,
						     type, op1);
		      extract_range_from_binary_expr_1 (vr, subcode, type,
							&vr0, &vr1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  return;
		}
	    }
	}
    }
  /* Generic fallback: ask fold whether the statement's result is
     known nonnegative or nonzero.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type);
  else if (vrp_stmt_computes_nonzero (stmt))
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
3935 | |
3936 | |
3937 /* Try to compute a useful range out of assignment STMT and store it | |
3938 in *VR. */ | |
3939 | |
3940 static void | |
3941 extract_range_from_assignment (value_range *vr, gassign *stmt) | |
3942 { | |
3943 enum tree_code code = gimple_assign_rhs_code (stmt); | |
3944 | |
3945 if (code == ASSERT_EXPR) | |
3946 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); | |
3947 else if (code == SSA_NAME) | |
3948 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); | |
3949 else if (TREE_CODE_CLASS (code) == tcc_binary) | |
3950 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), | |
3951 gimple_expr_type (stmt), | |
3952 gimple_assign_rhs1 (stmt), | |
3953 gimple_assign_rhs2 (stmt)); | |
3954 else if (TREE_CODE_CLASS (code) == tcc_unary) | |
3955 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), | |
3956 gimple_expr_type (stmt), | |
3957 gimple_assign_rhs1 (stmt)); | |
3958 else if (code == COND_EXPR) | |
3959 extract_range_from_cond_expr (vr, stmt); | |
3960 else if (TREE_CODE_CLASS (code) == tcc_comparison) | |
3961 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), | |
3962 gimple_expr_type (stmt), | |
3963 gimple_assign_rhs1 (stmt), | |
3964 gimple_assign_rhs2 (stmt)); | |
3965 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS | |
3966 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) | |
3967 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); | |
3968 else | |
3969 set_value_range_to_varying (vr); | |
3970 | |
3971 if (vr->type == VR_VARYING) | |
3972 extract_range_basic (vr, stmt); | |
3973 } | |
3974 | |
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  */

static void
adjust_range_with_scev (value_range *vr, struct loop *loop,
			gimple *stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Replace INIT/STEP by singleton-range constants when known —
     constants are easier for the comparisons below.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
				get_chrec_loop (chrec), true))
    return;

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  value_range initvr = VR_INITIALIZER;

		  if (TREE_CODE (init) == SSA_NAME)
		    initvr = *(get_value_range (init));
		  else if (is_gimple_min_invariant (init))
		    set_value_range_to_value (&initvr, init, NULL);
		  else
		    return;

		  /* Check if init + nit * step overflows.  Though we checked
		     scev {init, step}_loop doesn't wrap, it is not enough
		     because the loop may exit immediately.  Overflow could
		     happen in the plus expression in this case.  */
		  if ((dir == EV_DIR_DECREASES
		       && compare_values (maxvr.min, initvr.min) != -1)
		      || (dir == EV_DIR_GROWS
			  && compare_values (maxvr.max, initvr.max) != 1))
		    return;

		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  */
	  if (compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1)
    return;

  /* Even for valid range info, sometimes overflow flag will leak in.
     As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
     drop them.  */
  if (TREE_OVERFLOW_P (min))
    min = drop_tree_overflow (min);
  if (TREE_OVERFLOW_P (max))
    max = drop_tree_overflow (max);

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}
4162 | |
4163 | |
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
   assumed signed overflow is undefined.  */


static tree
compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* The anti-range and the range have identical bounds, so EQ is
	 always false and NE always true.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      std::swap (vr0, vr1);
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	return boolean_true_node;

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  gcc_unreachable ();
}
4308 | |
4309 | |
4310 /* Given a value range VR, a value VAL and a comparison code COMP, return | |
4311 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the | |
4312 values in VR. Return BOOLEAN_FALSE_NODE if the comparison | |
4313 always returns false. Return NULL_TREE if it is not always | |
4314 possible to determine the value of the comparison. Also set | |
4315 *STRICT_OVERFLOW_P to indicate whether comparison evaluation | |
4316 assumed signed overflow is undefined. */ | |
4317 | |
4318 static tree | |
4319 compare_range_with_value (enum tree_code comp, value_range *vr, tree val, | |
4320 bool *strict_overflow_p) | |
4321 { | |
4322 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) | |
4323 return NULL_TREE; | |
4324 | |
4325 /* Anti-ranges need to be handled separately. */ | |
4326 if (vr->type == VR_ANTI_RANGE) | |
4327 { | |
4328 /* For anti-ranges, the only predicates that we can compute at | |
4329 compile time are equality and inequality. */ | |
4330 if (comp == GT_EXPR | |
4331 || comp == GE_EXPR | |
4332 || comp == LT_EXPR | |
4333 || comp == LE_EXPR) | |
4334 return NULL_TREE; | |
4335 | |
4336 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ | |
4337 if (value_inside_range (val, vr->min, vr->max) == 1) | |
4338 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; | |
4339 | |
4340 return NULL_TREE; | |
4341 } | |
4342 | |
4343 if (comp == EQ_EXPR) | |
4344 { | |
4345 /* EQ_EXPR may only be computed if VR represents exactly | |
4346 one value. */ | |
4347 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) | |
4348 { | |
4349 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); | |
4350 if (cmp == 0) | |
4351 return boolean_true_node; | |
4352 else if (cmp == -1 || cmp == 1 || cmp == 2) | |
4353 return boolean_false_node; | |
4354 } | |
4355 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 | |
4356 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) | |
4357 return boolean_false_node; | |
4358 | |
4359 return NULL_TREE; | |
4360 } | |
4361 else if (comp == NE_EXPR) | |
4362 { | |
4363 /* If VAL is not inside VR, then they are always different. */ | |
4364 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 | |
4365 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) | |
4366 return boolean_true_node; | |
4367 | |
4368 /* If VR represents exactly one value equal to VAL, then return | |
4369 false. */ | |
4370 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 | |
4371 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) | |
4372 return boolean_false_node; | |
4373 | |
4374 /* Otherwise, they may or may not be different. */ | |
4375 return NULL_TREE; | |
4376 } | |
4377 else if (comp == LT_EXPR || comp == LE_EXPR) | |
4378 { | |
4379 int tst; | |
4380 | |
4381 /* If VR is to the left of VAL, return true. */ | |
4382 tst = compare_values_warnv (vr->max, val, strict_overflow_p); | |
4383 if ((comp == LT_EXPR && tst == -1) | |
4384 || (comp == LE_EXPR && (tst == -1 || tst == 0))) | |
4385 return boolean_true_node; | |
4386 | |
4387 /* If VR is to the right of VAL, return false. */ | |
4388 tst = compare_values_warnv (vr->min, val, strict_overflow_p); | |
4389 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) | |
4390 || (comp == LE_EXPR && tst == 1)) | |
4391 return boolean_false_node; | |
4392 | |
4393 /* Otherwise, we don't know. */ | |
4394 return NULL_TREE; | |
4395 } | |
4396 else if (comp == GT_EXPR || comp == GE_EXPR) | |
4397 { | |
4398 int tst; | |
4399 | |
4400 /* If VR is to the right of VAL, return true. */ | |
4401 tst = compare_values_warnv (vr->min, val, strict_overflow_p); | |
4402 if ((comp == GT_EXPR && tst == 1) | |
4403 || (comp == GE_EXPR && (tst == 0 || tst == 1))) | |
4404 return boolean_true_node; | |
4405 | |
4406 /* If VR is to the left of VAL, return false. */ | |
4407 tst = compare_values_warnv (vr->max, val, strict_overflow_p); | |
4408 if ((comp == GT_EXPR && (tst == -1 || tst == 0)) | |
4409 || (comp == GE_EXPR && tst == -1)) | |
4410 return boolean_false_node; | |
4411 | |
4412 /* Otherwise, we don't know. */ | |
4413 return NULL_TREE; | |
4414 } | |
4415 | |
4416 gcc_unreachable (); | |
4417 } | |
4418 | |
4419 | |
4420 /* Debugging dumps. */ | 2105 /* Debugging dumps. */ |
4421 | 2106 |
4422 void dump_value_range (FILE *, const value_range *); | 2107 void dump_value_range (FILE *, const value_range *); |
4423 void debug_value_range (value_range *); | 2108 void debug_value_range (const value_range *); |
4424 void dump_all_value_ranges (FILE *); | 2109 void dump_all_value_ranges (FILE *); |
4425 void debug_all_value_ranges (void); | |
4426 void dump_vr_equiv (FILE *, bitmap); | 2110 void dump_vr_equiv (FILE *, bitmap); |
4427 void debug_vr_equiv (bitmap); | 2111 void debug_vr_equiv (bitmap); |
4428 | 2112 |
4429 | |
4430 /* Dump value range VR to FILE. */ | |
4431 | |
4432 void | 2113 void |
4433 dump_value_range (FILE *file, const value_range *vr) | 2114 dump_value_range (FILE *file, const value_range *vr) |
4434 { | 2115 { |
4435 if (vr == NULL) | 2116 if (!vr) |
4436 fprintf (file, "[]"); | 2117 fprintf (file, "[]"); |
4437 else if (vr->type == VR_UNDEFINED) | |
4438 fprintf (file, "UNDEFINED"); | |
4439 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
4440 { | |
4441 tree type = TREE_TYPE (vr->min); | |
4442 | |
4443 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); | |
4444 | |
4445 if (INTEGRAL_TYPE_P (type) | |
4446 && !TYPE_UNSIGNED (type) | |
4447 && vrp_val_is_min (vr->min)) | |
4448 fprintf (file, "-INF"); | |
4449 else | |
4450 print_generic_expr (file, vr->min); | |
4451 | |
4452 fprintf (file, ", "); | |
4453 | |
4454 if (INTEGRAL_TYPE_P (type) | |
4455 && vrp_val_is_max (vr->max)) | |
4456 fprintf (file, "+INF"); | |
4457 else | |
4458 print_generic_expr (file, vr->max); | |
4459 | |
4460 fprintf (file, "]"); | |
4461 | |
4462 if (vr->equiv) | |
4463 { | |
4464 bitmap_iterator bi; | |
4465 unsigned i, c = 0; | |
4466 | |
4467 fprintf (file, " EQUIVALENCES: { "); | |
4468 | |
4469 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) | |
4470 { | |
4471 print_generic_expr (file, ssa_name (i)); | |
4472 fprintf (file, " "); | |
4473 c++; | |
4474 } | |
4475 | |
4476 fprintf (file, "} (%u elements)", c); | |
4477 } | |
4478 } | |
4479 else if (vr->type == VR_VARYING) | |
4480 fprintf (file, "VARYING"); | |
4481 else | 2118 else |
4482 fprintf (file, "INVALID RANGE"); | 2119 vr->dump (file); |
4483 } | 2120 } |
4484 | |
4485 | 2121 |
4486 /* Dump value range VR to stderr. */ | 2122 /* Dump value range VR to stderr. */ |
4487 | 2123 |
4488 DEBUG_FUNCTION void | 2124 DEBUG_FUNCTION void |
4489 debug_value_range (value_range *vr) | 2125 debug_value_range (const value_range *vr) |
4490 { | 2126 { |
4491 dump_value_range (stderr, vr); | 2127 vr->dump (); |
4492 fprintf (stderr, "\n"); | |
4493 } | |
4494 | |
4495 | |
4496 /* Dump value ranges of all SSA_NAMEs to FILE. */ | |
4497 | |
4498 void | |
4499 dump_all_value_ranges (FILE *file) | |
4500 { | |
4501 size_t i; | |
4502 | |
4503 for (i = 0; i < num_vr_values; i++) | |
4504 { | |
4505 if (vr_value[i]) | |
4506 { | |
4507 print_generic_expr (file, ssa_name (i)); | |
4508 fprintf (file, ": "); | |
4509 dump_value_range (file, vr_value[i]); | |
4510 fprintf (file, "\n"); | |
4511 } | |
4512 } | |
4513 | |
4514 fprintf (file, "\n"); | |
4515 } | |
4516 | |
4517 | |
4518 /* Dump all value ranges to stderr. */ | |
4519 | |
4520 DEBUG_FUNCTION void | |
4521 debug_all_value_ranges (void) | |
4522 { | |
4523 dump_all_value_ranges (stderr); | |
4524 } | 2128 } |
4525 | 2129 |
4526 | 2130 |
4527 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, | 2131 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
4528 create a new SSA name N and return the assertion assignment | 2132 create a new SSA name N and return the assertion assignment |
4569 /* If the range of values taken by OP can be inferred after STMT executes, | 2173 /* If the range of values taken by OP can be inferred after STMT executes, |
4570 return the comparison code (COMP_CODE_P) and value (VAL_P) that | 2174 return the comparison code (COMP_CODE_P) and value (VAL_P) that |
4571 describes the inferred range. Return true if a range could be | 2175 describes the inferred range. Return true if a range could be |
4572 inferred. */ | 2176 inferred. */ |
4573 | 2177 |
4574 static bool | 2178 bool |
4575 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p) | 2179 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p) |
4576 { | 2180 { |
4577 *val_p = NULL_TREE; | 2181 *val_p = NULL_TREE; |
4578 *comp_code_p = ERROR_MARK; | 2182 *comp_code_p = ERROR_MARK; |
4579 | 2183 |
4688 tree name, tree expr, enum tree_code comp_code, tree val) | 2292 tree name, tree expr, enum tree_code comp_code, tree val) |
4689 { | 2293 { |
4690 assert_info info; | 2294 assert_info info; |
4691 info.comp_code = comp_code; | 2295 info.comp_code = comp_code; |
4692 info.name = name; | 2296 info.name = name; |
2297 if (TREE_OVERFLOW_P (val)) | |
2298 val = drop_tree_overflow (val); | |
4693 info.val = val; | 2299 info.val = val; |
4694 info.expr = expr; | 2300 info.expr = expr; |
4695 asserts.safe_push (info); | 2301 asserts.safe_push (info); |
2302 if (dump_enabled_p ()) | |
2303 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS, | |
2304 "Adding assert for %T from %T %s %T\n", | |
2305 name, expr, op_symbol_code (comp_code), val); | |
4696 } | 2306 } |
4697 | 2307 |
4698 /* If NAME doesn't have an ASSERT_EXPR registered for asserting | 2308 /* If NAME doesn't have an ASSERT_EXPR registered for asserting |
4699 'EXPR COMP_CODE VAL' at a location that dominates block BB or | 2309 'EXPR COMP_CODE VAL' at a location that dominates block BB or |
4700 E->DEST, then register this location as a possible insertion point | 2310 E->DEST, then register this location as a possible insertion point |
5004 | 2614 |
5005 These statements are left as-is in the IL to facilitate discovery of | 2615 These statements are left as-is in the IL to facilitate discovery of |
5006 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But | 2616 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But |
5007 the alternate range representation is often useful within VRP. */ | 2617 the alternate range representation is often useful within VRP. */ |
5008 | 2618 |
5009 static bool | 2619 bool |
5010 overflow_comparison_p (tree_code code, tree name, tree val, | 2620 overflow_comparison_p (tree_code code, tree name, tree val, |
5011 bool use_equiv_p, tree *new_cst) | 2621 bool use_equiv_p, tree *new_cst) |
5012 { | 2622 { |
5013 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst)) | 2623 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst)) |
5014 return true; | 2624 return true; |
5090 | 2700 |
5091 /* Build an expression for the range test. */ | 2701 /* Build an expression for the range test. */ |
5092 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); | 2702 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); |
5093 if (cst2 != NULL_TREE) | 2703 if (cst2 != NULL_TREE) |
5094 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | 2704 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |
5095 | |
5096 if (dump_file) | |
5097 { | |
5098 fprintf (dump_file, "Adding assert for "); | |
5099 print_generic_expr (dump_file, name3); | |
5100 fprintf (dump_file, " from "); | |
5101 print_generic_expr (dump_file, tmp); | |
5102 fprintf (dump_file, "\n"); | |
5103 } | |
5104 | |
5105 add_assert_info (asserts, name3, tmp, comp_code, val); | 2705 add_assert_info (asserts, name3, tmp, comp_code, val); |
5106 } | 2706 } |
5107 | 2707 |
5108 /* If name2 is used later, create an ASSERT_EXPR for it. */ | 2708 /* If name2 is used later, create an ASSERT_EXPR for it. */ |
5109 if (name2 != NULL_TREE | 2709 if (name2 != NULL_TREE |
5117 tmp = name2; | 2717 tmp = name2; |
5118 if (TREE_TYPE (name) != TREE_TYPE (name2)) | 2718 if (TREE_TYPE (name) != TREE_TYPE (name2)) |
5119 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); | 2719 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); |
5120 if (cst2 != NULL_TREE) | 2720 if (cst2 != NULL_TREE) |
5121 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | 2721 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |
5122 | |
5123 if (dump_file) | |
5124 { | |
5125 fprintf (dump_file, "Adding assert for "); | |
5126 print_generic_expr (dump_file, name2); | |
5127 fprintf (dump_file, " from "); | |
5128 print_generic_expr (dump_file, tmp); | |
5129 fprintf (dump_file, "\n"); | |
5130 } | |
5131 | |
5132 add_assert_info (asserts, name2, tmp, comp_code, val); | 2722 add_assert_info (asserts, name2, tmp, comp_code, val); |
5133 } | 2723 } |
5134 } | 2724 } |
5135 | 2725 |
5136 /* In the case of post-in/decrement tests like if (i++) ... and uses | 2726 /* In the case of post-in/decrement tests like if (i++) ... and uses |
5249 { | 2839 { |
5250 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; | 2840 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; |
5251 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, | 2841 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, |
5252 build_int_cst (TREE_TYPE (name2), 1)); | 2842 build_int_cst (TREE_TYPE (name2), 1)); |
5253 } | 2843 } |
5254 | |
5255 if (dump_file) | |
5256 { | |
5257 fprintf (dump_file, "Adding assert for "); | |
5258 print_generic_expr (dump_file, name2); | |
5259 fprintf (dump_file, " from "); | |
5260 print_generic_expr (dump_file, tmp); | |
5261 fprintf (dump_file, "\n"); | |
5262 } | |
5263 | |
5264 add_assert_info (asserts, name2, tmp, new_comp_code, cst); | 2844 add_assert_info (asserts, name2, tmp, new_comp_code, cst); |
5265 } | 2845 } |
5266 } | 2846 } |
5267 | 2847 |
5268 /* Add asserts for NAME cmp CST and NAME being defined as | 2848 /* Add asserts for NAME cmp CST and NAME being defined as |
5323 else | 2903 else |
5324 new_val = wide_int_to_tree (TREE_TYPE (val2), mask); | 2904 new_val = wide_int_to_tree (TREE_TYPE (val2), mask); |
5325 } | 2905 } |
5326 | 2906 |
5327 if (new_val) | 2907 if (new_val) |
5328 { | 2908 add_assert_info (asserts, name2, tmp, new_comp_code, new_val); |
5329 if (dump_file) | |
5330 { | |
5331 fprintf (dump_file, "Adding assert for "); | |
5332 print_generic_expr (dump_file, name2); | |
5333 fprintf (dump_file, " from "); | |
5334 print_generic_expr (dump_file, tmp); | |
5335 fprintf (dump_file, "\n"); | |
5336 } | |
5337 | |
5338 add_assert_info (asserts, name2, tmp, new_comp_code, new_val); | |
5339 } | |
5340 } | 2909 } |
5341 | 2910 |
5342 /* Add asserts for NAME cmp CST and NAME being defined as | 2911 /* Add asserts for NAME cmp CST and NAME being defined as |
5343 NAME = NAME2 & CST2. | 2912 NAME = NAME2 & CST2. |
5344 | 2913 |
5562 tmp = build2 (PLUS_EXPR, type, tmp, | 3131 tmp = build2 (PLUS_EXPR, type, tmp, |
5563 wide_int_to_tree (type, -minv)); | 3132 wide_int_to_tree (type, -minv)); |
5564 maxv2 = maxv - minv; | 3133 maxv2 = maxv - minv; |
5565 } | 3134 } |
5566 new_val = wide_int_to_tree (type, maxv2); | 3135 new_val = wide_int_to_tree (type, maxv2); |
5567 | |
5568 if (dump_file) | |
5569 { | |
5570 fprintf (dump_file, "Adding assert for "); | |
5571 print_generic_expr (dump_file, names[i]); | |
5572 fprintf (dump_file, " from "); | |
5573 print_generic_expr (dump_file, tmp); | |
5574 fprintf (dump_file, "\n"); | |
5575 } | |
5576 | |
5577 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val); | 3136 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val); |
5578 } | 3137 } |
5579 } | 3138 } |
5580 } | 3139 } |
5581 } | 3140 } |
5668 has a form of | 3227 has a form of |
5669 (X & 11...100..0) COND_OP XX...X00...0 | 3228 (X & 11...100..0) COND_OP XX...X00...0 |
5670 Such comparison can yield assertions like | 3229 Such comparison can yield assertions like |
5671 X >= XX...X00...0 | 3230 X >= XX...X00...0 |
5672 X <= XX...X11...1 | 3231 X <= XX...X11...1 |
5673 in case of COND_OP being NE_EXPR or | 3232 in case of COND_OP being EQ_EXPR or |
5674 X < XX...X00...0 | 3233 X < XX...X00...0 |
5675 X > XX...X11...1 | 3234 X > XX...X11...1 |
5676 in case of EQ_EXPR. */ | 3235 in case of NE_EXPR. */ |
5677 | 3236 |
5678 static bool | 3237 static bool |
5679 is_masked_range_test (tree name, tree valt, enum tree_code cond_code, | 3238 is_masked_range_test (tree name, tree valt, enum tree_code cond_code, |
5680 tree *new_name, tree *low, enum tree_code *low_code, | 3239 tree *new_name, tree *low, enum tree_code *low_code, |
5681 tree *high, enum tree_code *high_code) | 3240 tree *high, enum tree_code *high_code) |
5691 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST) | 3250 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST) |
5692 return false; | 3251 return false; |
5693 | 3252 |
5694 wi::tree_to_wide_ref mask = wi::to_wide (maskt); | 3253 wi::tree_to_wide_ref mask = wi::to_wide (maskt); |
5695 wide_int inv_mask = ~mask; | 3254 wide_int inv_mask = ~mask; |
3255 /* Must have been removed by now so don't bother optimizing. */ | |
3256 if (mask == 0 || inv_mask == 0) | |
3257 return false; | |
3258 | |
5696 /* Assume VALT is INTEGER_CST. */ | 3259 /* Assume VALT is INTEGER_CST. */ |
5697 wi::tree_to_wide_ref val = wi::to_wide (valt); | 3260 wi::tree_to_wide_ref val = wi::to_wide (valt); |
5698 | 3261 |
5699 if ((inv_mask & (inv_mask + 1)) != 0 | 3262 if ((inv_mask & (inv_mask + 1)) != 0 |
5700 || (val & mask) != val) | 3263 || (val & mask) != val) |
5731 | 3294 |
5732 *new_name = t; | 3295 *new_name = t; |
5733 *low = wide_int_to_tree (type, val); | 3296 *low = wide_int_to_tree (type, val); |
5734 *high = wide_int_to_tree (type, val | inv_mask); | 3297 *high = wide_int_to_tree (type, val | inv_mask); |
5735 | 3298 |
5736 if (wi::neg_p (val, TYPE_SIGN (type))) | |
5737 std::swap (*low, *high); | |
5738 | |
5739 return true; | 3299 return true; |
5740 } | 3300 } |
5741 | 3301 |
5742 /* Try to register an edge assertion for SSA name NAME on edge E for | 3302 /* Try to register an edge assertion for SSA name NAME on edge E for |
5743 the condition COND contributing to the conditional jump pointed to by | 3303 the condition COND contributing to the conditional jump pointed to by |
5744 SI. */ | 3304 SI. */ |
5745 | 3305 |
5746 static void | 3306 void |
5747 register_edge_assert_for (tree name, edge e, | 3307 register_edge_assert_for (tree name, edge e, |
5748 enum tree_code cond_code, tree cond_op0, | 3308 enum tree_code cond_code, tree cond_op0, |
5749 tree cond_op1, vec<assert_info> &asserts) | 3309 tree cond_op1, vec<assert_info> &asserts) |
5750 { | 3310 { |
5751 tree val; | 3311 tree val; |
5953 /* Build a vector of case labels sorted by destination label. */ | 3513 /* Build a vector of case labels sorted by destination label. */ |
5954 ci = XNEWVEC (struct case_info, n); | 3514 ci = XNEWVEC (struct case_info, n); |
5955 for (idx = 0; idx < n; ++idx) | 3515 for (idx = 0; idx < n; ++idx) |
5956 { | 3516 { |
5957 ci[idx].expr = gimple_switch_label (last, idx); | 3517 ci[idx].expr = gimple_switch_label (last, idx); |
5958 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); | 3518 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr)); |
5959 } | 3519 } |
5960 edge default_edge = find_edge (bb, ci[0].bb); | 3520 edge default_edge = find_edge (bb, ci[0].bb); |
5961 qsort (ci, n, sizeof (struct case_info), compare_case_labels); | 3521 qsort (ci, n, sizeof (struct case_info), compare_case_labels); |
5962 | 3522 |
5963 for (idx = 0; idx < n; ++idx) | 3523 for (idx = 0; idx < n; ++idx) |
6653 | 4213 |
6654 free (asserts_for); | 4214 free (asserts_for); |
6655 BITMAP_FREE (need_assert_for); | 4215 BITMAP_FREE (need_assert_for); |
6656 } | 4216 } |
6657 | 4217 |
4218 class vrp_prop : public ssa_propagation_engine | |
4219 { | |
4220 public: | |
4221 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE; | |
4222 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE; | |
4223 | |
4224 void vrp_initialize (void); | |
4225 void vrp_finalize (bool); | |
4226 void check_all_array_refs (void); | |
4227 void check_array_ref (location_t, tree, bool); | |
4228 void check_mem_ref (location_t, tree, bool); | |
4229 void search_for_addr_array (tree, location_t); | |
4230 | |
4231 class vr_values vr_values; | |
4232 /* Temporary delegator to minimize code churn. */ | |
4233 value_range *get_value_range (const_tree op) | |
4234 { return vr_values.get_value_range (op); } | |
4235 void set_defs_to_varying (gimple *stmt) | |
4236 { return vr_values.set_defs_to_varying (stmt); } | |
4237 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p, | |
4238 tree *output_p, value_range *vr) | |
4239 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); } | |
4240 bool update_value_range (const_tree op, value_range *vr) | |
4241 { return vr_values.update_value_range (op, vr); } | |
4242 void extract_range_basic (value_range *vr, gimple *stmt) | |
4243 { vr_values.extract_range_basic (vr, stmt); } | |
4244 void extract_range_from_phi_node (gphi *phi, value_range *vr) | |
4245 { vr_values.extract_range_from_phi_node (phi, vr); } | |
4246 }; | |
6658 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays | 4247 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays |
6659 and "struct" hacks. If VRP can determine that the | 4248 and "struct" hacks. If VRP can determine that the |
6660 array subscript is a constant, check if it is outside valid | 4249 array subscript is a constant, check if it is outside valid |
6661 range. If the array subscript is a RANGE, warn if it is | 4250 range. If the array subscript is a RANGE, warn if it is |
6662 non-overlapping with valid range. | 4251 non-overlapping with valid range. |
6663 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */ | 4252 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */ |
6664 | 4253 |
6665 static void | 4254 void |
6666 check_array_ref (location_t location, tree ref, bool ignore_off_by_one) | 4255 vrp_prop::check_array_ref (location_t location, tree ref, |
6667 { | 4256 bool ignore_off_by_one) |
6668 value_range *vr = NULL; | 4257 { |
4258 const value_range *vr = NULL; | |
6669 tree low_sub, up_sub; | 4259 tree low_sub, up_sub; |
6670 tree low_bound, up_bound, up_bound_p1; | 4260 tree low_bound, up_bound, up_bound_p1; |
6671 | 4261 |
6672 if (TREE_NO_WARNING (ref)) | 4262 if (TREE_NO_WARNING (ref)) |
6673 return; | 4263 return; |
6674 | 4264 |
6675 low_sub = up_sub = TREE_OPERAND (ref, 1); | 4265 low_sub = up_sub = TREE_OPERAND (ref, 1); |
6676 up_bound = array_ref_up_bound (ref); | 4266 up_bound = array_ref_up_bound (ref); |
6677 | 4267 |
6678 /* Can not check flexible arrays. */ | |
6679 if (!up_bound | 4268 if (!up_bound |
6680 || TREE_CODE (up_bound) != INTEGER_CST) | 4269 || TREE_CODE (up_bound) != INTEGER_CST |
6681 return; | 4270 || (warn_array_bounds < 2 |
6682 | 4271 && array_at_struct_end_p (ref))) |
6683 /* Accesses to trailing arrays via pointers may access storage | 4272 { |
6684 beyond the types array bounds. */ | 4273 /* Accesses to trailing arrays via pointers may access storage |
6685 if (warn_array_bounds < 2 | 4274 beyond the types array bounds. For such arrays, or for flexible |
6686 && array_at_struct_end_p (ref)) | 4275 array members, as well as for other arrays of an unknown size, |
6687 return; | 4276 replace the upper bound with a more permissive one that assumes |
4277 the size of the largest object is PTRDIFF_MAX. */ | |
4278 tree eltsize = array_ref_element_size (ref); | |
4279 | |
4280 if (TREE_CODE (eltsize) != INTEGER_CST | |
4281 || integer_zerop (eltsize)) | |
4282 { | |
4283 up_bound = NULL_TREE; | |
4284 up_bound_p1 = NULL_TREE; | |
4285 } | |
4286 else | |
4287 { | |
4288 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node); | |
4289 tree arg = TREE_OPERAND (ref, 0); | |
4290 poly_int64 off; | |
4291 | |
4292 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0)) | |
4293 maxbound = wide_int_to_tree (sizetype, | |
4294 wi::sub (wi::to_wide (maxbound), | |
4295 off)); | |
4296 else | |
4297 maxbound = fold_convert (sizetype, maxbound); | |
4298 | |
4299 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize); | |
4300 | |
4301 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1, | |
4302 build_int_cst (ptrdiff_type_node, 1)); | |
4303 } | |
4304 } | |
4305 else | |
4306 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, | |
4307 build_int_cst (TREE_TYPE (up_bound), 1)); | |
6688 | 4308 |
6689 low_bound = array_ref_low_bound (ref); | 4309 low_bound = array_ref_low_bound (ref); |
6690 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, | 4310 |
6691 build_int_cst (TREE_TYPE (up_bound), 1)); | 4311 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0)); |
4312 | |
4313 bool warned = false; | |
6692 | 4314 |
6693 /* Empty array. */ | 4315 /* Empty array. */ |
6694 if (tree_int_cst_equal (low_bound, up_bound_p1)) | 4316 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1)) |
6695 { | 4317 warned = warning_at (location, OPT_Warray_bounds, |
6696 warning_at (location, OPT_Warray_bounds, | 4318 "array subscript %E is above array bounds of %qT", |
6697 "array subscript is above array bounds"); | 4319 low_bound, artype); |
6698 TREE_NO_WARNING (ref) = 1; | |
6699 } | |
6700 | 4320 |
6701 if (TREE_CODE (low_sub) == SSA_NAME) | 4321 if (TREE_CODE (low_sub) == SSA_NAME) |
6702 { | 4322 { |
6703 vr = get_value_range (low_sub); | 4323 vr = get_value_range (low_sub); |
6704 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | 4324 if (!vr->undefined_p () && !vr->varying_p ()) |
6705 { | 4325 { |
6706 low_sub = vr->type == VR_RANGE ? vr->max : vr->min; | 4326 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min (); |
6707 up_sub = vr->type == VR_RANGE ? vr->min : vr->max; | 4327 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max (); |
6708 } | 4328 } |
6709 } | 4329 } |
6710 | 4330 |
6711 if (vr && vr->type == VR_ANTI_RANGE) | 4331 if (vr && vr->kind () == VR_ANTI_RANGE) |
6712 { | 4332 { |
6713 if (TREE_CODE (up_sub) == INTEGER_CST | 4333 if (up_bound |
4334 && TREE_CODE (up_sub) == INTEGER_CST | |
6714 && (ignore_off_by_one | 4335 && (ignore_off_by_one |
6715 ? tree_int_cst_lt (up_bound, up_sub) | 4336 ? tree_int_cst_lt (up_bound, up_sub) |
6716 : tree_int_cst_le (up_bound, up_sub)) | 4337 : tree_int_cst_le (up_bound, up_sub)) |
6717 && TREE_CODE (low_sub) == INTEGER_CST | 4338 && TREE_CODE (low_sub) == INTEGER_CST |
6718 && tree_int_cst_le (low_sub, low_bound)) | 4339 && tree_int_cst_le (low_sub, low_bound)) |
6719 { | 4340 warned = warning_at (location, OPT_Warray_bounds, |
6720 warning_at (location, OPT_Warray_bounds, | 4341 "array subscript [%E, %E] is outside " |
6721 "array subscript is outside array bounds"); | 4342 "array bounds of %qT", |
6722 TREE_NO_WARNING (ref) = 1; | 4343 low_sub, up_sub, artype); |
6723 } | 4344 } |
6724 } | 4345 else if (up_bound |
6725 else if (TREE_CODE (up_sub) == INTEGER_CST | 4346 && TREE_CODE (up_sub) == INTEGER_CST |
6726 && (ignore_off_by_one | 4347 && (ignore_off_by_one |
6727 ? !tree_int_cst_le (up_sub, up_bound_p1) | 4348 ? !tree_int_cst_le (up_sub, up_bound_p1) |
6728 : !tree_int_cst_le (up_sub, up_bound))) | 4349 : !tree_int_cst_le (up_sub, up_bound))) |
6729 { | 4350 { |
6730 if (dump_file && (dump_flags & TDF_DETAILS)) | 4351 if (dump_file && (dump_flags & TDF_DETAILS)) |
6731 { | 4352 { |
6732 fprintf (dump_file, "Array bound warning for "); | 4353 fprintf (dump_file, "Array bound warning for "); |
6733 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); | 4354 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
6734 fprintf (dump_file, "\n"); | 4355 fprintf (dump_file, "\n"); |
6735 } | 4356 } |
6736 warning_at (location, OPT_Warray_bounds, | 4357 warned = warning_at (location, OPT_Warray_bounds, |
6737 "array subscript is above array bounds"); | 4358 "array subscript %E is above array bounds of %qT", |
6738 TREE_NO_WARNING (ref) = 1; | 4359 up_sub, artype); |
6739 } | 4360 } |
6740 else if (TREE_CODE (low_sub) == INTEGER_CST | 4361 else if (TREE_CODE (low_sub) == INTEGER_CST |
6741 && tree_int_cst_lt (low_sub, low_bound)) | 4362 && tree_int_cst_lt (low_sub, low_bound)) |
6742 { | 4363 { |
6743 if (dump_file && (dump_flags & TDF_DETAILS)) | 4364 if (dump_file && (dump_flags & TDF_DETAILS)) |
6744 { | 4365 { |
6745 fprintf (dump_file, "Array bound warning for "); | 4366 fprintf (dump_file, "Array bound warning for "); |
6746 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); | 4367 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); |
6747 fprintf (dump_file, "\n"); | 4368 fprintf (dump_file, "\n"); |
6748 } | 4369 } |
4370 warned = warning_at (location, OPT_Warray_bounds, | |
4371 "array subscript %E is below array bounds of %qT", | |
4372 low_sub, artype); | |
4373 } | |
4374 | |
4375 if (warned) | |
4376 { | |
4377 ref = TREE_OPERAND (ref, 0); | |
4378 | |
4379 if (DECL_P (ref)) | |
4380 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref); | |
4381 | |
4382 TREE_NO_WARNING (ref) = 1; | |
4383 } | |
4384 } | |
4385 | |
4386 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds | |
4387 references to string constants. If VRP can determine that the array | |
4388 subscript is a constant, check if it is outside valid range. | |
4389 If the array subscript is a RANGE, warn if it is non-overlapping | |
4390 with valid range. | |
4391 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR | |
4392 (used to allow one-past-the-end indices for code that takes | |
4393 the address of the just-past-the-end element of an array). */ | |
4394 | |
4395 void | |
4396 vrp_prop::check_mem_ref (location_t location, tree ref, | |
4397 bool ignore_off_by_one) | |
4398 { | |
4399 if (TREE_NO_WARNING (ref)) | |
4400 return; | |
4401 | |
4402 tree arg = TREE_OPERAND (ref, 0); | |
4403 /* The constant and variable offset of the reference. */ | |
4404 tree cstoff = TREE_OPERAND (ref, 1); | |
4405 tree varoff = NULL_TREE; | |
4406 | |
4407 const offset_int maxobjsize = tree_to_shwi (max_object_size ()); | |
4408 | |
4409 /* The array or string constant bounds in bytes. Initially set | |
4410 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is | |
4411 determined. */ | |
4412 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize }; | |
4413 | |
4414 /* The minimum and maximum intermediate offset. For a reference | |
4415 to be valid, not only does the final offset/subscript must be | |
4416 in bounds but all intermediate offsets should be as well. | |
4417 GCC may be able to deal gracefully with such out-of-bounds | |
4418 offsets so the checking is only enabled at -Warray-bounds=2 | |
4419 where it may help detect bugs in uses of the intermediate | |
4420 offsets that could otherwise not be detectable. */ | |
4421 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff)); | |
4422 offset_int extrema[2] = { 0, wi::abs (ioff) }; | |
4423 | |
4424 /* The range of the byte offset into the reference. */ | |
4425 offset_int offrange[2] = { 0, 0 }; | |
4426 | |
4427 const value_range *vr = NULL; | |
4428 | |
4429 /* Determine the offsets and increment OFFRANGE for the bounds of each. | |
4431 The loop computes the range of the final offset for expressions | |
4431 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs | |
4432 in some range. */ | |
4433 while (TREE_CODE (arg) == SSA_NAME) | |
4434 { | |
4435 gimple *def = SSA_NAME_DEF_STMT (arg); | |
4436 if (!is_gimple_assign (def)) | |
4437 break; | |
4438 | |
4439 tree_code code = gimple_assign_rhs_code (def); | |
4440 if (code == POINTER_PLUS_EXPR) | |
4441 { | |
4442 arg = gimple_assign_rhs1 (def); | |
4443 varoff = gimple_assign_rhs2 (def); | |
4444 } | |
4445 else if (code == ASSERT_EXPR) | |
4446 { | |
4447 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0); | |
4448 continue; | |
4449 } | |
4450 else | |
4451 return; | |
4452 | |
4453 /* VAROFF should always be a SSA_NAME here (and not even | |
4454 INTEGER_CST) but there's no point in taking chances. */ | |
4455 if (TREE_CODE (varoff) != SSA_NAME) | |
4456 break; | |
4457 | |
4458 vr = get_value_range (varoff); | |
4459 if (!vr || vr->undefined_p () || vr->varying_p ()) | |
4460 break; | |
4461 | |
4462 if (!vr->constant_p ()) | |
4463 break; | |
4464 | |
4465 if (vr->kind () == VR_RANGE) | |
4466 { | |
4467 if (tree_int_cst_lt (vr->min (), vr->max ())) | |
4468 { | |
4469 offset_int min | |
4470 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ())); | |
4471 offset_int max | |
4472 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ())); | |
4473 if (min < max) | |
4474 { | |
4475 offrange[0] += min; | |
4476 offrange[1] += max; | |
4477 } | |
4478 else | |
4479 { | |
4480 offrange[0] += max; | |
4481 offrange[1] += min; | |
4482 } | |
4483 } | |
4484 else | |
4485 { | |
4486 /* Conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE] | |
4487 to OFFRANGE. */ | |
4488 offrange[0] += arrbounds[0]; | |
4489 offrange[1] += arrbounds[1]; | |
4490 } | |
4491 } | |
4492 else | |
4493 { | |
4494 /* For an anti-range, analogously to the above, conservatively | |
4495 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */ | |
4496 offrange[0] += arrbounds[0]; | |
4497 offrange[1] += arrbounds[1]; | |
4498 } | |
4499 | |
4500 /* Keep track of the minimum and maximum offset. */ | |
4501 if (offrange[1] < 0 && offrange[1] < extrema[0]) | |
4502 extrema[0] = offrange[1]; | |
4503 if (offrange[0] > 0 && offrange[0] > extrema[1]) | |
4504 extrema[1] = offrange[0]; | |
4505 | |
4506 if (offrange[0] < arrbounds[0]) | |
4507 offrange[0] = arrbounds[0]; | |
4508 | |
4509 if (offrange[1] > arrbounds[1]) | |
4510 offrange[1] = arrbounds[1]; | |
4511 } | |
4512 | |
4513 if (TREE_CODE (arg) == ADDR_EXPR) | |
4514 { | |
4515 arg = TREE_OPERAND (arg, 0); | |
4516 if (TREE_CODE (arg) != STRING_CST | |
4517 && TREE_CODE (arg) != VAR_DECL) | |
4518 return; | |
4519 } | |
4520 else | |
4521 return; | |
4522 | |
4523 /* The type of the object being referred to. It can be an array, | |
4524 string literal, or a non-array type when the MEM_REF represents | |
4525 a reference/subscript via a pointer to an object that is not | |
4526 an element of an array. References to members of structs and | |
4527 unions are excluded because MEM_REF doesn't make it possible | |
4528 to identify the member where the reference originated. | |
4529 Incomplete types are excluded as well because their size is | |
4530 not known. */ | |
4531 tree reftype = TREE_TYPE (arg); | |
4532 if (POINTER_TYPE_P (reftype) | |
4533 || !COMPLETE_TYPE_P (reftype) | |
4534 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST | |
4535 || RECORD_OR_UNION_TYPE_P (reftype)) | |
4536 return; | |
4537 | |
4538 offset_int eltsize; | |
4539 if (TREE_CODE (reftype) == ARRAY_TYPE) | |
4540 { | |
4541 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype))); | |
4542 | |
4543 if (tree dom = TYPE_DOMAIN (reftype)) | |
4544 { | |
4545 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) }; | |
4546 if (array_at_struct_end_p (arg) | |
4547 || !bnds[0] || !bnds[1]) | |
4548 { | |
4549 arrbounds[0] = 0; | |
4550 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize)); | |
4551 } | |
4552 else | |
4553 { | |
4554 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize; | |
4555 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize; | |
4556 } | |
4557 } | |
4558 else | |
4559 { | |
4560 arrbounds[0] = 0; | |
4561 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize)); | |
4562 } | |
4563 | |
4564 if (TREE_CODE (ref) == MEM_REF) | |
4565 { | |
4566 /* For MEM_REF determine a tighter bound of the non-array | |
4567 element type. */ | |
4568 tree eltype = TREE_TYPE (reftype); | |
4569 while (TREE_CODE (eltype) == ARRAY_TYPE) | |
4570 eltype = TREE_TYPE (eltype); | |
4571 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype)); | |
4572 } | |
4573 } | |
4574 else | |
4575 { | |
4576 eltsize = 1; | |
4577 arrbounds[0] = 0; | |
4578 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype)); | |
4579 } | |
4580 | |
4581 offrange[0] += ioff; | |
4582 offrange[1] += ioff; | |
4583 | |
4584 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE | |
4585 is set (when taking the address of the one-past-last element | |
4586 of an array) but always use the stricter bound in diagnostics. */ | |
4587 offset_int ubound = arrbounds[1]; | |
4588 if (ignore_off_by_one) | |
4589 ubound += 1; | |
4590 | |
4591 if (offrange[0] >= ubound || offrange[1] < arrbounds[0]) | |
4592 { | |
4593 /* Treat a reference to a non-array object as one to an array | |
4594 of a single element. */ | |
4595 if (TREE_CODE (reftype) != ARRAY_TYPE) | |
4596 reftype = build_array_type_nelts (reftype, 1); | |
4597 | |
4598 if (TREE_CODE (ref) == MEM_REF) | |
4599 { | |
4600 /* Extract the element type out of MEM_REF and use its size | |
4601 to compute the index to print in the diagnostic; arrays | |
4602 in MEM_REF don't mean anything. */ | |
4603 tree type = TREE_TYPE (ref); | |
4604 while (TREE_CODE (type) == ARRAY_TYPE) | |
4605 type = TREE_TYPE (type); | |
4606 tree size = TYPE_SIZE_UNIT (type); | |
4607 offrange[0] = offrange[0] / wi::to_offset (size); | |
4608 offrange[1] = offrange[1] / wi::to_offset (size); | |
4609 } | |
4610 else | |
4611 { | |
4612 /* For anything other than MEM_REF, compute the index to | |
4613 print in the diagnostic as the offset over element size. */ | |
4614 offrange[0] = offrange[0] / eltsize; | |
4615 offrange[1] = offrange[1] / eltsize; | |
4616 } | |
4617 | |
4618 bool warned; | |
4619 if (offrange[0] == offrange[1]) | |
4620 warned = warning_at (location, OPT_Warray_bounds, | |
4621 "array subscript %wi is outside array bounds " | |
4622 "of %qT", | |
4623 offrange[0].to_shwi (), reftype); | |
4624 else | |
4625 warned = warning_at (location, OPT_Warray_bounds, | |
4626 "array subscript [%wi, %wi] is outside " | |
4627 "array bounds of %qT", | |
4628 offrange[0].to_shwi (), | |
4629 offrange[1].to_shwi (), reftype); | |
4630 if (warned && DECL_P (arg)) | |
4631 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg); | |
4632 | |
4633 TREE_NO_WARNING (ref) = 1; | |
4634 return; | |
4635 } | |
4636 | |
4637 if (warn_array_bounds < 2) | |
4638 return; | |
4639 | |
4640 /* At level 2 check also intermediate offsets. */ | |
4641 int i = 0; | |
4642 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound) | |
4643 { | |
4644 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi (); | |
4645 | |
6749 warning_at (location, OPT_Warray_bounds, | 4646 warning_at (location, OPT_Warray_bounds, |
6750 "array subscript is below array bounds"); | 4647 "intermediate array offset %wi is outside array bounds " |
4648 "of %qT", | |
4649 tmpidx, reftype); | |
6751 TREE_NO_WARNING (ref) = 1; | 4650 TREE_NO_WARNING (ref) = 1; |
6752 } | 4651 } |
6753 } | 4652 } |
6754 | 4653 |
6755 /* Searches if the expr T, located at LOCATION computes | 4654 /* Searches if the expr T, located at LOCATION computes |
6756 address of an ARRAY_REF, and call check_array_ref on it. */ | 4655 address of an ARRAY_REF, and call check_array_ref on it. */ |
6757 | 4656 |
6758 static void | 4657 void |
6759 search_for_addr_array (tree t, location_t location) | 4658 vrp_prop::search_for_addr_array (tree t, location_t location) |
6760 { | 4659 { |
6761 /* Check each ARRAY_REFs in the reference chain. */ | 4660 /* Check each ARRAY_REF and MEM_REF in the reference chain. */ |
6762 do | 4661 do |
6763 { | 4662 { |
6764 if (TREE_CODE (t) == ARRAY_REF) | 4663 if (TREE_CODE (t) == ARRAY_REF) |
6765 check_array_ref (location, t, true /*ignore_off_by_one*/); | 4664 check_array_ref (location, t, true /*ignore_off_by_one*/); |
4665 else if (TREE_CODE (t) == MEM_REF) | |
4666 check_mem_ref (location, t, true /*ignore_off_by_one*/); | |
6766 | 4667 |
6767 t = TREE_OPERAND (t, 0); | 4668 t = TREE_OPERAND (t, 0); |
6768 } | 4669 } |
6769 while (handled_component_p (t)); | 4670 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF); |
6770 | 4671 |
6771 if (TREE_CODE (t) == MEM_REF | 4672 if (TREE_CODE (t) != MEM_REF |
6772 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR | 4673 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR |
6773 && !TREE_NO_WARNING (t)) | 4674 || TREE_NO_WARNING (t)) |
6774 { | 4675 return; |
6775 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); | 4676 |
6776 tree low_bound, up_bound, el_sz; | 4677 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); |
6777 offset_int idx; | 4678 tree low_bound, up_bound, el_sz; |
6778 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE | 4679 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE |
6779 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE | 4680 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE |
6780 || !TYPE_DOMAIN (TREE_TYPE (tem))) | 4681 || !TYPE_DOMAIN (TREE_TYPE (tem))) |
6781 return; | 4682 return; |
6782 | 4683 |
6783 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); | 4684 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); |
6784 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); | 4685 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); |
6785 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem))); | 4686 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem))); |
6786 if (!low_bound | 4687 if (!low_bound |
6787 || TREE_CODE (low_bound) != INTEGER_CST | 4688 || TREE_CODE (low_bound) != INTEGER_CST |
6788 || !up_bound | 4689 || !up_bound |
6789 || TREE_CODE (up_bound) != INTEGER_CST | 4690 || TREE_CODE (up_bound) != INTEGER_CST |
6790 || !el_sz | 4691 || !el_sz |
6791 || TREE_CODE (el_sz) != INTEGER_CST) | 4692 || TREE_CODE (el_sz) != INTEGER_CST) |
6792 return; | 4693 return; |
6793 | 4694 |
6794 idx = mem_ref_offset (t); | 4695 offset_int idx; |
6795 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz)); | 4696 if (!mem_ref_offset (t).is_constant (&idx)) |
6796 if (idx < 0) | 4697 return; |
6797 { | 4698 |
6798 if (dump_file && (dump_flags & TDF_DETAILS)) | 4699 bool warned = false; |
6799 { | 4700 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz)); |
6800 fprintf (dump_file, "Array bound warning for "); | 4701 if (idx < 0) |
6801 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); | 4702 { |
6802 fprintf (dump_file, "\n"); | 4703 if (dump_file && (dump_flags & TDF_DETAILS)) |
6803 } | 4704 { |
6804 warning_at (location, OPT_Warray_bounds, | 4705 fprintf (dump_file, "Array bound warning for "); |
6805 "array subscript is below array bounds"); | 4706 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); |
6806 TREE_NO_WARNING (t) = 1; | 4707 fprintf (dump_file, "\n"); |
6807 } | 4708 } |
6808 else if (idx > (wi::to_offset (up_bound) | 4709 warned = warning_at (location, OPT_Warray_bounds, |
6809 - wi::to_offset (low_bound) + 1)) | 4710 "array subscript %wi is below " |
6810 { | 4711 "array bounds of %qT", |
6811 if (dump_file && (dump_flags & TDF_DETAILS)) | 4712 idx.to_shwi (), TREE_TYPE (tem)); |
6812 { | 4713 } |
6813 fprintf (dump_file, "Array bound warning for "); | 4714 else if (idx > (wi::to_offset (up_bound) |
6814 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); | 4715 - wi::to_offset (low_bound) + 1)) |
6815 fprintf (dump_file, "\n"); | 4716 { |
6816 } | 4717 if (dump_file && (dump_flags & TDF_DETAILS)) |
6817 warning_at (location, OPT_Warray_bounds, | 4718 { |
6818 "array subscript is above array bounds"); | 4719 fprintf (dump_file, "Array bound warning for "); |
6819 TREE_NO_WARNING (t) = 1; | 4720 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); |
6820 } | 4721 fprintf (dump_file, "\n"); |
4722 } | |
4723 warned = warning_at (location, OPT_Warray_bounds, | |
4724 "array subscript %wu is above " | |
4725 "array bounds of %qT", | |
4726 idx.to_uhwi (), TREE_TYPE (tem)); | |
4727 } | |
4728 | |
4729 if (warned) | |
4730 { | |
4731 if (DECL_P (t)) | |
4732 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t); | |
4733 | |
4734 TREE_NO_WARNING (t) = 1; | |
6821 } | 4735 } |
6822 } | 4736 } |
6823 | 4737 |
6824 /* walk_tree() callback that checks if *TP is | 4738 /* walk_tree() callback that checks if *TP is |
6825 an ARRAY_REF inside an ADDR_EXPR (in which an array | 4739 an ARRAY_REF inside an ADDR_EXPR (in which an array |
6835 location_t location; | 4749 location_t location; |
6836 | 4750 |
6837 if (EXPR_HAS_LOCATION (t)) | 4751 if (EXPR_HAS_LOCATION (t)) |
6838 location = EXPR_LOCATION (t); | 4752 location = EXPR_LOCATION (t); |
6839 else | 4753 else |
6840 { | 4754 location = gimple_location (wi->stmt); |
6841 location_t *locp = (location_t *) wi->info; | |
6842 location = *locp; | |
6843 } | |
6844 | 4755 |
6845 *walk_subtree = TRUE; | 4756 *walk_subtree = TRUE; |
6846 | 4757 |
4758 vrp_prop *vrp_prop = (class vrp_prop *)wi->info; | |
6847 if (TREE_CODE (t) == ARRAY_REF) | 4759 if (TREE_CODE (t) == ARRAY_REF) |
6848 check_array_ref (location, t, false /*ignore_off_by_one*/); | 4760 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/); |
6849 | 4761 else if (TREE_CODE (t) == MEM_REF) |
4762 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/); | |
6850 else if (TREE_CODE (t) == ADDR_EXPR) | 4763 else if (TREE_CODE (t) == ADDR_EXPR) |
6851 { | 4764 { |
6852 search_for_addr_array (t, location); | 4765 vrp_prop->search_for_addr_array (t, location); |
6853 *walk_subtree = FALSE; | 4766 *walk_subtree = FALSE; |
6854 } | 4767 } |
6855 | 4768 |
6856 return NULL_TREE; | 4769 return NULL_TREE; |
4770 } | |
4771 | |
4772 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs, | |
4773 to walk over all statements of all reachable BBs and call | |
4774 check_array_bounds on them. */ | |
4775 | |
4776 class check_array_bounds_dom_walker : public dom_walker | |
4777 { | |
4778 public: | |
4779 check_array_bounds_dom_walker (vrp_prop *prop) | |
4780 : dom_walker (CDI_DOMINATORS, | |
4781 /* Discover non-executable edges, preserving EDGE_EXECUTABLE | |
4782 flags, so that we can merge in information on | |
4783 non-executable edges from vrp_folder . */ | |
4784 REACHABLE_BLOCKS_PRESERVING_FLAGS), | |
4785 m_prop (prop) {} | |
4786 ~check_array_bounds_dom_walker () {} | |
4787 | |
4788 edge before_dom_children (basic_block) FINAL OVERRIDE; | |
4789 | |
4790 private: | |
4791 vrp_prop *m_prop; | |
4792 }; | |
4793 | |
4794 /* Implementation of dom_walker::before_dom_children. | |
4795 | |
4796 Walk over all statements of BB and call check_array_bounds on them, | |
4797 and determine if there's a unique successor edge. */ | |
4798 | |
4799 edge | |
4800 check_array_bounds_dom_walker::before_dom_children (basic_block bb) | |
4801 { | |
4802 gimple_stmt_iterator si; | |
4803 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
4804 { | |
4805 gimple *stmt = gsi_stmt (si); | |
4806 struct walk_stmt_info wi; | |
4807 if (!gimple_has_location (stmt) | |
4808 || is_gimple_debug (stmt)) | |
4809 continue; | |
4810 | |
4811 memset (&wi, 0, sizeof (wi)); | |
4812 | |
4813 wi.info = m_prop; | |
4814 | |
4815 walk_gimple_op (stmt, check_array_bounds, &wi); | |
4816 } | |
4817 | |
4818 /* Determine if there's a unique successor edge, and if so, return | |
4819 that back to dom_walker, ensuring that we don't visit blocks that | |
4820 became unreachable during the VRP propagation | |
4821 (PR tree-optimization/83312). */ | |
4822 return find_taken_edge (bb, NULL_TREE); | |
6857 } | 4823 } |
6858 | 4824 |
6859 /* Walk over all statements of all reachable BBs and call check_array_bounds | 4825 /* Walk over all statements of all reachable BBs and call check_array_bounds |
6860 on them. */ | 4826 on them. */ |
6861 | 4827 |
6862 static void | 4828 void |
6863 check_all_array_refs (void) | 4829 vrp_prop::check_all_array_refs () |
6864 { | 4830 { |
6865 basic_block bb; | 4831 check_array_bounds_dom_walker w (this); |
6866 gimple_stmt_iterator si; | 4832 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
6867 | |
6868 FOR_EACH_BB_FN (bb, cfun) | |
6869 { | |
6870 edge_iterator ei; | |
6871 edge e; | |
6872 bool executable = false; | |
6873 | |
6874 /* Skip blocks that were found to be unreachable. */ | |
6875 FOR_EACH_EDGE (e, ei, bb->preds) | |
6876 executable |= !!(e->flags & EDGE_EXECUTABLE); | |
6877 if (!executable) | |
6878 continue; | |
6879 | |
6880 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
6881 { | |
6882 gimple *stmt = gsi_stmt (si); | |
6883 struct walk_stmt_info wi; | |
6884 if (!gimple_has_location (stmt) | |
6885 || is_gimple_debug (stmt)) | |
6886 continue; | |
6887 | |
6888 memset (&wi, 0, sizeof (wi)); | |
6889 | |
6890 location_t loc = gimple_location (stmt); | |
6891 wi.info = &loc; | |
6892 | |
6893 walk_gimple_op (gsi_stmt (si), | |
6894 check_array_bounds, | |
6895 &wi); | |
6896 } | |
6897 } | |
6898 } | 4833 } |
6899 | 4834 |
6900 /* Return true if all imm uses of VAR are either in STMT, or | 4835 /* Return true if all imm uses of VAR are either in STMT, or |
6901 feed (optionally through a chain of single imm uses) GIMPLE_COND | 4836 feed (optionally through a chain of single imm uses) GIMPLE_COND |
6902 in basic block COND_BB. */ | 4837 in basic block COND_BB. */ |
6937 x_5 = ASSERT_EXPR <x_3, ...>; | 4872 x_5 = ASSERT_EXPR <x_3, ...>; |
6938 If x_3 has no other immediate uses (checked by caller), | 4873 If x_3 has no other immediate uses (checked by caller), |
6939 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits | 4874 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits |
6940 from the non-zero bitmask. */ | 4875 from the non-zero bitmask. */ |
6941 | 4876 |
6942 static void | 4877 void |
6943 maybe_set_nonzero_bits (basic_block bb, tree var) | 4878 maybe_set_nonzero_bits (edge e, tree var) |
6944 { | 4879 { |
6945 edge e = single_pred_edge (bb); | |
6946 basic_block cond_bb = e->src; | 4880 basic_block cond_bb = e->src; |
6947 gimple *stmt = last_stmt (cond_bb); | 4881 gimple *stmt = last_stmt (cond_bb); |
6948 tree cst; | 4882 tree cst; |
6949 | 4883 |
6950 if (stmt == NULL | 4884 if (stmt == NULL |
7055 single_pred (bb))) | 4989 single_pred (bb))) |
7056 { | 4990 { |
7057 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs), | 4991 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs), |
7058 SSA_NAME_RANGE_INFO (lhs)->get_min (), | 4992 SSA_NAME_RANGE_INFO (lhs)->get_min (), |
7059 SSA_NAME_RANGE_INFO (lhs)->get_max ()); | 4993 SSA_NAME_RANGE_INFO (lhs)->get_max ()); |
7060 maybe_set_nonzero_bits (bb, var); | 4994 maybe_set_nonzero_bits (single_pred_edge (bb), var); |
7061 } | 4995 } |
7062 } | 4996 } |
7063 | 4997 |
7064 /* Propagate the RHS into every use of the LHS. For SSA names | 4998 /* Propagate the RHS into every use of the LHS. For SSA names |
7065 also propagate abnormals as it merely restores the original | 4999 also propagate abnormals as it merely restores the original |
7087 gsi_next (&si); | 5021 gsi_next (&si); |
7088 } | 5022 } |
7089 } | 5023 } |
7090 } | 5024 } |
7091 | 5025 |
7092 | |
7093 /* Return true if STMT is interesting for VRP. */ | 5026 /* Return true if STMT is interesting for VRP. */ |
7094 | 5027 |
7095 static bool | 5028 bool |
7096 stmt_interesting_for_vrp (gimple *stmt) | 5029 stmt_interesting_for_vrp (gimple *stmt) |
7097 { | 5030 { |
7098 if (gimple_code (stmt) == GIMPLE_PHI) | 5031 if (gimple_code (stmt) == GIMPLE_PHI) |
7099 { | 5032 { |
7100 tree res = gimple_phi_result (stmt); | 5033 tree res = gimple_phi_result (stmt); |
7136 return true; | 5069 return true; |
7137 | 5070 |
7138 return false; | 5071 return false; |
7139 } | 5072 } |
7140 | 5073 |
7141 /* Initialize VRP lattice. */ | |
7142 | |
7143 static void | |
7144 vrp_initialize_lattice () | |
7145 { | |
7146 values_propagated = false; | |
7147 num_vr_values = num_ssa_names; | |
7148 vr_value = XCNEWVEC (value_range *, num_vr_values); | |
7149 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names); | |
7150 bitmap_obstack_initialize (&vrp_equiv_obstack); | |
7151 } | |
7152 | |
7153 /* Initialization required by ssa_propagate engine. */ | 5074 /* Initialization required by ssa_propagate engine. */ |
7154 | 5075 |
7155 static void | 5076 void |
7156 vrp_initialize () | 5077 vrp_prop::vrp_initialize () |
7157 { | 5078 { |
7158 basic_block bb; | 5079 basic_block bb; |
7159 | 5080 |
7160 FOR_EACH_BB_FN (bb, cfun) | 5081 FOR_EACH_BB_FN (bb, cfun) |
7161 { | 5082 { |
7192 prop_set_simulate_again (stmt, true); | 5113 prop_set_simulate_again (stmt, true); |
7193 } | 5114 } |
7194 } | 5115 } |
7195 } | 5116 } |
7196 | 5117 |
7197 /* Return the singleton value-range for NAME or NAME. */ | |
7198 | |
7199 static inline tree | |
7200 vrp_valueize (tree name) | |
7201 { | |
7202 if (TREE_CODE (name) == SSA_NAME) | |
7203 { | |
7204 value_range *vr = get_value_range (name); | |
7205 if (vr->type == VR_RANGE | |
7206 && (TREE_CODE (vr->min) == SSA_NAME | |
7207 || is_gimple_min_invariant (vr->min)) | |
7208 && vrp_operand_equal_p (vr->min, vr->max)) | |
7209 return vr->min; | |
7210 } | |
7211 return name; | |
7212 } | |
7213 | |
7214 /* Return the singleton value-range for NAME if that is a constant | |
7215 but signal to not follow SSA edges. */ | |
7216 | |
7217 static inline tree | |
7218 vrp_valueize_1 (tree name) | |
7219 { | |
7220 if (TREE_CODE (name) == SSA_NAME) | |
7221 { | |
7222 /* If the definition may be simulated again we cannot follow | |
7223 this SSA edge as the SSA propagator does not necessarily | |
7224 re-visit the use. */ | |
7225 gimple *def_stmt = SSA_NAME_DEF_STMT (name); | |
7226 if (!gimple_nop_p (def_stmt) | |
7227 && prop_simulate_again_p (def_stmt)) | |
7228 return NULL_TREE; | |
7229 value_range *vr = get_value_range (name); | |
7230 if (range_int_cst_singleton_p (vr)) | |
7231 return vr->min; | |
7232 } | |
7233 return name; | |
7234 } | |
7235 | |
7236 /* Visit assignment STMT. If it produces an interesting range, record | |
7237 the range in VR and set LHS to OUTPUT_P. */ | |
7238 | |
7239 static void | |
7240 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr) | |
7241 { | |
7242 tree lhs; | |
7243 enum gimple_code code = gimple_code (stmt); | |
7244 lhs = gimple_get_lhs (stmt); | |
7245 *output_p = NULL_TREE; | |
7246 | |
7247 /* We only keep track of ranges in integral and pointer types. */ | |
7248 if (TREE_CODE (lhs) == SSA_NAME | |
7249 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | |
7250 /* It is valid to have NULL MIN/MAX values on a type. See | |
7251 build_range_type. */ | |
7252 && TYPE_MIN_VALUE (TREE_TYPE (lhs)) | |
7253 && TYPE_MAX_VALUE (TREE_TYPE (lhs))) | |
7254 || POINTER_TYPE_P (TREE_TYPE (lhs)))) | |
7255 { | |
7256 *output_p = lhs; | |
7257 | |
7258 /* Try folding the statement to a constant first. */ | |
7259 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize, | |
7260 vrp_valueize_1); | |
7261 if (tem) | |
7262 { | |
7263 if (TREE_CODE (tem) == SSA_NAME | |
7264 && (SSA_NAME_IS_DEFAULT_DEF (tem) | |
7265 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem)))) | |
7266 { | |
7267 extract_range_from_ssa_name (vr, tem); | |
7268 return; | |
7269 } | |
7270 else if (is_gimple_min_invariant (tem)) | |
7271 { | |
7272 set_value_range_to_value (vr, tem, NULL); | |
7273 return; | |
7274 } | |
7275 } | |
7276 /* Then dispatch to value-range extracting functions. */ | |
7277 if (code == GIMPLE_CALL) | |
7278 extract_range_basic (vr, stmt); | |
7279 else | |
7280 extract_range_from_assignment (vr, as_a <gassign *> (stmt)); | |
7281 } | |
7282 } | |
7283 | |
7284 /* Helper that gets the value range of the SSA_NAME with version I | |
7285 or a symbolic range containing the SSA_NAME only if the value range | |
7286 is varying or undefined. */ | |
7287 | |
7288 static inline value_range | |
7289 get_vr_for_comparison (int i) | |
7290 { | |
7291 value_range vr = *get_value_range (ssa_name (i)); | |
7292 | |
7293 /* If name N_i does not have a valid range, use N_i as its own | |
7294 range. This allows us to compare against names that may | |
7295 have N_i in their ranges. */ | |
7296 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) | |
7297 { | |
7298 vr.type = VR_RANGE; | |
7299 vr.min = ssa_name (i); | |
7300 vr.max = ssa_name (i); | |
7301 } | |
7302 | |
7303 return vr; | |
7304 } | |
7305 | |
7306 /* Compare all the value ranges for names equivalent to VAR with VAL | |
7307 using comparison code COMP. Return the same value returned by | |
7308 compare_range_with_value, including the setting of | |
7309 *STRICT_OVERFLOW_P. */ | |
7310 | |
7311 static tree | |
7312 compare_name_with_value (enum tree_code comp, tree var, tree val, | |
7313 bool *strict_overflow_p, bool use_equiv_p) | |
7314 { | |
7315 bitmap_iterator bi; | |
7316 unsigned i; | |
7317 bitmap e; | |
7318 tree retval, t; | |
7319 int used_strict_overflow; | |
7320 bool sop; | |
7321 value_range equiv_vr; | |
7322 | |
7323 /* Get the set of equivalences for VAR. */ | |
7324 e = get_value_range (var)->equiv; | |
7325 | |
7326 /* Start at -1. Set it to 0 if we do a comparison without relying | |
7327 on overflow, or 1 if all comparisons rely on overflow. */ | |
7328 used_strict_overflow = -1; | |
7329 | |
7330 /* Compare vars' value range with val. */ | |
7331 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var)); | |
7332 sop = false; | |
7333 retval = compare_range_with_value (comp, &equiv_vr, val, &sop); | |
7334 if (retval) | |
7335 used_strict_overflow = sop ? 1 : 0; | |
7336 | |
7337 /* If the equiv set is empty we have done all work we need to do. */ | |
7338 if (e == NULL) | |
7339 { | |
7340 if (retval | |
7341 && used_strict_overflow > 0) | |
7342 *strict_overflow_p = true; | |
7343 return retval; | |
7344 } | |
7345 | |
7346 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi) | |
7347 { | |
7348 tree name = ssa_name (i); | |
7349 if (! name) | |
7350 continue; | |
7351 | |
7352 if (! use_equiv_p | |
7353 && ! SSA_NAME_IS_DEFAULT_DEF (name) | |
7354 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name))) | |
7355 continue; | |
7356 | |
7357 equiv_vr = get_vr_for_comparison (i); | |
7358 sop = false; | |
7359 t = compare_range_with_value (comp, &equiv_vr, val, &sop); | |
7360 if (t) | |
7361 { | |
7362 /* If we get different answers from different members | |
7363 of the equivalence set this check must be in a dead | |
7364 code region. Folding it to a trap representation | |
7365 would be correct here. For now just return don't-know. */ | |
7366 if (retval != NULL | |
7367 && t != retval) | |
7368 { | |
7369 retval = NULL_TREE; | |
7370 break; | |
7371 } | |
7372 retval = t; | |
7373 | |
7374 if (!sop) | |
7375 used_strict_overflow = 0; | |
7376 else if (used_strict_overflow < 0) | |
7377 used_strict_overflow = 1; | |
7378 } | |
7379 } | |
7380 | |
7381 if (retval | |
7382 && used_strict_overflow > 0) | |
7383 *strict_overflow_p = true; | |
7384 | |
7385 return retval; | |
7386 } | |
7387 | |
7388 | |
7389 /* Given a comparison code COMP and names N1 and N2, compare all the | |
7390 ranges equivalent to N1 against all the ranges equivalent to N2 | |
7391 to determine the value of N1 COMP N2. Return the same value | |
7392 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate | |
7393 whether we relied on undefined signed overflow in the comparison. */ | |
7394 | |
7395 | |
7396 static tree | |
7397 compare_names (enum tree_code comp, tree n1, tree n2, | |
7398 bool *strict_overflow_p) | |
7399 { | |
7400 tree t, retval; | |
7401 bitmap e1, e2; | |
7402 bitmap_iterator bi1, bi2; | |
7403 unsigned i1, i2; | |
7404 int used_strict_overflow; | |
7405 static bitmap_obstack *s_obstack = NULL; | |
7406 static bitmap s_e1 = NULL, s_e2 = NULL; | |
7407 | |
7408 /* Compare the ranges of every name equivalent to N1 against the | |
7409 ranges of every name equivalent to N2. */ | |
7410 e1 = get_value_range (n1)->equiv; | |
7411 e2 = get_value_range (n2)->equiv; | |
7412 | |
7413 /* Use the fake bitmaps if e1 or e2 are not available. */ | |
7414 if (s_obstack == NULL) | |
7415 { | |
7416 s_obstack = XNEW (bitmap_obstack); | |
7417 bitmap_obstack_initialize (s_obstack); | |
7418 s_e1 = BITMAP_ALLOC (s_obstack); | |
7419 s_e2 = BITMAP_ALLOC (s_obstack); | |
7420 } | |
7421 if (e1 == NULL) | |
7422 e1 = s_e1; | |
7423 if (e2 == NULL) | |
7424 e2 = s_e2; | |
7425 | |
7426 /* Add N1 and N2 to their own set of equivalences to avoid | |
7427 duplicating the body of the loop just to check N1 and N2 | |
7428 ranges. */ | |
7429 bitmap_set_bit (e1, SSA_NAME_VERSION (n1)); | |
7430 bitmap_set_bit (e2, SSA_NAME_VERSION (n2)); | |
7431 | |
7432 /* If the equivalence sets have a common intersection, then the two | |
7433 names can be compared without checking their ranges. */ | |
7434 if (bitmap_intersect_p (e1, e2)) | |
7435 { | |
7436 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); | |
7437 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); | |
7438 | |
7439 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR) | |
7440 ? boolean_true_node | |
7441 : boolean_false_node; | |
7442 } | |
7443 | |
7444 /* Start at -1. Set it to 0 if we do a comparison without relying | |
7445 on overflow, or 1 if all comparisons rely on overflow. */ | |
7446 used_strict_overflow = -1; | |
7447 | |
7448 /* Otherwise, compare all the equivalent ranges. First, add N1 and | |
7449 N2 to their own set of equivalences to avoid duplicating the body | |
7450 of the loop just to check N1 and N2 ranges. */ | |
7451 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1) | |
7452 { | |
7453 if (! ssa_name (i1)) | |
7454 continue; | |
7455 | |
7456 value_range vr1 = get_vr_for_comparison (i1); | |
7457 | |
7458 t = retval = NULL_TREE; | |
7459 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2) | |
7460 { | |
7461 if (! ssa_name (i2)) | |
7462 continue; | |
7463 | |
7464 bool sop = false; | |
7465 | |
7466 value_range vr2 = get_vr_for_comparison (i2); | |
7467 | |
7468 t = compare_ranges (comp, &vr1, &vr2, &sop); | |
7469 if (t) | |
7470 { | |
7471 /* If we get different answers from different members | |
7472 of the equivalence set this check must be in a dead | |
7473 code region. Folding it to a trap representation | |
7474 would be correct here. For now just return don't-know. */ | |
7475 if (retval != NULL | |
7476 && t != retval) | |
7477 { | |
7478 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); | |
7479 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); | |
7480 return NULL_TREE; | |
7481 } | |
7482 retval = t; | |
7483 | |
7484 if (!sop) | |
7485 used_strict_overflow = 0; | |
7486 else if (used_strict_overflow < 0) | |
7487 used_strict_overflow = 1; | |
7488 } | |
7489 } | |
7490 | |
7491 if (retval) | |
7492 { | |
7493 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); | |
7494 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); | |
7495 if (used_strict_overflow > 0) | |
7496 *strict_overflow_p = true; | |
7497 return retval; | |
7498 } | |
7499 } | |
7500 | |
7501 /* None of the equivalent ranges are useful in computing this | |
7502 comparison. */ | |
7503 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); | |
7504 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); | |
7505 return NULL_TREE; | |
7506 } | |
7507 | |
7508 /* Helper function for vrp_evaluate_conditional_warnv & other | |
7509 optimizers. */ | |
7510 | |
7511 static tree | |
7512 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, | |
7513 tree op0, tree op1, | |
7514 bool * strict_overflow_p) | |
7515 { | |
7516 value_range *vr0, *vr1; | |
7517 | |
7518 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; | |
7519 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; | |
7520 | |
7521 tree res = NULL_TREE; | |
7522 if (vr0 && vr1) | |
7523 res = compare_ranges (code, vr0, vr1, strict_overflow_p); | |
7524 if (!res && vr0) | |
7525 res = compare_range_with_value (code, vr0, op1, strict_overflow_p); | |
7526 if (!res && vr1) | |
7527 res = (compare_range_with_value | |
7528 (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); | |
7529 return res; | |
7530 } | |
7531 | |
7532 /* Helper function for vrp_evaluate_conditional_warnv. */ | |
7533 | |
7534 static tree | |
7535 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, | |
7536 tree op1, bool use_equiv_p, | |
7537 bool *strict_overflow_p, bool *only_ranges) | |
7538 { | |
7539 tree ret; | |
7540 if (only_ranges) | |
7541 *only_ranges = true; | |
7542 | |
7543 /* We only deal with integral and pointer types. */ | |
7544 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
7545 && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
7546 return NULL_TREE; | |
7547 | |
7548 /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed | |
7549 as a simple equality test, then prefer that over its current form | |
7550 for evaluation. | |
7551 | |
7552 An overflow test which collapses to an equality test can always be | |
7553 expressed as a comparison of one argument against zero. Overflow | |
7554 occurs when the chosen argument is zero and does not occur if the | |
7555 chosen argument is not zero. */ | |
7556 tree x; | |
7557 if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x)) | |
7558 { | |
7559 wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED); | |
7560 /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0) | |
7561 B = A - 1; if (A > B) -> B = A - 1; if (A != 0) | |
7562 B = A + 1; if (B < A) -> B = A + 1; if (B == 0) | |
7563 B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */ | |
7564 if (integer_zerop (x)) | |
7565 { | |
7566 op1 = x; | |
7567 code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR; | |
7568 } | |
7569 /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0) | |
7570 B = A + 1; if (A < B) -> B = A + 1; if (B != 0) | |
7571 B = A - 1; if (B > A) -> B = A - 1; if (A == 0) | |
7572 B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */ | |
7573 else if (wi::to_wide (x) == max - 1) | |
7574 { | |
7575 op0 = op1; | |
7576 op1 = wide_int_to_tree (TREE_TYPE (op0), 0); | |
7577 code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR; | |
7578 } | |
7579 } | |
7580 | |
7581 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
7582 (code, op0, op1, strict_overflow_p))) | |
7583 return ret; | |
7584 if (only_ranges) | |
7585 *only_ranges = false; | |
7586 /* Do not use compare_names during propagation, it's quadratic. */ | |
7587 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME | |
7588 && use_equiv_p) | |
7589 return compare_names (code, op0, op1, strict_overflow_p); | |
7590 else if (TREE_CODE (op0) == SSA_NAME) | |
7591 return compare_name_with_value (code, op0, op1, | |
7592 strict_overflow_p, use_equiv_p); | |
7593 else if (TREE_CODE (op1) == SSA_NAME) | |
7594 return compare_name_with_value (swap_tree_comparison (code), op1, op0, | |
7595 strict_overflow_p, use_equiv_p); | |
7596 return NULL_TREE; | |
7597 } | |
7598 | |
7599 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range | |
7600 information. Return NULL if the conditional can not be evaluated. | |
7601 The ranges of all the names equivalent with the operands in COND | |
7602 will be used when trying to compute the value. If the result is | |
7603 based on undefined signed overflow, issue a warning if | |
7604 appropriate. */ | |
7605 | |
7606 static tree | |
7607 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt) | |
7608 { | |
7609 bool sop; | |
7610 tree ret; | |
7611 bool only_ranges; | |
7612 | |
7613 /* Some passes and foldings leak constants with overflow flag set | |
7614 into the IL. Avoid doing wrong things with these and bail out. */ | |
7615 if ((TREE_CODE (op0) == INTEGER_CST | |
7616 && TREE_OVERFLOW (op0)) | |
7617 || (TREE_CODE (op1) == INTEGER_CST | |
7618 && TREE_OVERFLOW (op1))) | |
7619 return NULL_TREE; | |
7620 | |
7621 sop = false; | |
7622 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop, | |
7623 &only_ranges); | |
7624 | |
7625 if (ret && sop) | |
7626 { | |
7627 enum warn_strict_overflow_code wc; | |
7628 const char* warnmsg; | |
7629 | |
7630 if (is_gimple_min_invariant (ret)) | |
7631 { | |
7632 wc = WARN_STRICT_OVERFLOW_CONDITIONAL; | |
7633 warnmsg = G_("assuming signed overflow does not occur when " | |
7634 "simplifying conditional to constant"); | |
7635 } | |
7636 else | |
7637 { | |
7638 wc = WARN_STRICT_OVERFLOW_COMPARISON; | |
7639 warnmsg = G_("assuming signed overflow does not occur when " | |
7640 "simplifying conditional"); | |
7641 } | |
7642 | |
7643 if (issue_strict_overflow_warning (wc)) | |
7644 { | |
7645 location_t location; | |
7646 | |
7647 if (!gimple_has_location (stmt)) | |
7648 location = input_location; | |
7649 else | |
7650 location = gimple_location (stmt); | |
7651 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg); | |
7652 } | |
7653 } | |
7654 | |
7655 if (warn_type_limits | |
7656 && ret && only_ranges | |
7657 && TREE_CODE_CLASS (code) == tcc_comparison | |
7658 && TREE_CODE (op0) == SSA_NAME) | |
7659 { | |
7660 /* If the comparison is being folded and the operand on the LHS | |
7661 is being compared against a constant value that is outside of | |
7662 the natural range of OP0's type, then the predicate will | |
7663 always fold regardless of the value of OP0. If -Wtype-limits | |
7664 was specified, emit a warning. */ | |
7665 tree type = TREE_TYPE (op0); | |
7666 value_range *vr0 = get_value_range (op0); | |
7667 | |
7668 if (vr0->type == VR_RANGE | |
7669 && INTEGRAL_TYPE_P (type) | |
7670 && vrp_val_is_min (vr0->min) | |
7671 && vrp_val_is_max (vr0->max) | |
7672 && is_gimple_min_invariant (op1)) | |
7673 { | |
7674 location_t location; | |
7675 | |
7676 if (!gimple_has_location (stmt)) | |
7677 location = input_location; | |
7678 else | |
7679 location = gimple_location (stmt); | |
7680 | |
7681 warning_at (location, OPT_Wtype_limits, | |
7682 integer_zerop (ret) | |
7683 ? G_("comparison always false " | |
7684 "due to limited range of data type") | |
7685 : G_("comparison always true " | |
7686 "due to limited range of data type")); | |
7687 } | |
7688 } | |
7689 | |
7690 return ret; | |
7691 } | |
7692 | |
7693 | |
7694 /* Visit conditional statement STMT. If we can determine which edge | |
7695 will be taken out of STMT's basic block, record it in | |
7696 *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */ | |
7697 | |
7698 static void | |
7699 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p) | |
7700 { | |
7701 tree val; | |
7702 | |
7703 *taken_edge_p = NULL; | |
7704 | |
7705 if (dump_file && (dump_flags & TDF_DETAILS)) | |
7706 { | |
7707 tree use; | |
7708 ssa_op_iter i; | |
7709 | |
7710 fprintf (dump_file, "\nVisiting conditional with predicate: "); | |
7711 print_gimple_stmt (dump_file, stmt, 0); | |
7712 fprintf (dump_file, "\nWith known ranges\n"); | |
7713 | |
7714 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) | |
7715 { | |
7716 fprintf (dump_file, "\t"); | |
7717 print_generic_expr (dump_file, use); | |
7718 fprintf (dump_file, ": "); | |
7719 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]); | |
7720 } | |
7721 | |
7722 fprintf (dump_file, "\n"); | |
7723 } | |
7724 | |
7725 /* Compute the value of the predicate COND by checking the known | |
7726 ranges of each of its operands. | |
7727 | |
7728 Note that we cannot evaluate all the equivalent ranges here | |
7729 because those ranges may not yet be final and with the current | |
7730 propagation strategy, we cannot determine when the value ranges | |
7731 of the names in the equivalence set have changed. | |
7732 | |
7733 For instance, given the following code fragment | |
7734 | |
7735 i_5 = PHI <8, i_13> | |
7736 ... | |
7737 i_14 = ASSERT_EXPR <i_5, i_5 != 0> | |
7738 if (i_14 == 1) | |
7739 ... | |
7740 | |
7741 Assume that on the first visit to i_14, i_5 has the temporary | |
7742 range [8, 8] because the second argument to the PHI function is | |
7743 not yet executable. We derive the range ~[0, 0] for i_14 and the | |
7744 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for | |
7745 the first time, since i_14 is equivalent to the range [8, 8], we | |
7746 determine that the predicate is always false. | |
7747 | |
7748 On the next round of propagation, i_13 is determined to be | |
7749 VARYING, which causes i_5 to drop down to VARYING. So, another | |
7750 visit to i_14 is scheduled. In this second visit, we compute the | |
7751 exact same range and equivalence set for i_14, namely ~[0, 0] and | |
7752 { i_5 }. But we did not have the previous range for i_5 | |
7753 registered, so vrp_visit_assignment thinks that the range for | |
7754 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)' | |
7755 is not visited again, which stops propagation from visiting | |
7756 statements in the THEN clause of that if(). | |
7757 | |
7758 To properly fix this we would need to keep the previous range | |
7759 value for the names in the equivalence set. This way we would've | |
7760 discovered that from one visit to the other i_5 changed from | |
7761 range [8, 8] to VR_VARYING. | |
7762 | |
7763 However, fixing this apparent limitation may not be worth the | |
7764 additional checking. Testing on several code bases (GCC, DLV, | |
7765 MICO, TRAMP3D and SPEC2000) showed that doing this results in | |
7766 4 more predicates folded in SPEC. */ | |
7767 | |
7768 bool sop; | |
7769 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt), | |
7770 gimple_cond_lhs (stmt), | |
7771 gimple_cond_rhs (stmt), | |
7772 false, &sop, NULL); | |
7773 if (val) | |
7774 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val); | |
7775 | |
7776 if (dump_file && (dump_flags & TDF_DETAILS)) | |
7777 { | |
7778 fprintf (dump_file, "\nPredicate evaluates to: "); | |
7779 if (val == NULL_TREE) | |
7780 fprintf (dump_file, "DON'T KNOW\n"); | |
7781 else | |
7782 print_generic_stmt (dump_file, val); | |
7783 } | |
7784 } | |
7785 | |
7786 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL | 5118 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
7787 that includes the value VAL. The search is restricted to the range | 5119 that includes the value VAL. The search is restricted to the range |
7788 [START_IDX, n - 1] where n is the size of VEC. | 5120 [START_IDX, n - 1] where n is the size of VEC. |
7789 | 5121 |
7790 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is | 5122 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
7794 it is placed in IDX and false is returned. | 5126 it is placed in IDX and false is returned. |
7795 | 5127 |
7796 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is | 5128 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
7797 returned. */ | 5129 returned. */ |
7798 | 5130 |
7799 static bool | 5131 bool |
7800 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx) | 5132 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx) |
7801 { | 5133 { |
7802 size_t n = gimple_switch_num_labels (stmt); | 5134 size_t n = gimple_switch_num_labels (stmt); |
7803 size_t low, high; | 5135 size_t low, high; |
7804 | 5136 |
7844 for values between MIN and MAX. The first index is placed in MIN_IDX. The | 5176 for values between MIN and MAX. The first index is placed in MIN_IDX. The |
7845 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty | 5177 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty |
7846 then MAX_IDX < MIN_IDX. | 5178 then MAX_IDX < MIN_IDX. |
7847 Returns true if the default label is not needed. */ | 5179 Returns true if the default label is not needed. */ |
7848 | 5180 |
7849 static bool | 5181 bool |
7850 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx, | 5182 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx, |
7851 size_t *max_idx) | 5183 size_t *max_idx) |
7852 { | 5184 { |
7853 size_t i, j; | 5185 size_t i, j; |
7854 bool min_take_default = !find_case_label_index (stmt, 1, min, &i); | 5186 bool min_take_default = !find_case_label_index (stmt, 1, min, &i); |
7895 *max_idx = j; | 5227 *max_idx = j; |
7896 return !take_default; | 5228 return !take_default; |
7897 } | 5229 } |
7898 } | 5230 } |
7899 | 5231 |
7900 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are | |
7901 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and | |
7902 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1. | |
7903 Returns true if the default label is not needed. */ | |
7904 | |
7905 static bool | |
7906 find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1, | |
7907 size_t *max_idx1, size_t *min_idx2, | |
7908 size_t *max_idx2) | |
7909 { | |
7910 size_t i, j, k, l; | |
7911 unsigned int n = gimple_switch_num_labels (stmt); | |
7912 bool take_default; | |
7913 tree case_low, case_high; | |
7914 tree min = vr->min, max = vr->max; | |
7915 | |
7916 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE); | |
7917 | |
7918 take_default = !find_case_label_range (stmt, min, max, &i, &j); | |
7919 | |
7920 /* Set second range to emtpy. */ | |
7921 *min_idx2 = 1; | |
7922 *max_idx2 = 0; | |
7923 | |
7924 if (vr->type == VR_RANGE) | |
7925 { | |
7926 *min_idx1 = i; | |
7927 *max_idx1 = j; | |
7928 return !take_default; | |
7929 } | |
7930 | |
7931 /* Set first range to all case labels. */ | |
7932 *min_idx1 = 1; | |
7933 *max_idx1 = n - 1; | |
7934 | |
7935 if (i > j) | |
7936 return false; | |
7937 | |
7938 /* Make sure all the values of case labels [i , j] are contained in | |
7939 range [MIN, MAX]. */ | |
7940 case_low = CASE_LOW (gimple_switch_label (stmt, i)); | |
7941 case_high = CASE_HIGH (gimple_switch_label (stmt, j)); | |
7942 if (tree_int_cst_compare (case_low, min) < 0) | |
7943 i += 1; | |
7944 if (case_high != NULL_TREE | |
7945 && tree_int_cst_compare (max, case_high) < 0) | |
7946 j -= 1; | |
7947 | |
7948 if (i > j) | |
7949 return false; | |
7950 | |
7951 /* If the range spans case labels [i, j], the corresponding anti-range spans | |
7952 the labels [1, i - 1] and [j + 1, n - 1]. */ | |
7953 k = j + 1; | |
7954 l = n - 1; | |
7955 if (k > l) | |
7956 { | |
7957 k = 1; | |
7958 l = 0; | |
7959 } | |
7960 | |
7961 j = i - 1; | |
7962 i = 1; | |
7963 if (i > j) | |
7964 { | |
7965 i = k; | |
7966 j = l; | |
7967 k = 1; | |
7968 l = 0; | |
7969 } | |
7970 | |
7971 *min_idx1 = i; | |
7972 *max_idx1 = j; | |
7973 *min_idx2 = k; | |
7974 *max_idx2 = l; | |
7975 return false; | |
7976 } | |
7977 | |
7978 /* Visit switch statement STMT. If we can determine which edge | |
7979 will be taken out of STMT's basic block, record it in | |
7980 *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */ | |
7981 | |
7982 static void | |
7983 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p) | |
7984 { | |
7985 tree op, val; | |
7986 value_range *vr; | |
7987 size_t i = 0, j = 0, k, l; | |
7988 bool take_default; | |
7989 | |
7990 *taken_edge_p = NULL; | |
7991 op = gimple_switch_index (stmt); | |
7992 if (TREE_CODE (op) != SSA_NAME) | |
7993 return; | |
7994 | |
7995 vr = get_value_range (op); | |
7996 if (dump_file && (dump_flags & TDF_DETAILS)) | |
7997 { | |
7998 fprintf (dump_file, "\nVisiting switch expression with operand "); | |
7999 print_generic_expr (dump_file, op); | |
8000 fprintf (dump_file, " with known range "); | |
8001 dump_value_range (dump_file, vr); | |
8002 fprintf (dump_file, "\n"); | |
8003 } | |
8004 | |
8005 if ((vr->type != VR_RANGE | |
8006 && vr->type != VR_ANTI_RANGE) | |
8007 || symbolic_range_p (vr)) | |
8008 return; | |
8009 | |
8010 /* Find the single edge that is taken from the switch expression. */ | |
8011 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l); | |
8012 | |
8013 /* Check if the range spans no CASE_LABEL. If so, we only reach the default | |
8014 label */ | |
8015 if (j < i) | |
8016 { | |
8017 gcc_assert (take_default); | |
8018 val = gimple_switch_default_label (stmt); | |
8019 } | |
8020 else | |
8021 { | |
8022 /* Check if labels with index i to j and maybe the default label | |
8023 are all reaching the same label. */ | |
8024 | |
8025 val = gimple_switch_label (stmt, i); | |
8026 if (take_default | |
8027 && CASE_LABEL (gimple_switch_default_label (stmt)) | |
8028 != CASE_LABEL (val)) | |
8029 { | |
8030 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8031 fprintf (dump_file, " not a single destination for this " | |
8032 "range\n"); | |
8033 return; | |
8034 } | |
8035 for (++i; i <= j; ++i) | |
8036 { | |
8037 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val)) | |
8038 { | |
8039 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8040 fprintf (dump_file, " not a single destination for this " | |
8041 "range\n"); | |
8042 return; | |
8043 } | |
8044 } | |
8045 for (; k <= l; ++k) | |
8046 { | |
8047 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val)) | |
8048 { | |
8049 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8050 fprintf (dump_file, " not a single destination for this " | |
8051 "range\n"); | |
8052 return; | |
8053 } | |
8054 } | |
8055 } | |
8056 | |
8057 *taken_edge_p = find_edge (gimple_bb (stmt), | |
8058 label_to_block (CASE_LABEL (val))); | |
8059 | |
8060 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8061 { | |
8062 fprintf (dump_file, " will take edge to "); | |
8063 print_generic_stmt (dump_file, CASE_LABEL (val)); | |
8064 } | |
8065 } | |
8066 | |
8067 | |
8068 /* Evaluate statement STMT. If the statement produces a useful range, | |
8069 set VR and corepsponding OUTPUT_P. | |
8070 | |
8071 If STMT is a conditional branch and we can determine its truth | |
8072 value, the taken edge is recorded in *TAKEN_EDGE_P. */ | |
8073 | |
8074 static void | |
8075 extract_range_from_stmt (gimple *stmt, edge *taken_edge_p, | |
8076 tree *output_p, value_range *vr) | |
8077 { | |
8078 | |
8079 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8080 { | |
8081 fprintf (dump_file, "\nVisiting statement:\n"); | |
8082 print_gimple_stmt (dump_file, stmt, 0, dump_flags); | |
8083 } | |
8084 | |
8085 if (!stmt_interesting_for_vrp (stmt)) | |
8086 gcc_assert (stmt_ends_bb_p (stmt)); | |
8087 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) | |
8088 vrp_visit_assignment_or_call (stmt, output_p, vr); | |
8089 else if (gimple_code (stmt) == GIMPLE_COND) | |
8090 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p); | |
8091 else if (gimple_code (stmt) == GIMPLE_SWITCH) | |
8092 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p); | |
8093 } | |
8094 | |
8095 /* Evaluate statement STMT. If the statement produces a useful range, | 5232 /* Evaluate statement STMT. If the statement produces a useful range, |
8096 return SSA_PROP_INTERESTING and record the SSA name with the | 5233 return SSA_PROP_INTERESTING and record the SSA name with the |
8097 interesting range into *OUTPUT_P. | 5234 interesting range into *OUTPUT_P. |
8098 | 5235 |
8099 If STMT is a conditional branch and we can determine its truth | 5236 If STMT is a conditional branch and we can determine its truth |
8100 value, the taken edge is recorded in *TAKEN_EDGE_P. | 5237 value, the taken edge is recorded in *TAKEN_EDGE_P. |
8101 | 5238 |
8102 If STMT produces a varying value, return SSA_PROP_VARYING. */ | 5239 If STMT produces a varying value, return SSA_PROP_VARYING. */ |
8103 | 5240 |
8104 static enum ssa_prop_result | 5241 enum ssa_prop_result |
8105 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p) | 5242 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p) |
8106 { | 5243 { |
8107 value_range vr = VR_INITIALIZER; | |
8108 tree lhs = gimple_get_lhs (stmt); | 5244 tree lhs = gimple_get_lhs (stmt); |
5245 value_range vr; | |
8109 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr); | 5246 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr); |
8110 | 5247 |
8111 if (*output_p) | 5248 if (*output_p) |
8112 { | 5249 { |
8113 if (update_value_range (*output_p, &vr)) | 5250 if (update_value_range (*output_p, &vr)) |
8119 fprintf (dump_file, ": "); | 5256 fprintf (dump_file, ": "); |
8120 dump_value_range (dump_file, &vr); | 5257 dump_value_range (dump_file, &vr); |
8121 fprintf (dump_file, "\n"); | 5258 fprintf (dump_file, "\n"); |
8122 } | 5259 } |
8123 | 5260 |
8124 if (vr.type == VR_VARYING) | 5261 if (vr.varying_p ()) |
8125 return SSA_PROP_VARYING; | 5262 return SSA_PROP_VARYING; |
8126 | 5263 |
8127 return SSA_PROP_INTERESTING; | 5264 return SSA_PROP_INTERESTING; |
8128 } | 5265 } |
8129 return SSA_PROP_NOT_INTERESTING; | 5266 return SSA_PROP_NOT_INTERESTING; |
8172 or IMAGPART_EXPR immediate uses, but none of them have | 5309 or IMAGPART_EXPR immediate uses, but none of them have |
8173 a change in their value ranges, return | 5310 a change in their value ranges, return |
8174 SSA_PROP_NOT_INTERESTING. If there are no | 5311 SSA_PROP_NOT_INTERESTING. If there are no |
8175 {REAL,IMAG}PART_EXPR uses at all, | 5312 {REAL,IMAG}PART_EXPR uses at all, |
8176 return SSA_PROP_VARYING. */ | 5313 return SSA_PROP_VARYING. */ |
8177 value_range new_vr = VR_INITIALIZER; | 5314 value_range new_vr; |
8178 extract_range_basic (&new_vr, use_stmt); | 5315 extract_range_basic (&new_vr, use_stmt); |
8179 value_range *old_vr = get_value_range (use_lhs); | 5316 const value_range *old_vr = get_value_range (use_lhs); |
8180 if (old_vr->type != new_vr.type | 5317 if (*old_vr != new_vr) |
8181 || !vrp_operand_equal_p (old_vr->min, new_vr.min) | |
8182 || !vrp_operand_equal_p (old_vr->max, new_vr.max) | |
8183 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv)) | |
8184 res = SSA_PROP_INTERESTING; | 5318 res = SSA_PROP_INTERESTING; |
8185 else | 5319 else |
8186 res = SSA_PROP_NOT_INTERESTING; | 5320 res = SSA_PROP_NOT_INTERESTING; |
8187 BITMAP_FREE (new_vr.equiv); | 5321 new_vr.equiv_clear (); |
8188 if (res == SSA_PROP_INTERESTING) | 5322 if (res == SSA_PROP_INTERESTING) |
8189 { | 5323 { |
8190 *output_p = lhs; | 5324 *output_p = lhs; |
8191 return res; | 5325 return res; |
8192 } | 5326 } |
8210 { VR1TYPE, VR0MIN, VR0MAX } and store the result | 5344 { VR1TYPE, VR0MIN, VR0MAX } and store the result |
8211 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest | 5345 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest |
8212 possible such range. The resulting range is not canonicalized. */ | 5346 possible such range. The resulting range is not canonicalized. */ |
8213 | 5347 |
8214 static void | 5348 static void |
8215 union_ranges (enum value_range_type *vr0type, | 5349 union_ranges (enum value_range_kind *vr0type, |
8216 tree *vr0min, tree *vr0max, | 5350 tree *vr0min, tree *vr0max, |
8217 enum value_range_type vr1type, | 5351 enum value_range_kind vr1type, |
8218 tree vr1min, tree vr1max) | 5352 tree vr1min, tree vr1max) |
8219 { | 5353 { |
8220 bool mineq = vrp_operand_equal_p (*vr0min, vr1min); | 5354 bool mineq = vrp_operand_equal_p (*vr0min, vr1min); |
8221 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max); | 5355 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max); |
8222 | 5356 |
8454 && vr1type == VR_ANTI_RANGE) | 5588 && vr1type == VR_ANTI_RANGE) |
8455 { | 5589 { |
8456 if (TREE_CODE (*vr0min) == INTEGER_CST) | 5590 if (TREE_CODE (*vr0min) == INTEGER_CST) |
8457 { | 5591 { |
8458 *vr0type = vr1type; | 5592 *vr0type = vr1type; |
8459 *vr0min = vr1min; | |
8460 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, | 5593 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, |
8461 build_int_cst (TREE_TYPE (*vr0min), 1)); | 5594 build_int_cst (TREE_TYPE (*vr0min), 1)); |
5595 *vr0min = vr1min; | |
8462 } | 5596 } |
8463 else | 5597 else |
8464 goto give_up; | 5598 goto give_up; |
8465 } | 5599 } |
8466 else | 5600 else |
8481 { VR1TYPE, VR0MIN, VR0MAX } and store the result | 5615 { VR1TYPE, VR0MIN, VR0MAX } and store the result |
8482 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest | 5616 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest |
8483 possible such range. The resulting range is not canonicalized. */ | 5617 possible such range. The resulting range is not canonicalized. */ |
8484 | 5618 |
8485 static void | 5619 static void |
8486 intersect_ranges (enum value_range_type *vr0type, | 5620 intersect_ranges (enum value_range_kind *vr0type, |
8487 tree *vr0min, tree *vr0max, | 5621 tree *vr0min, tree *vr0max, |
8488 enum value_range_type vr1type, | 5622 enum value_range_kind vr1type, |
8489 tree vr1min, tree vr1max) | 5623 tree vr1min, tree vr1max) |
8490 { | 5624 { |
8491 bool mineq = vrp_operand_equal_p (*vr0min, vr1min); | 5625 bool mineq = vrp_operand_equal_p (*vr0min, vr1min); |
8492 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max); | 5626 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max); |
8493 | 5627 |
8672 /* Choose the anti-range if the range is effectively varying. */ | 5806 /* Choose the anti-range if the range is effectively varying. */ |
8673 else if (vrp_val_is_min (vr1min) | 5807 else if (vrp_val_is_min (vr1min) |
8674 && vrp_val_is_max (vr1max)) | 5808 && vrp_val_is_max (vr1max)) |
8675 ; | 5809 ; |
8676 /* Choose the anti-range if it is ~[0,0], that range is special | 5810 /* Choose the anti-range if it is ~[0,0], that range is special |
8677 enough to special case when vr1's range is relatively wide. */ | 5811 enough to special case when vr1's range is relatively wide. |
5812 At least for types bigger than int - this covers pointers | |
5813 and arguments to functions like ctz. */ | |
8678 else if (*vr0min == *vr0max | 5814 else if (*vr0min == *vr0max |
8679 && integer_zerop (*vr0min) | 5815 && integer_zerop (*vr0min) |
8680 && (TYPE_PRECISION (TREE_TYPE (*vr0min)) | 5816 && ((TYPE_PRECISION (TREE_TYPE (*vr0min)) |
8681 == TYPE_PRECISION (ptr_type_node)) | 5817 >= TYPE_PRECISION (integer_type_node)) |
5818 || POINTER_TYPE_P (TREE_TYPE (*vr0min))) | |
8682 && TREE_CODE (vr1max) == INTEGER_CST | 5819 && TREE_CODE (vr1max) == INTEGER_CST |
8683 && TREE_CODE (vr1min) == INTEGER_CST | 5820 && TREE_CODE (vr1min) == INTEGER_CST |
8684 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min)) | 5821 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min)) |
8685 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2)) | 5822 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2)) |
8686 ; | 5823 ; |
8790 { | 5927 { |
8791 *vr0type = vr1type; | 5928 *vr0type = vr1type; |
8792 *vr0min = vr1min; | 5929 *vr0min = vr1min; |
8793 *vr0max = vr1max; | 5930 *vr0max = vr1max; |
8794 } | 5931 } |
8795 | |
8796 return; | |
8797 } | 5932 } |
8798 | 5933 |
8799 | 5934 |
8800 /* Intersect the two value-ranges *VR0 and *VR1 and store the result | 5935 /* Intersect the two value-ranges *VR0 and *VR1 and store the result |
8801 in *VR0. This may not be the smallest possible such range. */ | 5936 in *VR0. This may not be the smallest possible such range. */ |
8802 | 5937 |
8803 static void | 5938 void |
8804 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1) | 5939 value_range::intersect_helper (value_range *vr0, const value_range *vr1) |
8805 { | 5940 { |
8806 value_range saved; | |
8807 | |
8808 /* If either range is VR_VARYING the other one wins. */ | 5941 /* If either range is VR_VARYING the other one wins. */ |
8809 if (vr1->type == VR_VARYING) | 5942 if (vr1->varying_p ()) |
8810 return; | 5943 return; |
8811 if (vr0->type == VR_VARYING) | 5944 if (vr0->varying_p ()) |
8812 { | 5945 { |
8813 copy_value_range (vr0, vr1); | 5946 vr0->deep_copy (vr1); |
8814 return; | 5947 return; |
8815 } | 5948 } |
8816 | 5949 |
8817 /* When either range is VR_UNDEFINED the resulting range is | 5950 /* When either range is VR_UNDEFINED the resulting range is |
8818 VR_UNDEFINED, too. */ | 5951 VR_UNDEFINED, too. */ |
8819 if (vr0->type == VR_UNDEFINED) | 5952 if (vr0->undefined_p ()) |
8820 return; | 5953 return; |
8821 if (vr1->type == VR_UNDEFINED) | 5954 if (vr1->undefined_p ()) |
8822 { | 5955 { |
8823 set_value_range_to_undefined (vr0); | 5956 set_value_range_to_undefined (vr0); |
8824 return; | 5957 return; |
8825 } | 5958 } |
8826 | 5959 |
8827 /* Save the original vr0 so we can return it as conservative intersection | 5960 /* Save the original vr0 so we can return it as conservative intersection |
8828 result when our worker turns things to varying. */ | 5961 result when our worker turns things to varying. */ |
8829 saved = *vr0; | 5962 value_range saved (*vr0); |
8830 intersect_ranges (&vr0->type, &vr0->min, &vr0->max, | 5963 |
8831 vr1->type, vr1->min, vr1->max); | 5964 value_range_kind vr0type = vr0->kind (); |
5965 tree vr0min = vr0->min (); | |
5966 tree vr0max = vr0->max (); | |
5967 intersect_ranges (&vr0type, &vr0min, &vr0max, | |
5968 vr1->kind (), vr1->min (), vr1->max ()); | |
8832 /* Make sure to canonicalize the result though as the inversion of a | 5969 /* Make sure to canonicalize the result though as the inversion of a |
8833 VR_RANGE can still be a VR_RANGE. */ | 5970 VR_RANGE can still be a VR_RANGE. */ |
8834 set_and_canonicalize_value_range (vr0, vr0->type, | 5971 vr0->set_and_canonicalize (vr0type, vr0min, vr0max, vr0->m_equiv); |
8835 vr0->min, vr0->max, vr0->equiv); | |
8836 /* If that failed, use the saved original VR0. */ | 5972 /* If that failed, use the saved original VR0. */ |
8837 if (vr0->type == VR_VARYING) | 5973 if (vr0->varying_p ()) |
8838 { | 5974 { |
8839 *vr0 = saved; | 5975 *vr0 = saved; |
8840 return; | 5976 return; |
8841 } | 5977 } |
8842 /* If the result is VR_UNDEFINED there is no need to mess with | 5978 /* If the result is VR_UNDEFINED there is no need to mess with |
8843 the equivalencies. */ | 5979 the equivalencies. */ |
8844 if (vr0->type == VR_UNDEFINED) | 5980 if (vr0->undefined_p ()) |
8845 return; | 5981 return; |
8846 | 5982 |
8847 /* The resulting set of equivalences for range intersection is the union of | 5983 /* The resulting set of equivalences for range intersection is the union of |
8848 the two sets. */ | 5984 the two sets. */ |
8849 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) | 5985 if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv) |
8850 bitmap_ior_into (vr0->equiv, vr1->equiv); | 5986 bitmap_ior_into (vr0->m_equiv, vr1->m_equiv); |
8851 else if (vr1->equiv && !vr0->equiv) | 5987 else if (vr1->m_equiv && !vr0->m_equiv) |
8852 { | 5988 { |
8853 vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack); | 5989 /* All equivalence bitmaps are allocated from the same obstack. So |
8854 bitmap_copy (vr0->equiv, vr1->equiv); | 5990 we can use the obstack associated with VR to allocate vr0->equiv. */ |
5991 vr0->m_equiv = BITMAP_ALLOC (vr1->m_equiv->obstack); | |
5992 bitmap_copy (m_equiv, vr1->m_equiv); | |
8855 } | 5993 } |
8856 } | 5994 } |
8857 | 5995 |
8858 void | 5996 void |
8859 vrp_intersect_ranges (value_range *vr0, value_range *vr1) | 5997 value_range::intersect (const value_range *other) |
8860 { | 5998 { |
8861 if (dump_file && (dump_flags & TDF_DETAILS)) | 5999 if (dump_file && (dump_flags & TDF_DETAILS)) |
8862 { | 6000 { |
8863 fprintf (dump_file, "Intersecting\n "); | 6001 fprintf (dump_file, "Intersecting\n "); |
8864 dump_value_range (dump_file, vr0); | 6002 dump_value_range (dump_file, this); |
8865 fprintf (dump_file, "\nand\n "); | 6003 fprintf (dump_file, "\nand\n "); |
8866 dump_value_range (dump_file, vr1); | 6004 dump_value_range (dump_file, other); |
8867 fprintf (dump_file, "\n"); | 6005 fprintf (dump_file, "\n"); |
8868 } | 6006 } |
8869 vrp_intersect_ranges_1 (vr0, vr1); | 6007 intersect_helper (this, other); |
8870 if (dump_file && (dump_flags & TDF_DETAILS)) | 6008 if (dump_file && (dump_flags & TDF_DETAILS)) |
8871 { | 6009 { |
8872 fprintf (dump_file, "to\n "); | 6010 fprintf (dump_file, "to\n "); |
8873 dump_value_range (dump_file, vr0); | 6011 dump_value_range (dump_file, this); |
8874 fprintf (dump_file, "\n"); | 6012 fprintf (dump_file, "\n"); |
8875 } | 6013 } |
8876 } | 6014 } |
8877 | 6015 |
8878 /* Meet operation for value ranges. Given two value ranges VR0 and | 6016 /* Meet operation for value ranges. Given two value ranges VR0 and |
8879 VR1, store in VR0 a range that contains both VR0 and VR1. This | 6017 VR1, store in VR0 a range that contains both VR0 and VR1. This |
8880 may not be the smallest possible such range. */ | 6018 may not be the smallest possible such range. */ |
8881 | 6019 |
8882 static void | 6020 void |
8883 vrp_meet_1 (value_range *vr0, const value_range *vr1) | 6021 value_range::union_helper (value_range *vr0, const value_range *vr1) |
8884 { | 6022 { |
8885 value_range saved; | 6023 if (vr1->undefined_p ()) |
8886 | |
8887 if (vr0->type == VR_UNDEFINED) | |
8888 { | |
8889 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv); | |
8890 return; | |
8891 } | |
8892 | |
8893 if (vr1->type == VR_UNDEFINED) | |
8894 { | 6024 { |
8895 /* VR0 already has the resulting range. */ | 6025 /* VR0 already has the resulting range. */ |
8896 return; | 6026 return; |
8897 } | 6027 } |
8898 | 6028 |
8899 if (vr0->type == VR_VARYING) | 6029 if (vr0->undefined_p ()) |
6030 { | |
6031 vr0->deep_copy (vr1); | |
6032 return; | |
6033 } | |
6034 | |
6035 if (vr0->varying_p ()) | |
8900 { | 6036 { |
8901 /* Nothing to do. VR0 already has the resulting range. */ | 6037 /* Nothing to do. VR0 already has the resulting range. */ |
8902 return; | 6038 return; |
8903 } | 6039 } |
8904 | 6040 |
8905 if (vr1->type == VR_VARYING) | 6041 if (vr1->varying_p ()) |
8906 { | 6042 { |
8907 set_value_range_to_varying (vr0); | 6043 set_value_range_to_varying (vr0); |
8908 return; | 6044 return; |
8909 } | 6045 } |
8910 | 6046 |
8911 saved = *vr0; | 6047 value_range saved (*vr0); |
8912 union_ranges (&vr0->type, &vr0->min, &vr0->max, | 6048 value_range_kind vr0type = vr0->kind (); |
8913 vr1->type, vr1->min, vr1->max); | 6049 tree vr0min = vr0->min (); |
8914 if (vr0->type == VR_VARYING) | 6050 tree vr0max = vr0->max (); |
6051 union_ranges (&vr0type, &vr0min, &vr0max, | |
6052 vr1->kind (), vr1->min (), vr1->max ()); | |
6053 *vr0 = value_range (vr0type, vr0min, vr0max); | |
6054 if (vr0->varying_p ()) | |
8915 { | 6055 { |
8916 /* Failed to find an efficient meet. Before giving up and setting | 6056 /* Failed to find an efficient meet. Before giving up and setting |
8917 the result to VARYING, see if we can at least derive a useful | 6057 the result to VARYING, see if we can at least derive a useful |
8918 anti-range. FIXME, all this nonsense about distinguishing | 6058 anti-range. */ |
8919 anti-ranges from ranges is necessary because of the odd | 6059 if (range_includes_zero_p (&saved) == 0 |
8920 semantics of range_includes_zero_p and friends. */ | 6060 && range_includes_zero_p (vr1) == 0) |
8921 if (((saved.type == VR_RANGE | 6061 { |
8922 && range_includes_zero_p (saved.min, saved.max) == 0) | 6062 set_value_range_to_nonnull (vr0, saved.type ()); |
8923 || (saved.type == VR_ANTI_RANGE | |
8924 && range_includes_zero_p (saved.min, saved.max) == 1)) | |
8925 && ((vr1->type == VR_RANGE | |
8926 && range_includes_zero_p (vr1->min, vr1->max) == 0) | |
8927 || (vr1->type == VR_ANTI_RANGE | |
8928 && range_includes_zero_p (vr1->min, vr1->max) == 1))) | |
8929 { | |
8930 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min)); | |
8931 | 6063 |
8932 /* Since this meet operation did not result from the meeting of | 6064 /* Since this meet operation did not result from the meeting of |
8933 two equivalent names, VR0 cannot have any equivalences. */ | 6065 two equivalent names, VR0 cannot have any equivalences. */ |
8934 if (vr0->equiv) | 6066 if (vr0->m_equiv) |
8935 bitmap_clear (vr0->equiv); | 6067 bitmap_clear (vr0->m_equiv); |
8936 return; | 6068 return; |
8937 } | 6069 } |
8938 | 6070 |
8939 set_value_range_to_varying (vr0); | 6071 set_value_range_to_varying (vr0); |
8940 return; | 6072 return; |
8941 } | 6073 } |
8942 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max, | 6074 vr0->set_and_canonicalize (vr0->kind (), vr0->min (), vr0->max (), |
8943 vr0->equiv); | 6075 vr0->equiv ()); |
8944 if (vr0->type == VR_VARYING) | 6076 if (vr0->varying_p ()) |
8945 return; | 6077 return; |
8946 | 6078 |
8947 /* The resulting set of equivalences is always the intersection of | 6079 /* The resulting set of equivalences is always the intersection of |
8948 the two sets. */ | 6080 the two sets. */ |
8949 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) | 6081 if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv) |
8950 bitmap_and_into (vr0->equiv, vr1->equiv); | 6082 bitmap_and_into (vr0->m_equiv, vr1->m_equiv); |
8951 else if (vr0->equiv && !vr1->equiv) | 6083 else if (vr0->m_equiv && !vr1->m_equiv) |
8952 bitmap_clear (vr0->equiv); | 6084 bitmap_clear (vr0->m_equiv); |
8953 } | 6085 } |
8954 | 6086 |
8955 void | 6087 void |
8956 vrp_meet (value_range *vr0, const value_range *vr1) | 6088 value_range::union_ (const value_range *other) |
8957 { | 6089 { |
8958 if (dump_file && (dump_flags & TDF_DETAILS)) | 6090 if (dump_file && (dump_flags & TDF_DETAILS)) |
8959 { | 6091 { |
8960 fprintf (dump_file, "Meeting\n "); | 6092 fprintf (dump_file, "Meeting\n "); |
8961 dump_value_range (dump_file, vr0); | 6093 dump_value_range (dump_file, this); |
8962 fprintf (dump_file, "\nand\n "); | 6094 fprintf (dump_file, "\nand\n "); |
8963 dump_value_range (dump_file, vr1); | 6095 dump_value_range (dump_file, other); |
8964 fprintf (dump_file, "\n"); | 6096 fprintf (dump_file, "\n"); |
8965 } | 6097 } |
8966 vrp_meet_1 (vr0, vr1); | 6098 union_helper (this, other); |
8967 if (dump_file && (dump_flags & TDF_DETAILS)) | 6099 if (dump_file && (dump_flags & TDF_DETAILS)) |
8968 { | 6100 { |
8969 fprintf (dump_file, "to\n "); | 6101 fprintf (dump_file, "to\n "); |
8970 dump_value_range (dump_file, vr0); | 6102 dump_value_range (dump_file, this); |
8971 fprintf (dump_file, "\n"); | 6103 fprintf (dump_file, "\n"); |
8972 } | 6104 } |
8973 } | |
8974 | |
8975 | |
8976 /* Visit all arguments for PHI node PHI that flow through executable | |
8977 edges. If a valid value range can be derived from all the incoming | |
8978 value ranges, set a new range in VR_RESULT. */ | |
8979 | |
8980 static void | |
8981 extract_range_from_phi_node (gphi *phi, value_range *vr_result) | |
8982 { | |
8983 size_t i; | |
8984 tree lhs = PHI_RESULT (phi); | |
8985 value_range *lhs_vr = get_value_range (lhs); | |
8986 bool first = true; | |
8987 int edges, old_edges; | |
8988 struct loop *l; | |
8989 | |
8990 if (dump_file && (dump_flags & TDF_DETAILS)) | |
8991 { | |
8992 fprintf (dump_file, "\nVisiting PHI node: "); | |
8993 print_gimple_stmt (dump_file, phi, 0, dump_flags); | |
8994 } | |
8995 | |
8996 bool may_simulate_backedge_again = false; | |
8997 edges = 0; | |
8998 for (i = 0; i < gimple_phi_num_args (phi); i++) | |
8999 { | |
9000 edge e = gimple_phi_arg_edge (phi, i); | |
9001 | |
9002 if (dump_file && (dump_flags & TDF_DETAILS)) | |
9003 { | |
9004 fprintf (dump_file, | |
9005 " Argument #%d (%d -> %d %sexecutable)\n", | |
9006 (int) i, e->src->index, e->dest->index, | |
9007 (e->flags & EDGE_EXECUTABLE) ? "" : "not "); | |
9008 } | |
9009 | |
9010 if (e->flags & EDGE_EXECUTABLE) | |
9011 { | |
9012 tree arg = PHI_ARG_DEF (phi, i); | |
9013 value_range vr_arg; | |
9014 | |
9015 ++edges; | |
9016 | |
9017 if (TREE_CODE (arg) == SSA_NAME) | |
9018 { | |
9019 /* See if we are eventually going to change one of the args. */ | |
9020 gimple *def_stmt = SSA_NAME_DEF_STMT (arg); | |
9021 if (! gimple_nop_p (def_stmt) | |
9022 && prop_simulate_again_p (def_stmt) | |
9023 && e->flags & EDGE_DFS_BACK) | |
9024 may_simulate_backedge_again = true; | |
9025 | |
9026 vr_arg = *(get_value_range (arg)); | |
9027 /* Do not allow equivalences or symbolic ranges to leak in from | |
9028 backedges. That creates invalid equivalencies. | |
9029 See PR53465 and PR54767. */ | |
9030 if (e->flags & EDGE_DFS_BACK) | |
9031 { | |
9032 if (vr_arg.type == VR_RANGE | |
9033 || vr_arg.type == VR_ANTI_RANGE) | |
9034 { | |
9035 vr_arg.equiv = NULL; | |
9036 if (symbolic_range_p (&vr_arg)) | |
9037 { | |
9038 vr_arg.type = VR_VARYING; | |
9039 vr_arg.min = NULL_TREE; | |
9040 vr_arg.max = NULL_TREE; | |
9041 } | |
9042 } | |
9043 } | |
9044 else | |
9045 { | |
9046 /* If the non-backedge arguments range is VR_VARYING then | |
9047 we can still try recording a simple equivalence. */ | |
9048 if (vr_arg.type == VR_VARYING) | |
9049 { | |
9050 vr_arg.type = VR_RANGE; | |
9051 vr_arg.min = arg; | |
9052 vr_arg.max = arg; | |
9053 vr_arg.equiv = NULL; | |
9054 } | |
9055 } | |
9056 } | |
9057 else | |
9058 { | |
9059 if (TREE_OVERFLOW_P (arg)) | |
9060 arg = drop_tree_overflow (arg); | |
9061 | |
9062 vr_arg.type = VR_RANGE; | |
9063 vr_arg.min = arg; | |
9064 vr_arg.max = arg; | |
9065 vr_arg.equiv = NULL; | |
9066 } | |
9067 | |
9068 if (dump_file && (dump_flags & TDF_DETAILS)) | |
9069 { | |
9070 fprintf (dump_file, "\t"); | |
9071 print_generic_expr (dump_file, arg, dump_flags); | |
9072 fprintf (dump_file, ": "); | |
9073 dump_value_range (dump_file, &vr_arg); | |
9074 fprintf (dump_file, "\n"); | |
9075 } | |
9076 | |
9077 if (first) | |
9078 copy_value_range (vr_result, &vr_arg); | |
9079 else | |
9080 vrp_meet (vr_result, &vr_arg); | |
9081 first = false; | |
9082 | |
9083 if (vr_result->type == VR_VARYING) | |
9084 break; | |
9085 } | |
9086 } | |
9087 | |
9088 if (vr_result->type == VR_VARYING) | |
9089 goto varying; | |
9090 else if (vr_result->type == VR_UNDEFINED) | |
9091 goto update_range; | |
9092 | |
9093 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; | |
9094 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; | |
9095 | |
9096 /* To prevent infinite iterations in the algorithm, derive ranges | |
9097 when the new value is slightly bigger or smaller than the | |
9098 previous one. We don't do this if we have seen a new executable | |
9099 edge; this helps us avoid an infinity for conditionals | |
9100 which are not in a loop. If the old value-range was VR_UNDEFINED | |
9101 use the updated range and iterate one more time. If we will not | |
9102 simulate this PHI again via the backedge allow us to iterate. */ | |
9103 if (edges > 0 | |
9104 && gimple_phi_num_args (phi) > 1 | |
9105 && edges == old_edges | |
9106 && lhs_vr->type != VR_UNDEFINED | |
9107 && may_simulate_backedge_again) | |
9108 { | |
9109 /* Compare old and new ranges, fall back to varying if the | |
9110 values are not comparable. */ | |
9111 int cmp_min = compare_values (lhs_vr->min, vr_result->min); | |
9112 if (cmp_min == -2) | |
9113 goto varying; | |
9114 int cmp_max = compare_values (lhs_vr->max, vr_result->max); | |
9115 if (cmp_max == -2) | |
9116 goto varying; | |
9117 | |
9118 /* For non VR_RANGE or for pointers fall back to varying if | |
9119 the range changed. */ | |
9120 if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE | |
9121 || POINTER_TYPE_P (TREE_TYPE (lhs))) | |
9122 && (cmp_min != 0 || cmp_max != 0)) | |
9123 goto varying; | |
9124 | |
9125 /* If the new minimum is larger than the previous one | |
9126 retain the old value. If the new minimum value is smaller | |
9127 than the previous one and not -INF go all the way to -INF + 1. | |
9128 In the first case, to avoid infinite bouncing between different | |
9129 minimums, and in the other case to avoid iterating millions of | |
9130 times to reach -INF. Going to -INF + 1 also lets the following | |
9131 iteration compute whether there will be any overflow, at the | |
9132 expense of one additional iteration. */ | |
9133 if (cmp_min < 0) | |
9134 vr_result->min = lhs_vr->min; | |
9135 else if (cmp_min > 0 | |
9136 && !vrp_val_is_min (vr_result->min)) | |
9137 vr_result->min | |
9138 = int_const_binop (PLUS_EXPR, | |
9139 vrp_val_min (TREE_TYPE (vr_result->min)), | |
9140 build_int_cst (TREE_TYPE (vr_result->min), 1)); | |
9141 | |
9142 /* Similarly for the maximum value. */ | |
9143 if (cmp_max > 0) | |
9144 vr_result->max = lhs_vr->max; | |
9145 else if (cmp_max < 0 | |
9146 && !vrp_val_is_max (vr_result->max)) | |
9147 vr_result->max | |
9148 = int_const_binop (MINUS_EXPR, | |
9149 vrp_val_max (TREE_TYPE (vr_result->min)), | |
9150 build_int_cst (TREE_TYPE (vr_result->min), 1)); | |
9151 | |
9152 /* If we dropped either bound to +-INF then if this is a loop | |
9153 PHI node SCEV may known more about its value-range. */ | |
9154 if (cmp_min > 0 || cmp_min < 0 | |
9155 || cmp_max < 0 || cmp_max > 0) | |
9156 goto scev_check; | |
9157 | |
9158 goto infinite_check; | |
9159 } | |
9160 | |
9161 goto update_range; | |
9162 | |
9163 varying: | |
9164 set_value_range_to_varying (vr_result); | |
9165 | |
9166 scev_check: | |
9167 /* If this is a loop PHI node SCEV may known more about its value-range. | |
9168 scev_check can be reached from two paths, one is a fall through from above | |
9169 "varying" label, the other is direct goto from code block which tries to | |
9170 avoid infinite simulation. */ | |
9171 if ((l = loop_containing_stmt (phi)) | |
9172 && l->header == gimple_bb (phi)) | |
9173 adjust_range_with_scev (vr_result, l, phi, lhs); | |
9174 | |
9175 infinite_check: | |
9176 /* If we will end up with a (-INF, +INF) range, set it to | |
9177 VARYING. Same if the previous max value was invalid for | |
9178 the type and we end up with vr_result.min > vr_result.max. */ | |
9179 if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE) | |
9180 && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min)) | |
9181 || compare_values (vr_result->min, vr_result->max) > 0)) | |
9182 ; | |
9183 else | |
9184 set_value_range_to_varying (vr_result); | |
9185 | |
9186 /* If the new range is different than the previous value, keep | |
9187 iterating. */ | |
9188 update_range: | |
9189 return; | |
9190 } | 6105 } |
9191 | 6106 |
9192 /* Visit all arguments for PHI node PHI that flow through executable | 6107 /* Visit all arguments for PHI node PHI that flow through executable |
9193 edges. If a valid value range can be derived from all the incoming | 6108 edges. If a valid value range can be derived from all the incoming |
9194 value ranges, set a new range for the LHS of PHI. */ | 6109 value ranges, set a new range for the LHS of PHI. */ |
9195 | 6110 |
9196 static enum ssa_prop_result | 6111 enum ssa_prop_result |
9197 vrp_visit_phi_node (gphi *phi) | 6112 vrp_prop::visit_phi (gphi *phi) |
9198 { | 6113 { |
9199 tree lhs = PHI_RESULT (phi); | 6114 tree lhs = PHI_RESULT (phi); |
9200 value_range vr_result = VR_INITIALIZER; | 6115 value_range vr_result; |
9201 extract_range_from_phi_node (phi, &vr_result); | 6116 extract_range_from_phi_node (phi, &vr_result); |
9202 if (update_value_range (lhs, &vr_result)) | 6117 if (update_value_range (lhs, &vr_result)) |
9203 { | 6118 { |
9204 if (dump_file && (dump_flags & TDF_DETAILS)) | 6119 if (dump_file && (dump_flags & TDF_DETAILS)) |
9205 { | 6120 { |
9208 fprintf (dump_file, ": "); | 6123 fprintf (dump_file, ": "); |
9209 dump_value_range (dump_file, &vr_result); | 6124 dump_value_range (dump_file, &vr_result); |
9210 fprintf (dump_file, "\n"); | 6125 fprintf (dump_file, "\n"); |
9211 } | 6126 } |
9212 | 6127 |
9213 if (vr_result.type == VR_VARYING) | 6128 if (vr_result.varying_p ()) |
9214 return SSA_PROP_VARYING; | 6129 return SSA_PROP_VARYING; |
9215 | 6130 |
9216 return SSA_PROP_INTERESTING; | 6131 return SSA_PROP_INTERESTING; |
9217 } | 6132 } |
9218 | 6133 |
9219 /* Nothing changed, don't add outgoing edges. */ | 6134 /* Nothing changed, don't add outgoing edges. */ |
9220 return SSA_PROP_NOT_INTERESTING; | 6135 return SSA_PROP_NOT_INTERESTING; |
9221 } | 6136 } |
9222 | 6137 |
9223 /* Simplify boolean operations if the source is known | 6138 class vrp_folder : public substitute_and_fold_engine |
9224 to be already a boolean. */ | 6139 { |
9225 static bool | 6140 public: |
9226 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) | 6141 tree get_value (tree) FINAL OVERRIDE; |
9227 { | 6142 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE; |
9228 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); | 6143 bool fold_predicate_in (gimple_stmt_iterator *); |
9229 tree lhs, op0, op1; | 6144 |
9230 bool need_conversion; | 6145 class vr_values *vr_values; |
9231 | 6146 |
9232 /* We handle only !=/== case here. */ | 6147 /* Delegators. */ |
9233 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR); | 6148 tree vrp_evaluate_conditional (tree_code code, tree op0, |
9234 | 6149 tree op1, gimple *stmt) |
9235 op0 = gimple_assign_rhs1 (stmt); | 6150 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); } |
9236 if (!op_with_boolean_value_range_p (op0)) | 6151 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) |
9237 return false; | 6152 { return vr_values->simplify_stmt_using_ranges (gsi); } |
9238 | 6153 tree op_with_constant_singleton_value_range (tree op) |
9239 op1 = gimple_assign_rhs2 (stmt); | 6154 { return vr_values->op_with_constant_singleton_value_range (op); } |
9240 if (!op_with_boolean_value_range_p (op1)) | 6155 }; |
9241 return false; | |
9242 | |
9243 /* Reduce number of cases to handle to NE_EXPR. As there is no | |
9244 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */ | |
9245 if (rhs_code == EQ_EXPR) | |
9246 { | |
9247 if (TREE_CODE (op1) == INTEGER_CST) | |
9248 op1 = int_const_binop (BIT_XOR_EXPR, op1, | |
9249 build_int_cst (TREE_TYPE (op1), 1)); | |
9250 else | |
9251 return false; | |
9252 } | |
9253 | |
9254 lhs = gimple_assign_lhs (stmt); | |
9255 need_conversion | |
9256 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0)); | |
9257 | |
9258 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */ | |
9259 if (need_conversion | |
9260 && !TYPE_UNSIGNED (TREE_TYPE (op0)) | |
9261 && TYPE_PRECISION (TREE_TYPE (op0)) == 1 | |
9262 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1) | |
9263 return false; | |
9264 | |
9265 /* For A != 0 we can substitute A itself. */ | |
9266 if (integer_zerop (op1)) | |
9267 gimple_assign_set_rhs_with_ops (gsi, | |
9268 need_conversion | |
9269 ? NOP_EXPR : TREE_CODE (op0), op0); | |
9270 /* For A != B we substitute A ^ B. Either with conversion. */ | |
9271 else if (need_conversion) | |
9272 { | |
9273 tree tem = make_ssa_name (TREE_TYPE (op0)); | |
9274 gassign *newop | |
9275 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1); | |
9276 gsi_insert_before (gsi, newop, GSI_SAME_STMT); | |
9277 if (INTEGRAL_TYPE_P (TREE_TYPE (tem)) | |
9278 && TYPE_PRECISION (TREE_TYPE (tem)) > 1) | |
9279 set_range_info (tem, VR_RANGE, | |
9280 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))), | |
9281 wi::one (TYPE_PRECISION (TREE_TYPE (tem)))); | |
9282 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem); | |
9283 } | |
9284 /* Or without. */ | |
9285 else | |
9286 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1); | |
9287 update_stmt (gsi_stmt (*gsi)); | |
9288 fold_stmt (gsi, follow_single_use_edges); | |
9289 | |
9290 return true; | |
9291 } | |
9292 | |
9293 /* Simplify a division or modulo operator to a right shift or bitwise and | |
9294 if the first operand is unsigned or is greater than zero and the second | |
9295 operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with | |
9296 constant op1 (op1min = op1) or with op1 in [op1min, op1max] range, | |
9297 optimize it into just op0 if op0's range is known to be a subset of | |
9298 [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned | |
9299 modulo. */ | |
9300 | |
9301 static bool | |
9302 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) | |
9303 { | |
9304 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); | |
9305 tree val = NULL; | |
9306 tree op0 = gimple_assign_rhs1 (stmt); | |
9307 tree op1 = gimple_assign_rhs2 (stmt); | |
9308 tree op0min = NULL_TREE, op0max = NULL_TREE; | |
9309 tree op1min = op1; | |
9310 value_range *vr = NULL; | |
9311 | |
9312 if (TREE_CODE (op0) == INTEGER_CST) | |
9313 { | |
9314 op0min = op0; | |
9315 op0max = op0; | |
9316 } | |
9317 else | |
9318 { | |
9319 vr = get_value_range (op0); | |
9320 if (range_int_cst_p (vr)) | |
9321 { | |
9322 op0min = vr->min; | |
9323 op0max = vr->max; | |
9324 } | |
9325 } | |
9326 | |
9327 if (rhs_code == TRUNC_MOD_EXPR | |
9328 && TREE_CODE (op1) == SSA_NAME) | |
9329 { | |
9330 value_range *vr1 = get_value_range (op1); | |
9331 if (range_int_cst_p (vr1)) | |
9332 op1min = vr1->min; | |
9333 } | |
9334 if (rhs_code == TRUNC_MOD_EXPR | |
9335 && TREE_CODE (op1min) == INTEGER_CST | |
9336 && tree_int_cst_sgn (op1min) == 1 | |
9337 && op0max | |
9338 && tree_int_cst_lt (op0max, op1min)) | |
9339 { | |
9340 if (TYPE_UNSIGNED (TREE_TYPE (op0)) | |
9341 || tree_int_cst_sgn (op0min) >= 0 | |
9342 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min), | |
9343 op0min)) | |
9344 { | |
9345 /* If op0 already has the range op0 % op1 has, | |
9346 then TRUNC_MOD_EXPR won't change anything. */ | |
9347 gimple_assign_set_rhs_from_tree (gsi, op0); | |
9348 return true; | |
9349 } | |
9350 } | |
9351 | |
9352 if (TREE_CODE (op0) != SSA_NAME) | |
9353 return false; | |
9354 | |
9355 if (!integer_pow2p (op1)) | |
9356 { | |
9357 /* X % -Y can be only optimized into X % Y either if | |
9358 X is not INT_MIN, or Y is not -1. Fold it now, as after | |
9359 remove_range_assertions the range info might be not available | |
9360 anymore. */ | |
9361 if (rhs_code == TRUNC_MOD_EXPR | |
9362 && fold_stmt (gsi, follow_single_use_edges)) | |
9363 return true; | |
9364 return false; | |
9365 } | |
9366 | |
9367 if (TYPE_UNSIGNED (TREE_TYPE (op0))) | |
9368 val = integer_one_node; | |
9369 else | |
9370 { | |
9371 bool sop = false; | |
9372 | |
9373 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); | |
9374 | |
9375 if (val | |
9376 && sop | |
9377 && integer_onep (val) | |
9378 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) | |
9379 { | |
9380 location_t location; | |
9381 | |
9382 if (!gimple_has_location (stmt)) | |
9383 location = input_location; | |
9384 else | |
9385 location = gimple_location (stmt); | |
9386 warning_at (location, OPT_Wstrict_overflow, | |
9387 "assuming signed overflow does not occur when " | |
9388 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>"); | |
9389 } | |
9390 } | |
9391 | |
9392 if (val && integer_onep (val)) | |
9393 { | |
9394 tree t; | |
9395 | |
9396 if (rhs_code == TRUNC_DIV_EXPR) | |
9397 { | |
9398 t = build_int_cst (integer_type_node, tree_log2 (op1)); | |
9399 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR); | |
9400 gimple_assign_set_rhs1 (stmt, op0); | |
9401 gimple_assign_set_rhs2 (stmt, t); | |
9402 } | |
9403 else | |
9404 { | |
9405 t = build_int_cst (TREE_TYPE (op1), 1); | |
9406 t = int_const_binop (MINUS_EXPR, op1, t); | |
9407 t = fold_convert (TREE_TYPE (op0), t); | |
9408 | |
9409 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR); | |
9410 gimple_assign_set_rhs1 (stmt, op0); | |
9411 gimple_assign_set_rhs2 (stmt, t); | |
9412 } | |
9413 | |
9414 update_stmt (stmt); | |
9415 fold_stmt (gsi, follow_single_use_edges); | |
9416 return true; | |
9417 } | |
9418 | |
9419 return false; | |
9420 } | |
9421 | |
9422 /* Simplify a min or max if the ranges of the two operands are | |
9423 disjoint. Return true if we do simplify. */ | |
9424 | |
9425 static bool | |
9426 simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) | |
9427 { | |
9428 tree op0 = gimple_assign_rhs1 (stmt); | |
9429 tree op1 = gimple_assign_rhs2 (stmt); | |
9430 bool sop = false; | |
9431 tree val; | |
9432 | |
9433 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
9434 (LE_EXPR, op0, op1, &sop)); | |
9435 if (!val) | |
9436 { | |
9437 sop = false; | |
9438 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
9439 (LT_EXPR, op0, op1, &sop)); | |
9440 } | |
9441 | |
9442 if (val) | |
9443 { | |
9444 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) | |
9445 { | |
9446 location_t location; | |
9447 | |
9448 if (!gimple_has_location (stmt)) | |
9449 location = input_location; | |
9450 else | |
9451 location = gimple_location (stmt); | |
9452 warning_at (location, OPT_Wstrict_overflow, | |
9453 "assuming signed overflow does not occur when " | |
9454 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>"); | |
9455 } | |
9456 | |
9457 /* VAL == TRUE -> OP0 < or <= op1 | |
9458 VAL == FALSE -> OP0 > or >= op1. */ | |
9459 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR) | |
9460 == integer_zerop (val)) ? op0 : op1; | |
9461 gimple_assign_set_rhs_from_tree (gsi, res); | |
9462 return true; | |
9463 } | |
9464 | |
9465 return false; | |
9466 } | |
9467 | |
9468 /* If the operand to an ABS_EXPR is >= 0, then eliminate the | |
9469 ABS_EXPR. If the operand is <= 0, then simplify the | |
9470 ABS_EXPR into a NEGATE_EXPR. */ | |
9471 | |
9472 static bool | |
9473 simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) | |
9474 { | |
9475 tree op = gimple_assign_rhs1 (stmt); | |
9476 value_range *vr = get_value_range (op); | |
9477 | |
9478 if (vr) | |
9479 { | |
9480 tree val = NULL; | |
9481 bool sop = false; | |
9482 | |
9483 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop); | |
9484 if (!val) | |
9485 { | |
9486 /* The range is neither <= 0 nor > 0. Now see if it is | |
9487 either < 0 or >= 0. */ | |
9488 sop = false; | |
9489 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node, | |
9490 &sop); | |
9491 } | |
9492 | |
9493 if (val) | |
9494 { | |
9495 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) | |
9496 { | |
9497 location_t location; | |
9498 | |
9499 if (!gimple_has_location (stmt)) | |
9500 location = input_location; | |
9501 else | |
9502 location = gimple_location (stmt); | |
9503 warning_at (location, OPT_Wstrict_overflow, | |
9504 "assuming signed overflow does not occur when " | |
9505 "simplifying %<abs (X)%> to %<X%> or %<-X%>"); | |
9506 } | |
9507 | |
9508 gimple_assign_set_rhs1 (stmt, op); | |
9509 if (integer_zerop (val)) | |
9510 gimple_assign_set_rhs_code (stmt, SSA_NAME); | |
9511 else | |
9512 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR); | |
9513 update_stmt (stmt); | |
9514 fold_stmt (gsi, follow_single_use_edges); | |
9515 return true; | |
9516 } | |
9517 } | |
9518 | |
9519 return false; | |
9520 } | |
9521 | |
9522 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR. | |
9523 If all the bits that are being cleared by & are already | |
9524 known to be zero from VR, or all the bits that are being | |
9525 set by | are already known to be one from VR, the bit | |
9526 operation is redundant. */ | |
9527 | |
9528 static bool | |
9529 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) | |
9530 { | |
9531 tree op0 = gimple_assign_rhs1 (stmt); | |
9532 tree op1 = gimple_assign_rhs2 (stmt); | |
9533 tree op = NULL_TREE; | |
9534 value_range vr0 = VR_INITIALIZER; | |
9535 value_range vr1 = VR_INITIALIZER; | |
9536 wide_int may_be_nonzero0, may_be_nonzero1; | |
9537 wide_int must_be_nonzero0, must_be_nonzero1; | |
9538 wide_int mask; | |
9539 | |
9540 if (TREE_CODE (op0) == SSA_NAME) | |
9541 vr0 = *(get_value_range (op0)); | |
9542 else if (is_gimple_min_invariant (op0)) | |
9543 set_value_range_to_value (&vr0, op0, NULL); | |
9544 else | |
9545 return false; | |
9546 | |
9547 if (TREE_CODE (op1) == SSA_NAME) | |
9548 vr1 = *(get_value_range (op1)); | |
9549 else if (is_gimple_min_invariant (op1)) | |
9550 set_value_range_to_value (&vr1, op1, NULL); | |
9551 else | |
9552 return false; | |
9553 | |
9554 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, | |
9555 &must_be_nonzero0)) | |
9556 return false; | |
9557 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, | |
9558 &must_be_nonzero1)) | |
9559 return false; | |
9560 | |
9561 switch (gimple_assign_rhs_code (stmt)) | |
9562 { | |
9563 case BIT_AND_EXPR: | |
9564 mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1); | |
9565 if (mask == 0) | |
9566 { | |
9567 op = op0; | |
9568 break; | |
9569 } | |
9570 mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0); | |
9571 if (mask == 0) | |
9572 { | |
9573 op = op1; | |
9574 break; | |
9575 } | |
9576 break; | |
9577 case BIT_IOR_EXPR: | |
9578 mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1); | |
9579 if (mask == 0) | |
9580 { | |
9581 op = op1; | |
9582 break; | |
9583 } | |
9584 mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0); | |
9585 if (mask == 0) | |
9586 { | |
9587 op = op0; | |
9588 break; | |
9589 } | |
9590 break; | |
9591 default: | |
9592 gcc_unreachable (); | |
9593 } | |
9594 | |
9595 if (op == NULL_TREE) | |
9596 return false; | |
9597 | |
9598 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op); | |
9599 update_stmt (gsi_stmt (*gsi)); | |
9600 return true; | |
9601 } | |
9602 | |
9603 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has | |
9604 a known value range VR. | |
9605 | |
9606 If there is one and only one value which will satisfy the | |
9607 conditional, then return that value. Else return NULL. | |
9608 | |
9609 If signed overflow must be undefined for the value to satisfy | |
9610 the conditional, then set *STRICT_OVERFLOW_P to true. */ | |
9611 | |
9612 static tree | |
9613 test_for_singularity (enum tree_code cond_code, tree op0, | |
9614 tree op1, value_range *vr) | |
9615 { | |
9616 tree min = NULL; | |
9617 tree max = NULL; | |
9618 | |
9619 /* Extract minimum/maximum values which satisfy the conditional as it was | |
9620 written. */ | |
9621 if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
9622 { | |
9623 min = TYPE_MIN_VALUE (TREE_TYPE (op0)); | |
9624 | |
9625 max = op1; | |
9626 if (cond_code == LT_EXPR) | |
9627 { | |
9628 tree one = build_int_cst (TREE_TYPE (op0), 1); | |
9629 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one); | |
9630 /* Signal to compare_values_warnv this expr doesn't overflow. */ | |
9631 if (EXPR_P (max)) | |
9632 TREE_NO_WARNING (max) = 1; | |
9633 } | |
9634 } | |
9635 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) | |
9636 { | |
9637 max = TYPE_MAX_VALUE (TREE_TYPE (op0)); | |
9638 | |
9639 min = op1; | |
9640 if (cond_code == GT_EXPR) | |
9641 { | |
9642 tree one = build_int_cst (TREE_TYPE (op0), 1); | |
9643 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one); | |
9644 /* Signal to compare_values_warnv this expr doesn't overflow. */ | |
9645 if (EXPR_P (min)) | |
9646 TREE_NO_WARNING (min) = 1; | |
9647 } | |
9648 } | |
9649 | |
9650 /* Now refine the minimum and maximum values using any | |
9651 value range information we have for op0. */ | |
9652 if (min && max) | |
9653 { | |
9654 if (compare_values (vr->min, min) == 1) | |
9655 min = vr->min; | |
9656 if (compare_values (vr->max, max) == -1) | |
9657 max = vr->max; | |
9658 | |
9659 /* If the new min/max values have converged to a single value, | |
9660 then there is only one value which can satisfy the condition, | |
9661 return that value. */ | |
9662 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min)) | |
9663 return min; | |
9664 } | |
9665 return NULL; | |
9666 } | |
9667 | |
/* Return whether the value range *VR fits in an integer type specified
   by DEST_PRECISION and DEST_SGN, i.e. whether converting every value
   in *VR to that type is value-preserving.  */

static bool
range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (wi::to_wide (vr->min), 0)
	  || wi::lts_p (wi::to_wide (vr->max), 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  Extending MIN/MAX to DEST_PRECISION
     must give back the original value for the conversion to be
     lossless.  */
  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min))
    return false;
  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max))
    return false;

  return true;
}
9720 | |
9721 /* Simplify a conditional using a relational operator to an equality | |
9722 test if the range information indicates only one value can satisfy | |
9723 the original conditional. */ | |
9724 | |
9725 static bool | |
9726 simplify_cond_using_ranges_1 (gcond *stmt) | |
9727 { | |
9728 tree op0 = gimple_cond_lhs (stmt); | |
9729 tree op1 = gimple_cond_rhs (stmt); | |
9730 enum tree_code cond_code = gimple_cond_code (stmt); | |
9731 | |
9732 if (cond_code != NE_EXPR | |
9733 && cond_code != EQ_EXPR | |
9734 && TREE_CODE (op0) == SSA_NAME | |
9735 && INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
9736 && is_gimple_min_invariant (op1)) | |
9737 { | |
9738 value_range *vr = get_value_range (op0); | |
9739 | |
9740 /* If we have range information for OP0, then we might be | |
9741 able to simplify this conditional. */ | |
9742 if (vr->type == VR_RANGE) | |
9743 { | |
9744 tree new_tree = test_for_singularity (cond_code, op0, op1, vr); | |
9745 if (new_tree) | |
9746 { | |
9747 if (dump_file) | |
9748 { | |
9749 fprintf (dump_file, "Simplified relational "); | |
9750 print_gimple_stmt (dump_file, stmt, 0); | |
9751 fprintf (dump_file, " into "); | |
9752 } | |
9753 | |
9754 gimple_cond_set_code (stmt, EQ_EXPR); | |
9755 gimple_cond_set_lhs (stmt, op0); | |
9756 gimple_cond_set_rhs (stmt, new_tree); | |
9757 | |
9758 update_stmt (stmt); | |
9759 | |
9760 if (dump_file) | |
9761 { | |
9762 print_gimple_stmt (dump_file, stmt, 0); | |
9763 fprintf (dump_file, "\n"); | |
9764 } | |
9765 | |
9766 return true; | |
9767 } | |
9768 | |
9769 /* Try again after inverting the condition. We only deal | |
9770 with integral types here, so no need to worry about | |
9771 issues with inverting FP comparisons. */ | |
9772 new_tree = test_for_singularity | |
9773 (invert_tree_comparison (cond_code, false), | |
9774 op0, op1, vr); | |
9775 if (new_tree) | |
9776 { | |
9777 if (dump_file) | |
9778 { | |
9779 fprintf (dump_file, "Simplified relational "); | |
9780 print_gimple_stmt (dump_file, stmt, 0); | |
9781 fprintf (dump_file, " into "); | |
9782 } | |
9783 | |
9784 gimple_cond_set_code (stmt, NE_EXPR); | |
9785 gimple_cond_set_lhs (stmt, op0); | |
9786 gimple_cond_set_rhs (stmt, new_tree); | |
9787 | |
9788 update_stmt (stmt); | |
9789 | |
9790 if (dump_file) | |
9791 { | |
9792 print_gimple_stmt (dump_file, stmt, 0); | |
9793 fprintf (dump_file, "\n"); | |
9794 } | |
9795 | |
9796 return true; | |
9797 } | |
9798 } | |
9799 } | |
9800 return false; | |
9801 } | |
9802 | |
9803 /* STMT is a conditional at the end of a basic block. | |
9804 | |
9805 If the conditional is of the form SSA_NAME op constant and the SSA_NAME | |
9806 was set via a type conversion, try to replace the SSA_NAME with the RHS | |
9807 of the type conversion. Doing so makes the conversion dead which helps | |
9808 subsequent passes. */ | |
9809 | |
9810 static void | |
9811 simplify_cond_using_ranges_2 (gcond *stmt) | |
9812 { | |
9813 tree op0 = gimple_cond_lhs (stmt); | |
9814 tree op1 = gimple_cond_rhs (stmt); | |
9815 | |
9816 /* If we have a comparison of an SSA_NAME (OP0) against a constant, | |
9817 see if OP0 was set by a type conversion where the source of | |
9818 the conversion is another SSA_NAME with a range that fits | |
9819 into the range of OP0's type. | |
9820 | |
9821 If so, the conversion is redundant as the earlier SSA_NAME can be | |
9822 used for the comparison directly if we just massage the constant in the | |
9823 comparison. */ | |
9824 if (TREE_CODE (op0) == SSA_NAME | |
9825 && TREE_CODE (op1) == INTEGER_CST) | |
9826 { | |
9827 gimple *def_stmt = SSA_NAME_DEF_STMT (op0); | |
9828 tree innerop; | |
9829 | |
9830 if (!is_gimple_assign (def_stmt) | |
9831 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) | |
9832 return; | |
9833 | |
9834 innerop = gimple_assign_rhs1 (def_stmt); | |
9835 | |
9836 if (TREE_CODE (innerop) == SSA_NAME | |
9837 && !POINTER_TYPE_P (TREE_TYPE (innerop)) | |
9838 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop) | |
9839 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0))) | |
9840 { | |
9841 value_range *vr = get_value_range (innerop); | |
9842 | |
9843 if (range_int_cst_p (vr) | |
9844 && range_fits_type_p (vr, | |
9845 TYPE_PRECISION (TREE_TYPE (op0)), | |
9846 TYPE_SIGN (TREE_TYPE (op0))) | |
9847 && int_fits_type_p (op1, TREE_TYPE (innerop))) | |
9848 { | |
9849 tree newconst = fold_convert (TREE_TYPE (innerop), op1); | |
9850 gimple_cond_set_lhs (stmt, innerop); | |
9851 gimple_cond_set_rhs (stmt, newconst); | |
9852 update_stmt (stmt); | |
9853 if (dump_file && (dump_flags & TDF_DETAILS)) | |
9854 { | |
9855 fprintf (dump_file, "Folded into: "); | |
9856 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); | |
9857 fprintf (dump_file, "\n"); | |
9858 } | |
9859 } | |
9860 } | |
9861 } | |
9862 } | |
9863 | |
/* Simplify a switch statement using the value range of the switch
   argument.  Truncates or removes case labels that OP's range proves
   unreachable; CFG edge removals and the rewritten label vector are
   queued (to_remove_edges / to_update_switch_stmts) for the caller to
   apply, so this function itself always returns false.  */

static bool
simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range *vr = NULL;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the (up to two) index sub-ranges of case
     labels that remain reachable; an empty range has hi < lo.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* No label matches: make [i, j] empty.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  /* Exactly one label matches the constant.  */
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* We can truncate the case label ranges that partially overlap with OP's
     value range.  */
  size_t min_idx = 1, max_idx = 0;
  if (vr != NULL)
    find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
  if (min_idx <= max_idx)
    {
      tree min_label = gimple_switch_label (stmt, min_idx);
      tree max_label = gimple_switch_label (stmt, max_idx);

      /* Avoid changing the type of the case labels when truncating.  */
      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
      tree vr_min = fold_convert (case_label_type, vr->min);
      tree vr_max = fold_convert (case_label_type, vr->max);

      if (vr->type == VR_RANGE)
	{
	  /* If OP's value range is [2,8] and the low label range is
	     0 ... 3, truncate the label's range to 2 .. 3.  */
	  if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
	      && CASE_HIGH (min_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
	    CASE_LOW (min_label) = vr_min;

	  /* If OP's value range is [2,8] and the high label range is
	     7 ... 10, truncate the label's range to 7 .. 8.  */
	  if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
	      && CASE_HIGH (max_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
	    CASE_HIGH (max_label) = vr_max;
	}
      else if (vr->type == VR_ANTI_RANGE)
	{
	  tree one_cst = build_one_cst (case_label_type);

	  if (min_label == max_label)
	    {
	      /* If OP's value range is ~[7,8] and the label's range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
		CASE_LOW (min_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);

	      /* If OP's value range is ~[7,8] and the label's range is
		 5 ... 8, truncate the label's range to 5 ... 6.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
	    }
	  else
	    {
	      /* If OP's value range is ~[2,8] and the low label range is
		 0 ... 3, truncate the label's range to 0 ... 1.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);

	      /* If OP's value range is ~[2,8] and the high label range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
		  && CASE_HIGH (max_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
		CASE_LOW (max_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
	    }
	}

      /* Canonicalize singleton case ranges.  */
      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
	CASE_HIGH (min_label) = NULL_TREE;
      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
	CASE_HIGH (max_label) = NULL_TREE;
    }

  /* We can also eliminate case labels that lie completely outside OP's value
     range.  */

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  The -1 aux marker distinguishes edges reached
     by a surviving label from those to be removed below.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Edge is still reachable; clear the marker.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
10045 | |
/* Simplify an integral conversion from an SSA name in STMT.  STMT is
   (final) = (middle) where MIDDLE itself is a conversion of INNER; if
   converting INNER directly to the final type provably yields the same
   value for the whole range of INNER, drop the middle conversion.  */

static bool
simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree innerop, middleop, finaltype;
  gimple *def_stmt;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  Use get_range_info in
     case innerop was created during substitute-and-fold.  */
  wide_int imin, imax;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
      || get_range_info (innerop, &imin, &imax) != VR_RANGE)
    return false;
  innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
  innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* Only keep the medium value if it actually lies strictly inside
     [innermin, innermax]; otherwise fall back to the minimum.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
      != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
      != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is redundant: convert INNEROP directly.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  fold_stmt (gsi, follow_single_use_edges);
  return true;
}
10122 | |
/* Simplify a conversion from integral SSA name to float in STMT.
   If the target cannot directly convert from RHS1's mode, try to
   find a narrower (or signed) integer mode whose conversion the
   target supports and into which RHS1's value range fits, and insert
   an intermediate integer conversion.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
					gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (rhs1);
  scalar_float_mode fltmode
    = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  scalar_int_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = rhs_mode;
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, rhs_mode,
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = NARROWEST_INT_MODE;
      for (;;)
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
	      || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
10186 | |
/* Simplify an internal fn call using ranges if possible.  Handles the
   UBSAN_CHECK_{ADD,SUB,MUL} and {ADD,SUB,MUL}_OVERFLOW internal calls:
   when check_for_binary_op_overflow can decide whether the operation
   overflows, the call is replaced by plain arithmetic (UBSAN case) or
   by arithmetic plus a constant overflow flag packed into a
   COMPLEX_EXPR (the *_OVERFLOW case).  */

static bool
simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);
  tree type;
  if (is_ubsan)
    {
      type = TREE_TYPE (op0);
      if (VECTOR_TYPE_P (type))
	return false;
    }
  else if (gimple_call_lhs (stmt) == NULL_TREE)
    return false;
  else
    /* For *_OVERFLOW the lhs is a complex value; TYPE is its
       element type.  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
  /* Bail if the overflow status cannot be decided, or (for UBSAN
     checks) if the operation does overflow — the check must stay.  */
  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
  else
    {
      /* Do the arithmetic in an unsigned type when it might overflow
	 or when operand types don't match, to avoid introducing
	 signed overflow UB.  */
      int prec = TYPE_PRECISION (type);
      tree utype = type;
      if (ovf
	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
	utype = build_nonstandard_integer_type (prec, 1);
      if (TREE_CODE (op0) == INTEGER_CST)
	op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op0 = gimple_assign_lhs (g);
	}
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op1 = gimple_assign_lhs (g);
	}
      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      if (utype != type)
	{
	  /* Convert the result back to the expected element type.  */
	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
				   gimple_assign_lhs (g));
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	}
      /* Pack the result and the (now known constant) overflow flag
	 into the complex lhs.  */
      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
			       gimple_assign_lhs (g),
			       build_int_cst (type, ovf));
    }
  gimple_set_location (g, loc);
  gsi_replace (gsi, g, false);
  return true;
}
10287 | |
10288 /* Return true if VAR is a two-valued variable. Set a and b with the | |
10289 two-values when it is true. Return false otherwise. */ | |
10290 | |
10291 static bool | |
10292 two_valued_val_range_p (tree var, tree *a, tree *b) | |
10293 { | |
10294 value_range *vr = get_value_range (var); | |
10295 if ((vr->type != VR_RANGE | |
10296 && vr->type != VR_ANTI_RANGE) | |
10297 || TREE_CODE (vr->min) != INTEGER_CST | |
10298 || TREE_CODE (vr->max) != INTEGER_CST) | |
10299 return false; | |
10300 | |
10301 if (vr->type == VR_RANGE | |
10302 && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1) | |
10303 { | |
10304 *a = vr->min; | |
10305 *b = vr->max; | |
10306 return true; | |
10307 } | |
10308 | |
10309 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */ | |
10310 if (vr->type == VR_ANTI_RANGE | |
10311 && (wi::to_wide (vr->min) | |
10312 - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1 | |
10313 && (wi::to_wide (vrp_val_max (TREE_TYPE (var))) | |
10314 - wi::to_wide (vr->max)) == 1) | |
10315 { | |
10316 *a = vrp_val_min (TREE_TYPE (var)); | |
10317 *b = vrp_val_max (TREE_TYPE (var)); | |
10318 return true; | |
10319 } | |
10320 | |
10321 return false; | |
10322 } | |
10323 | |
/* Simplify STMT using ranges if possible.  Central dispatcher: tries
   the two-valued-operand COND_EXPR rewrite first, then dispatches on
   the assignment's rhs code (or on GIMPLE_COND/GIMPLE_SWITCH/internal
   call) to the specialized simplifiers above.  Returns true if STMT
   was changed.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      tree lhs = gimple_assign_lhs (stmt);
      tree val1 = NULL_TREE, val2 = NULL_TREE;
      use_operand_p use_p;
      gimple *use_stmt;

      /* Convert:
	 LHS = CST BINOP VAR
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)

	 Also handles:
	 LHS = VAR BINOP CST
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */

      if (TREE_CODE_CLASS (rhs_code) == tcc_binary
	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	  && ((TREE_CODE (rhs1) == INTEGER_CST
	       && TREE_CODE (rhs2) == SSA_NAME)
	      || (TREE_CODE (rhs2) == INTEGER_CST
		  && TREE_CODE (rhs1) == SSA_NAME))
	  && single_imm_use (lhs, &use_p, &use_stmt)
	  && gimple_code (use_stmt) == GIMPLE_COND)

	{
	  tree new_rhs1 = NULL_TREE;
	  tree new_rhs2 = NULL_TREE;
	  tree cmp_var = NULL_TREE;

	  if (TREE_CODE (rhs2) == SSA_NAME
	      && two_valued_val_range_p (rhs2, &val1, &val2))
	    {
	      /* Optimize RHS1 OP [VAL1, VAL2].  */
	      new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
	      new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
	      cmp_var = rhs2;
	    }
	  else if (TREE_CODE (rhs1) == SSA_NAME
		   && two_valued_val_range_p (rhs1, &val1, &val2))
	    {
	      /* Optimize [VAL1, VAL2] OP RHS2.  */
	      new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
	      new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
	      cmp_var = rhs1;
	    }

	  /* If we could not find two-vals or the optimization is invalid as
	     in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
	  if (new_rhs1 && new_rhs2)
	    {
	      tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
	      gimple_assign_set_rhs_with_ops (gsi,
					      COND_EXPR, cond,
					      new_rhs1,
					      new_rhs2);
	      update_stmt (gsi_stmt (*gsi));
	      fold_stmt (gsi, follow_single_use_edges);
	      return true;
	    }
	}

      /* Dispatch on the rhs code to the matching range-based
	 simplifier.  */
      switch (rhs_code)
	{
	case EQ_EXPR:
	case NE_EXPR:
	  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
	     if the RHS is zero or one, and the LHS are known to be boolean
	     values.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_truth_ops_using_ranges (gsi, stmt);
	  break;

	/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
	   and BIT_AND_EXPR respectively if the first operand is greater
	   than zero and the second operand is an exact power of two.
	   Also optimize TRUNC_MOD_EXPR away if the second operand is
	   constant and the first operand already has the right value
	   range.  */
	case TRUNC_DIV_EXPR:
	case TRUNC_MOD_EXPR:
	  if ((TREE_CODE (rhs1) == SSA_NAME
	       || TREE_CODE (rhs1) == INTEGER_CST)
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_div_or_mod_using_ranges (gsi, stmt);
	  break;

	/* Transform ABS (X) into X or -X as appropriate.  */
	case ABS_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_abs_using_ranges (gsi, stmt);
	  break;

	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
	     if all the bits being cleared are already cleared or
	     all the bits being set are already set.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_bit_ops_using_ranges (gsi, stmt);
	  break;

	CASE_CONVERT:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_conversion_using_ranges (gsi, stmt);
	  break;

	case FLOAT_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_float_conversion_using_ranges (gsi, stmt);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  return simplify_min_or_max_using_ranges (gsi, stmt);

	default:
	  break;
	}
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}
10469 | 6156 |
10470 /* If the statement pointed by SI has a predicate whose value can be | 6157 /* If the statement pointed by SI has a predicate whose value can be |
10471 computed using the value range information computed by VRP, compute | 6158 computed using the value range information computed by VRP, compute |
10472 its value and return true. Otherwise, return false. */ | 6159 its value and return true. Otherwise, return false. */ |
10473 | 6160 |
10474 static bool | 6161 bool |
10475 fold_predicate_in (gimple_stmt_iterator *si) | 6162 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si) |
10476 { | 6163 { |
10477 bool assignment_p = false; | 6164 bool assignment_p = false; |
10478 tree val; | 6165 tree val; |
10479 gimple *stmt = gsi_stmt (*si); | 6166 gimple *stmt = gsi_stmt (*si); |
10480 | 6167 |
10529 return false; | 6216 return false; |
10530 } | 6217 } |
10531 | 6218 |
10532 /* Callback for substitute_and_fold folding the stmt at *SI. */ | 6219 /* Callback for substitute_and_fold folding the stmt at *SI. */ |
10533 | 6220 |
10534 static bool | 6221 bool |
10535 vrp_fold_stmt (gimple_stmt_iterator *si) | 6222 vrp_folder::fold_stmt (gimple_stmt_iterator *si) |
10536 { | 6223 { |
10537 if (fold_predicate_in (si)) | 6224 if (fold_predicate_in (si)) |
10538 return true; | 6225 return true; |
10539 | 6226 |
10540 return simplify_stmt_using_ranges (si); | 6227 return simplify_stmt_using_ranges (si); |
6228 } | |
6229 | |
6230 /* If OP has a value range with a single constant value return that, | |
6231 otherwise return NULL_TREE. This returns OP itself if OP is a | |
6232 constant. | |
6233 | |
6234 Implemented as a pure wrapper right now, but this will change. */ | |
6235 | |
6236 tree | |
6237 vrp_folder::get_value (tree op) | |
6238 { | |
6239 return op_with_constant_singleton_value_range (op); | |
10541 } | 6240 } |
10542 | 6241 |
10543 /* Return the LHS of any ASSERT_EXPR where OP appears as the first | 6242 /* Return the LHS of any ASSERT_EXPR where OP appears as the first |
10544 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates | 6243 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates |
10545 BB. If no such ASSERT_EXPR is found, return OP. */ | 6244 BB. If no such ASSERT_EXPR is found, return OP. */ |
10565 } | 6264 } |
10566 } | 6265 } |
10567 return op; | 6266 return op; |
10568 } | 6267 } |
10569 | 6268 |
6269 /* A hack. */ | |
6270 static class vr_values *x_vr_values; | |
6271 | |
10570 /* A trivial wrapper so that we can present the generic jump threading | 6272 /* A trivial wrapper so that we can present the generic jump threading |
10571 code with a simple API for simplifying statements. STMT is the | 6273 code with a simple API for simplifying statements. STMT is the |
10572 statement we want to simplify, WITHIN_STMT provides the location | 6274 statement we want to simplify, WITHIN_STMT provides the location |
10573 for any overflow warnings. */ | 6275 for any overflow warnings. */ |
10574 | 6276 |
10580 /* First see if the conditional is in the hash table. */ | 6282 /* First see if the conditional is in the hash table. */ |
10581 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true); | 6283 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true); |
10582 if (cached_lhs && is_gimple_min_invariant (cached_lhs)) | 6284 if (cached_lhs && is_gimple_min_invariant (cached_lhs)) |
10583 return cached_lhs; | 6285 return cached_lhs; |
10584 | 6286 |
6287 vr_values *vr_values = x_vr_values; | |
10585 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) | 6288 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) |
10586 { | 6289 { |
10587 tree op0 = gimple_cond_lhs (cond_stmt); | 6290 tree op0 = gimple_cond_lhs (cond_stmt); |
10588 op0 = lhs_of_dominating_assert (op0, bb, stmt); | 6291 op0 = lhs_of_dominating_assert (op0, bb, stmt); |
10589 | 6292 |
10590 tree op1 = gimple_cond_rhs (cond_stmt); | 6293 tree op1 = gimple_cond_rhs (cond_stmt); |
10591 op1 = lhs_of_dominating_assert (op1, bb, stmt); | 6294 op1 = lhs_of_dominating_assert (op1, bb, stmt); |
10592 | 6295 |
10593 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt), | 6296 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt), |
10594 op0, op1, within_stmt); | 6297 op0, op1, within_stmt); |
10595 } | 6298 } |
10596 | 6299 |
10597 /* We simplify a switch statement by trying to determine which case label | 6300 /* We simplify a switch statement by trying to determine which case label |
10598 will be taken. If we are successful then we return the corresponding | 6301 will be taken. If we are successful then we return the corresponding |
10599 CASE_LABEL_EXPR. */ | 6302 CASE_LABEL_EXPR. */ |
10603 if (TREE_CODE (op) != SSA_NAME) | 6306 if (TREE_CODE (op) != SSA_NAME) |
10604 return NULL_TREE; | 6307 return NULL_TREE; |
10605 | 6308 |
10606 op = lhs_of_dominating_assert (op, bb, stmt); | 6309 op = lhs_of_dominating_assert (op, bb, stmt); |
10607 | 6310 |
10608 value_range *vr = get_value_range (op); | 6311 const value_range *vr = vr_values->get_value_range (op); |
10609 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE) | 6312 if (vr->undefined_p () |
10610 || symbolic_range_p (vr)) | 6313 || vr->varying_p () |
6314 || vr->symbolic_p ()) | |
10611 return NULL_TREE; | 6315 return NULL_TREE; |
10612 | 6316 |
10613 if (vr->type == VR_RANGE) | 6317 if (vr->kind () == VR_RANGE) |
10614 { | 6318 { |
10615 size_t i, j; | 6319 size_t i, j; |
10616 /* Get the range of labels that contain a part of the operand's | 6320 /* Get the range of labels that contain a part of the operand's |
10617 value range. */ | 6321 value range. */ |
10618 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j); | 6322 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j); |
10619 | 6323 |
10620 /* Is there only one such label? */ | 6324 /* Is there only one such label? */ |
10621 if (i == j) | 6325 if (i == j) |
10622 { | 6326 { |
10623 tree label = gimple_switch_label (switch_stmt, i); | 6327 tree label = gimple_switch_label (switch_stmt, i); |
10624 | 6328 |
10625 /* The i'th label will be taken only if the value range of the | 6329 /* The i'th label will be taken only if the value range of the |
10626 operand is entirely within the bounds of this label. */ | 6330 operand is entirely within the bounds of this label. */ |
10627 if (CASE_HIGH (label) != NULL_TREE | 6331 if (CASE_HIGH (label) != NULL_TREE |
10628 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0 | 6332 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0 |
10629 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0) | 6333 && tree_int_cst_compare (CASE_HIGH (label), |
10630 : (tree_int_cst_equal (CASE_LOW (label), vr->min) | 6334 vr->max ()) >= 0) |
10631 && tree_int_cst_equal (vr->min, vr->max))) | 6335 : (tree_int_cst_equal (CASE_LOW (label), vr->min ()) |
6336 && tree_int_cst_equal (vr->min (), vr->max ()))) | |
10632 return label; | 6337 return label; |
10633 } | 6338 } |
10634 | 6339 |
10635 /* If there are no such labels then the default label will be | 6340 /* If there are no such labels then the default label will be |
10636 taken. */ | 6341 taken. */ |
10637 if (i > j) | 6342 if (i > j) |
10638 return gimple_switch_label (switch_stmt, 0); | 6343 return gimple_switch_label (switch_stmt, 0); |
10639 } | 6344 } |
10640 | 6345 |
10641 if (vr->type == VR_ANTI_RANGE) | 6346 if (vr->kind () == VR_ANTI_RANGE) |
10642 { | 6347 { |
10643 unsigned n = gimple_switch_num_labels (switch_stmt); | 6348 unsigned n = gimple_switch_num_labels (switch_stmt); |
10644 tree min_label = gimple_switch_label (switch_stmt, 1); | 6349 tree min_label = gimple_switch_label (switch_stmt, 1); |
10645 tree max_label = gimple_switch_label (switch_stmt, n - 1); | 6350 tree max_label = gimple_switch_label (switch_stmt, n - 1); |
10646 | 6351 |
10647 /* The default label will be taken only if the anti-range of the | 6352 /* The default label will be taken only if the anti-range of the |
10648 operand is entirely outside the bounds of all the (non-default) | 6353 operand is entirely outside the bounds of all the (non-default) |
10649 case labels. */ | 6354 case labels. */ |
10650 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0 | 6355 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0 |
10651 && (CASE_HIGH (max_label) != NULL_TREE | 6356 && (CASE_HIGH (max_label) != NULL_TREE |
10652 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0 | 6357 ? tree_int_cst_compare (vr->max (), |
10653 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0)) | 6358 CASE_HIGH (max_label)) >= 0 |
6359 : tree_int_cst_compare (vr->max (), | |
6360 CASE_LOW (max_label)) >= 0)) | |
10654 return gimple_switch_label (switch_stmt, 0); | 6361 return gimple_switch_label (switch_stmt, 0); |
10655 } | 6362 } |
10656 | 6363 |
10657 return NULL_TREE; | 6364 return NULL_TREE; |
10658 } | 6365 } |
10659 | 6366 |
10660 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt)) | 6367 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt)) |
10661 { | 6368 { |
10662 value_range new_vr = VR_INITIALIZER; | |
10663 tree lhs = gimple_assign_lhs (assign_stmt); | 6369 tree lhs = gimple_assign_lhs (assign_stmt); |
10664 | |
10665 if (TREE_CODE (lhs) == SSA_NAME | 6370 if (TREE_CODE (lhs) == SSA_NAME |
10666 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | 6371 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
10667 || POINTER_TYPE_P (TREE_TYPE (lhs)))) | 6372 || POINTER_TYPE_P (TREE_TYPE (lhs))) |
10668 { | 6373 && stmt_interesting_for_vrp (stmt)) |
10669 extract_range_from_assignment (&new_vr, assign_stmt); | 6374 { |
10670 if (range_int_cst_singleton_p (&new_vr)) | 6375 edge dummy_e; |
10671 return new_vr.min; | 6376 tree dummy_tree; |
6377 value_range new_vr; | |
6378 vr_values->extract_range_from_stmt (stmt, &dummy_e, | |
6379 &dummy_tree, &new_vr); | |
6380 tree singleton; | |
6381 if (new_vr.singleton_p (&singleton)) | |
6382 return singleton; | |
10672 } | 6383 } |
10673 } | 6384 } |
10674 | 6385 |
10675 return NULL_TREE; | 6386 return NULL_TREE; |
10676 } | 6387 } |
10679 { | 6390 { |
10680 public: | 6391 public: |
10681 vrp_dom_walker (cdi_direction direction, | 6392 vrp_dom_walker (cdi_direction direction, |
10682 class const_and_copies *const_and_copies, | 6393 class const_and_copies *const_and_copies, |
10683 class avail_exprs_stack *avail_exprs_stack) | 6394 class avail_exprs_stack *avail_exprs_stack) |
10684 : dom_walker (direction, true), | 6395 : dom_walker (direction, REACHABLE_BLOCKS), |
10685 m_const_and_copies (const_and_copies), | 6396 m_const_and_copies (const_and_copies), |
10686 m_avail_exprs_stack (avail_exprs_stack), | 6397 m_avail_exprs_stack (avail_exprs_stack), |
10687 m_dummy_cond (NULL) {} | 6398 m_dummy_cond (NULL) {} |
10688 | 6399 |
10689 virtual edge before_dom_children (basic_block); | 6400 virtual edge before_dom_children (basic_block); |
10690 virtual void after_dom_children (basic_block); | 6401 virtual void after_dom_children (basic_block); |
10691 | 6402 |
6403 class vr_values *vr_values; | |
6404 | |
10692 private: | 6405 private: |
10693 class const_and_copies *m_const_and_copies; | 6406 class const_and_copies *m_const_and_copies; |
10694 class avail_exprs_stack *m_avail_exprs_stack; | 6407 class avail_exprs_stack *m_avail_exprs_stack; |
10695 | 6408 |
10696 gcond *m_dummy_cond; | 6409 gcond *m_dummy_cond; |
6410 | |
10697 }; | 6411 }; |
10698 | 6412 |
10699 /* Called before processing dominator children of BB. We want to look | 6413 /* Called before processing dominator children of BB. We want to look |
10700 at ASSERT_EXPRs and record information from them in the appropriate | 6414 at ASSERT_EXPRs and record information from them in the appropriate |
10701 tables. | 6415 tables. |
10744 if (!m_dummy_cond) | 6458 if (!m_dummy_cond) |
10745 m_dummy_cond = gimple_build_cond (NE_EXPR, | 6459 m_dummy_cond = gimple_build_cond (NE_EXPR, |
10746 integer_zero_node, integer_zero_node, | 6460 integer_zero_node, integer_zero_node, |
10747 NULL, NULL); | 6461 NULL, NULL); |
10748 | 6462 |
6463 x_vr_values = vr_values; | |
10749 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies, | 6464 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies, |
10750 m_avail_exprs_stack, | 6465 m_avail_exprs_stack, NULL, |
10751 simplify_stmt_for_jump_threading); | 6466 simplify_stmt_for_jump_threading); |
6467 x_vr_values = NULL; | |
10752 | 6468 |
10753 m_avail_exprs_stack->pop_to_marker (); | 6469 m_avail_exprs_stack->pop_to_marker (); |
10754 m_const_and_copies->pop_to_marker (); | 6470 m_const_and_copies->pop_to_marker (); |
10755 } | 6471 } |
10756 | 6472 |
10773 | 6489 |
10774 As jump threading opportunities are discovered, they are registered | 6490 As jump threading opportunities are discovered, they are registered |
10775 for later realization. */ | 6491 for later realization. */ |
10776 | 6492 |
10777 static void | 6493 static void |
10778 identify_jump_threads (void) | 6494 identify_jump_threads (class vr_values *vr_values) |
10779 { | 6495 { |
10780 int i; | |
10781 edge e; | |
10782 | |
10783 /* Ugh. When substituting values earlier in this pass we can | 6496 /* Ugh. When substituting values earlier in this pass we can |
10784 wipe the dominance information. So rebuild the dominator | 6497 wipe the dominance information. So rebuild the dominator |
10785 information as we need it within the jump threading code. */ | 6498 information as we need it within the jump threading code. */ |
10786 calculate_dominance_info (CDI_DOMINATORS); | 6499 calculate_dominance_info (CDI_DOMINATORS); |
10787 | 6500 |
10790 difficult to avoid eliminating loop exit tests. Of course | 6503 difficult to avoid eliminating loop exit tests. Of course |
10791 EDGE_DFS_BACK is not accurate at this time so we have to | 6504 EDGE_DFS_BACK is not accurate at this time so we have to |
10792 recompute it. */ | 6505 recompute it. */ |
10793 mark_dfs_back_edges (); | 6506 mark_dfs_back_edges (); |
10794 | 6507 |
10795 /* Do not thread across edges we are about to remove. Just marking | |
10796 them as EDGE_IGNORE will do. */ | |
10797 FOR_EACH_VEC_ELT (to_remove_edges, i, e) | |
10798 e->flags |= EDGE_IGNORE; | |
10799 | |
10800 /* Allocate our unwinder stack to unwind any temporary equivalences | 6508 /* Allocate our unwinder stack to unwind any temporary equivalences |
10801 that might be recorded. */ | 6509 that might be recorded. */ |
10802 const_and_copies *equiv_stack = new const_and_copies (); | 6510 const_and_copies *equiv_stack = new const_and_copies (); |
10803 | 6511 |
10804 hash_table<expr_elt_hasher> *avail_exprs | 6512 hash_table<expr_elt_hasher> *avail_exprs |
10805 = new hash_table<expr_elt_hasher> (1024); | 6513 = new hash_table<expr_elt_hasher> (1024); |
10806 avail_exprs_stack *avail_exprs_stack | 6514 avail_exprs_stack *avail_exprs_stack |
10807 = new class avail_exprs_stack (avail_exprs); | 6515 = new class avail_exprs_stack (avail_exprs); |
10808 | 6516 |
10809 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack); | 6517 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack); |
6518 walker.vr_values = vr_values; | |
10810 walker.walk (cfun->cfg->x_entry_block_ptr); | 6519 walker.walk (cfun->cfg->x_entry_block_ptr); |
10811 | |
10812 /* Clear EDGE_IGNORE. */ | |
10813 FOR_EACH_VEC_ELT (to_remove_edges, i, e) | |
10814 e->flags &= ~EDGE_IGNORE; | |
10815 | 6520 |
10816 /* We do not actually update the CFG or SSA graphs at this point as | 6521 /* We do not actually update the CFG or SSA graphs at this point as |
10817 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet | 6522 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet |
10818 handle ASSERT_EXPRs gracefully. */ | 6523 handle ASSERT_EXPRs gracefully. */ |
10819 delete equiv_stack; | 6524 delete equiv_stack; |
10820 delete avail_exprs; | 6525 delete avail_exprs; |
10821 delete avail_exprs_stack; | 6526 delete avail_exprs_stack; |
10822 } | 6527 } |
10823 | 6528 |
10824 /* Free VRP lattice. */ | |
10825 | |
10826 static void | |
10827 vrp_free_lattice () | |
10828 { | |
10829 /* Free allocated memory. */ | |
10830 free (vr_value); | |
10831 free (vr_phi_edge_counts); | |
10832 bitmap_obstack_release (&vrp_equiv_obstack); | |
10833 vrp_value_range_pool.release (); | |
10834 | |
10835 /* So that we can distinguish between VRP data being available | |
10836 and not available. */ | |
10837 vr_value = NULL; | |
10838 vr_phi_edge_counts = NULL; | |
10839 } | |
10840 | |
10841 /* Traverse all the blocks folding conditionals with known ranges. */ | 6529 /* Traverse all the blocks folding conditionals with known ranges. */ |
10842 | 6530 |
10843 static void | 6531 void |
10844 vrp_finalize (bool warn_array_bounds_p) | 6532 vrp_prop::vrp_finalize (bool warn_array_bounds_p) |
10845 { | 6533 { |
10846 size_t i; | 6534 size_t i; |
10847 | 6535 |
10848 values_propagated = true; | 6536 /* We have completed propagating through the lattice. */ |
6537 vr_values.set_lattice_propagation_complete (); | |
10849 | 6538 |
10850 if (dump_file) | 6539 if (dump_file) |
10851 { | 6540 { |
10852 fprintf (dump_file, "\nValue ranges after VRP:\n\n"); | 6541 fprintf (dump_file, "\nValue ranges after VRP:\n\n"); |
10853 dump_all_value_ranges (dump_file); | 6542 vr_values.dump_all_value_ranges (dump_file); |
10854 fprintf (dump_file, "\n"); | 6543 fprintf (dump_file, "\n"); |
10855 } | 6544 } |
10856 | 6545 |
10857 /* Set value range to non pointer SSA_NAMEs. */ | 6546 /* Set value range to non pointer SSA_NAMEs. */ |
10858 for (i = 0; i < num_vr_values; i++) | 6547 for (i = 0; i < num_ssa_names; i++) |
10859 if (vr_value[i]) | 6548 { |
10860 { | 6549 tree name = ssa_name (i); |
10861 tree name = ssa_name (i); | 6550 if (!name) |
10862 | 6551 continue; |
10863 if (!name | 6552 |
10864 || (vr_value[i]->type == VR_VARYING) | 6553 const value_range *vr = get_value_range (name); |
10865 || (vr_value[i]->type == VR_UNDEFINED) | 6554 if (!name || !vr->constant_p ()) |
10866 || (TREE_CODE (vr_value[i]->min) != INTEGER_CST) | 6555 continue; |
10867 || (TREE_CODE (vr_value[i]->max) != INTEGER_CST)) | 6556 |
10868 continue; | 6557 if (POINTER_TYPE_P (TREE_TYPE (name)) |
10869 | 6558 && range_includes_zero_p (vr) == 0) |
10870 if (POINTER_TYPE_P (TREE_TYPE (name)) | 6559 set_ptr_nonnull (name); |
10871 && ((vr_value[i]->type == VR_RANGE | 6560 else if (!POINTER_TYPE_P (TREE_TYPE (name))) |
10872 && range_includes_zero_p (vr_value[i]->min, | 6561 set_range_info (name, vr->kind (), |
10873 vr_value[i]->max) == 0) | 6562 wi::to_wide (vr->min ()), |
10874 || (vr_value[i]->type == VR_ANTI_RANGE | 6563 wi::to_wide (vr->max ())); |
10875 && range_includes_zero_p (vr_value[i]->min, | 6564 } |
10876 vr_value[i]->max) == 1))) | 6565 |
10877 set_ptr_nonnull (name); | 6566 /* If we're checking array refs, we want to merge information on |
10878 else if (!POINTER_TYPE_P (TREE_TYPE (name))) | 6567 the executability of each edge between vrp_folder and the |
10879 set_range_info (name, vr_value[i]->type, | 6568 check_array_bounds_dom_walker: each can clear the |
10880 wi::to_wide (vr_value[i]->min), | 6569 EDGE_EXECUTABLE flag on edges, in different ways. |
10881 wi::to_wide (vr_value[i]->max)); | 6570 |
10882 } | 6571 Hence, if we're going to call check_all_array_refs, set |
10883 | 6572 the flag on every edge now, rather than in |
10884 substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt); | 6573 check_array_bounds_dom_walker's ctor; vrp_folder may clear |
6574 it from some edges. */ | |
6575 if (warn_array_bounds && warn_array_bounds_p) | |
6576 set_all_edges_as_executable (cfun); | |
6577 | |
6578 class vrp_folder vrp_folder; | |
6579 vrp_folder.vr_values = &vr_values; | |
6580 vrp_folder.substitute_and_fold (); | |
10885 | 6581 |
10886 if (warn_array_bounds && warn_array_bounds_p) | 6582 if (warn_array_bounds && warn_array_bounds_p) |
10887 check_all_array_refs (); | 6583 check_all_array_refs (); |
10888 } | 6584 } |
10889 | |
10890 /* evrp_dom_walker visits the basic blocks in the dominance order and set | |
10891 the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to | |
10892 discover more VRs. */ | |
10893 | |
10894 class evrp_dom_walker : public dom_walker | |
10895 { | |
10896 public: | |
10897 evrp_dom_walker () | |
10898 : dom_walker (CDI_DOMINATORS), stack (10) | |
10899 { | |
10900 need_eh_cleanup = BITMAP_ALLOC (NULL); | |
10901 } | |
10902 ~evrp_dom_walker () | |
10903 { | |
10904 BITMAP_FREE (need_eh_cleanup); | |
10905 } | |
10906 virtual edge before_dom_children (basic_block); | |
10907 virtual void after_dom_children (basic_block); | |
10908 void push_value_range (tree var, value_range *vr); | |
10909 value_range *pop_value_range (tree var); | |
10910 value_range *try_find_new_range (tree, tree op, tree_code code, tree limit); | |
10911 | |
10912 /* Cond_stack holds the old VR. */ | |
10913 auto_vec<std::pair <tree, value_range*> > stack; | |
10914 bitmap need_eh_cleanup; | |
10915 auto_vec<gimple *> stmts_to_fixup; | |
10916 auto_vec<gimple *> stmts_to_remove; | |
10917 }; | |
10918 | |
10919 /* Find new range for NAME such that (OP CODE LIMIT) is true. */ | |
10920 | |
10921 value_range * | |
10922 evrp_dom_walker::try_find_new_range (tree name, | |
10923 tree op, tree_code code, tree limit) | |
10924 { | |
10925 value_range vr = VR_INITIALIZER; | |
10926 value_range *old_vr = get_value_range (name); | |
10927 | |
10928 /* Discover VR when condition is true. */ | |
10929 extract_range_for_var_from_comparison_expr (name, code, op, | |
10930 limit, &vr); | |
10931 /* If we found any usable VR, set the VR to ssa_name and create a | |
10932 PUSH old value in the stack with the old VR. */ | |
10933 if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE) | |
10934 { | |
10935 if (old_vr->type == vr.type | |
10936 && vrp_operand_equal_p (old_vr->min, vr.min) | |
10937 && vrp_operand_equal_p (old_vr->max, vr.max)) | |
10938 return NULL; | |
10939 value_range *new_vr = vrp_value_range_pool.allocate (); | |
10940 *new_vr = vr; | |
10941 return new_vr; | |
10942 } | |
10943 return NULL; | |
10944 } | |
10945 | |
10946 /* See if there is any new scope is entered with new VR and set that VR to | |
10947 ssa_name before visiting the statements in the scope. */ | |
10948 | |
10949 edge | |
10950 evrp_dom_walker::before_dom_children (basic_block bb) | |
10951 { | |
10952 tree op0 = NULL_TREE; | |
10953 edge_iterator ei; | |
10954 edge e; | |
10955 | |
10956 if (dump_file && (dump_flags & TDF_DETAILS)) | |
10957 fprintf (dump_file, "Visiting BB%d\n", bb->index); | |
10958 | |
10959 stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL)); | |
10960 | |
10961 edge pred_e = NULL; | |
10962 FOR_EACH_EDGE (e, ei, bb->preds) | |
10963 { | |
10964 /* Ignore simple backedges from this to allow recording conditions | |
10965 in loop headers. */ | |
10966 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest)) | |
10967 continue; | |
10968 if (! pred_e) | |
10969 pred_e = e; | |
10970 else | |
10971 { | |
10972 pred_e = NULL; | |
10973 break; | |
10974 } | |
10975 } | |
10976 if (pred_e) | |
10977 { | |
10978 gimple *stmt = last_stmt (pred_e->src); | |
10979 if (stmt | |
10980 && gimple_code (stmt) == GIMPLE_COND | |
10981 && (op0 = gimple_cond_lhs (stmt)) | |
10982 && TREE_CODE (op0) == SSA_NAME | |
10983 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))) | |
10984 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))))) | |
10985 { | |
10986 if (dump_file && (dump_flags & TDF_DETAILS)) | |
10987 { | |
10988 fprintf (dump_file, "Visiting controlling predicate "); | |
10989 print_gimple_stmt (dump_file, stmt, 0); | |
10990 } | |
10991 /* Entering a new scope. Try to see if we can find a VR | |
10992 here. */ | |
10993 tree op1 = gimple_cond_rhs (stmt); | |
10994 if (TREE_OVERFLOW_P (op1)) | |
10995 op1 = drop_tree_overflow (op1); | |
10996 tree_code code = gimple_cond_code (stmt); | |
10997 | |
10998 auto_vec<assert_info, 8> asserts; | |
10999 register_edge_assert_for (op0, pred_e, code, op0, op1, asserts); | |
11000 if (TREE_CODE (op1) == SSA_NAME) | |
11001 register_edge_assert_for (op1, pred_e, code, op0, op1, asserts); | |
11002 | |
11003 auto_vec<std::pair<tree, value_range *>, 8> vrs; | |
11004 for (unsigned i = 0; i < asserts.length (); ++i) | |
11005 { | |
11006 value_range *vr = try_find_new_range (asserts[i].name, | |
11007 asserts[i].expr, | |
11008 asserts[i].comp_code, | |
11009 asserts[i].val); | |
11010 if (vr) | |
11011 vrs.safe_push (std::make_pair (asserts[i].name, vr)); | |
11012 } | |
11013 /* Push updated ranges only after finding all of them to avoid | |
11014 ordering issues that can lead to worse ranges. */ | |
11015 for (unsigned i = 0; i < vrs.length (); ++i) | |
11016 push_value_range (vrs[i].first, vrs[i].second); | |
11017 } | |
11018 } | |
11019 | |
11020 /* Visit PHI stmts and discover any new VRs possible. */ | |
11021 bool has_unvisited_preds = false; | |
11022 FOR_EACH_EDGE (e, ei, bb->preds) | |
11023 if (e->flags & EDGE_EXECUTABLE | |
11024 && !(e->src->flags & BB_VISITED)) | |
11025 { | |
11026 has_unvisited_preds = true; | |
11027 break; | |
11028 } | |
11029 | |
11030 for (gphi_iterator gpi = gsi_start_phis (bb); | |
11031 !gsi_end_p (gpi); gsi_next (&gpi)) | |
11032 { | |
11033 gphi *phi = gpi.phi (); | |
11034 tree lhs = PHI_RESULT (phi); | |
11035 if (virtual_operand_p (lhs)) | |
11036 continue; | |
11037 value_range vr_result = VR_INITIALIZER; | |
11038 bool interesting = stmt_interesting_for_vrp (phi); | |
11039 if (interesting && dump_file && (dump_flags & TDF_DETAILS)) | |
11040 { | |
11041 fprintf (dump_file, "Visiting PHI node "); | |
11042 print_gimple_stmt (dump_file, phi, 0); | |
11043 } | |
11044 if (!has_unvisited_preds | |
11045 && interesting) | |
11046 extract_range_from_phi_node (phi, &vr_result); | |
11047 else | |
11048 { | |
11049 set_value_range_to_varying (&vr_result); | |
11050 /* When we have an unvisited executable predecessor we can't | |
11051 use PHI arg ranges which may be still UNDEFINED but have | |
11052 to use VARYING for them. But we can still resort to | |
11053 SCEV for loop header PHIs. */ | |
11054 struct loop *l; | |
11055 if (interesting | |
11056 && (l = loop_containing_stmt (phi)) | |
11057 && l->header == gimple_bb (phi)) | |
11058 adjust_range_with_scev (&vr_result, l, phi, lhs); | |
11059 } | |
11060 update_value_range (lhs, &vr_result); | |
11061 | |
11062 /* Mark PHIs whose lhs we fully propagate for removal. */ | |
11063 tree val = op_with_constant_singleton_value_range (lhs); | |
11064 if (val && may_propagate_copy (lhs, val)) | |
11065 { | |
11066 stmts_to_remove.safe_push (phi); | |
11067 continue; | |
11068 } | |
11069 | |
11070 /* Set the SSA with the value range. */ | |
11071 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))) | |
11072 { | |
11073 if ((vr_result.type == VR_RANGE | |
11074 || vr_result.type == VR_ANTI_RANGE) | |
11075 && (TREE_CODE (vr_result.min) == INTEGER_CST) | |
11076 && (TREE_CODE (vr_result.max) == INTEGER_CST)) | |
11077 set_range_info (lhs, vr_result.type, | |
11078 wi::to_wide (vr_result.min), | |
11079 wi::to_wide (vr_result.max)); | |
11080 } | |
11081 else if (POINTER_TYPE_P (TREE_TYPE (lhs)) | |
11082 && ((vr_result.type == VR_RANGE | |
11083 && range_includes_zero_p (vr_result.min, | |
11084 vr_result.max) == 0) | |
11085 || (vr_result.type == VR_ANTI_RANGE | |
11086 && range_includes_zero_p (vr_result.min, | |
11087 vr_result.max) == 1))) | |
11088 set_ptr_nonnull (lhs); | |
11089 } | |
11090 | |
11091 edge taken_edge = NULL; | |
11092 | |
11093 /* Visit all other stmts and discover any new VRs possible. */ | |
11094 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); | |
11095 !gsi_end_p (gsi); gsi_next (&gsi)) | |
11096 { | |
11097 gimple *stmt = gsi_stmt (gsi); | |
11098 tree output = NULL_TREE; | |
11099 gimple *old_stmt = stmt; | |
11100 bool was_noreturn = (is_gimple_call (stmt) | |
11101 && gimple_call_noreturn_p (stmt)); | |
11102 | |
11103 if (dump_file && (dump_flags & TDF_DETAILS)) | |
11104 { | |
11105 fprintf (dump_file, "Visiting stmt "); | |
11106 print_gimple_stmt (dump_file, stmt, 0); | |
11107 } | |
11108 | |
11109 if (gcond *cond = dyn_cast <gcond *> (stmt)) | |
11110 { | |
11111 vrp_visit_cond_stmt (cond, &taken_edge); | |
11112 if (taken_edge) | |
11113 { | |
11114 if (taken_edge->flags & EDGE_TRUE_VALUE) | |
11115 gimple_cond_make_true (cond); | |
11116 else if (taken_edge->flags & EDGE_FALSE_VALUE) | |
11117 gimple_cond_make_false (cond); | |
11118 else | |
11119 gcc_unreachable (); | |
11120 update_stmt (stmt); | |
11121 } | |
11122 } | |
11123 else if (stmt_interesting_for_vrp (stmt)) | |
11124 { | |
11125 edge taken_edge; | |
11126 value_range vr = VR_INITIALIZER; | |
11127 extract_range_from_stmt (stmt, &taken_edge, &output, &vr); | |
11128 if (output | |
11129 && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)) | |
11130 { | |
11131 update_value_range (output, &vr); | |
11132 vr = *get_value_range (output); | |
11133 | |
11134 /* Mark stmts whose output we fully propagate for removal. */ | |
11135 tree val; | |
11136 if ((val = op_with_constant_singleton_value_range (output)) | |
11137 && may_propagate_copy (output, val) | |
11138 && !stmt_could_throw_p (stmt) | |
11139 && !gimple_has_side_effects (stmt)) | |
11140 { | |
11141 stmts_to_remove.safe_push (stmt); | |
11142 continue; | |
11143 } | |
11144 | |
11145 /* Set the SSA with the value range. */ | |
11146 if (INTEGRAL_TYPE_P (TREE_TYPE (output))) | |
11147 { | |
11148 if ((vr.type == VR_RANGE | |
11149 || vr.type == VR_ANTI_RANGE) | |
11150 && (TREE_CODE (vr.min) == INTEGER_CST) | |
11151 && (TREE_CODE (vr.max) == INTEGER_CST)) | |
11152 set_range_info (output, vr.type, | |
11153 wi::to_wide (vr.min), | |
11154 wi::to_wide (vr.max)); | |
11155 } | |
11156 else if (POINTER_TYPE_P (TREE_TYPE (output)) | |
11157 && ((vr.type == VR_RANGE | |
11158 && range_includes_zero_p (vr.min, | |
11159 vr.max) == 0) | |
11160 || (vr.type == VR_ANTI_RANGE | |
11161 && range_includes_zero_p (vr.min, | |
11162 vr.max) == 1))) | |
11163 set_ptr_nonnull (output); | |
11164 } | |
11165 else | |
11166 set_defs_to_varying (stmt); | |
11167 } | |
11168 else | |
11169 set_defs_to_varying (stmt); | |
11170 | |
11171 /* See if we can derive a range for any of STMT's operands. */ | |
11172 tree op; | |
11173 ssa_op_iter i; | |
11174 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
11175 { | |
11176 tree value; | |
11177 enum tree_code comp_code; | |
11178 | |
11179 /* If OP is used in such a way that we can infer a value | |
11180 range for it, and we don't find a previous assertion for | |
11181 it, create a new assertion location node for OP. */ | |
11182 if (infer_value_range (stmt, op, &comp_code, &value)) | |
11183 { | |
11184 /* If we are able to infer a nonzero value range for OP, | |
11185 then walk backwards through the use-def chain to see if OP | |
11186 was set via a typecast. | |
11187 If so, then we can also infer a nonzero value range | |
11188 for the operand of the NOP_EXPR. */ | |
11189 if (comp_code == NE_EXPR && integer_zerop (value)) | |
11190 { | |
11191 tree t = op; | |
11192 gimple *def_stmt = SSA_NAME_DEF_STMT (t); | |
11193 while (is_gimple_assign (def_stmt) | |
11194 && CONVERT_EXPR_CODE_P | |
11195 (gimple_assign_rhs_code (def_stmt)) | |
11196 && TREE_CODE | |
11197 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME | |
11198 && POINTER_TYPE_P | |
11199 (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) | |
11200 { | |
11201 t = gimple_assign_rhs1 (def_stmt); | |
11202 def_stmt = SSA_NAME_DEF_STMT (t); | |
11203 | |
11204 /* Add VR when (T COMP_CODE value) condition is | |
11205 true. */ | |
11206 value_range *op_range | |
11207 = try_find_new_range (t, t, comp_code, value); | |
11208 if (op_range) | |
11209 push_value_range (t, op_range); | |
11210 } | |
11211 } | |
11212 /* Add VR when (OP COMP_CODE value) condition is true. */ | |
11213 value_range *op_range = try_find_new_range (op, op, | |
11214 comp_code, value); | |
11215 if (op_range) | |
11216 push_value_range (op, op_range); | |
11217 } | |
11218 } | |
11219 | |
11220 /* Try folding stmts with the VR discovered. */ | |
11221 bool did_replace | |
11222 = replace_uses_in (stmt, op_with_constant_singleton_value_range); | |
11223 if (fold_stmt (&gsi, follow_single_use_edges) | |
11224 || did_replace) | |
11225 { | |
11226 stmt = gsi_stmt (gsi); | |
11227 update_stmt (stmt); | |
11228 did_replace = true; | |
11229 } | |
11230 | |
11231 if (did_replace) | |
11232 { | |
11233 /* If we cleaned up EH information from the statement, | |
11234 remove EH edges. */ | |
11235 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)) | |
11236 bitmap_set_bit (need_eh_cleanup, bb->index); | |
11237 | |
11238 /* If we turned a not noreturn call into a noreturn one | |
11239 schedule it for fixup. */ | |
11240 if (!was_noreturn | |
11241 && is_gimple_call (stmt) | |
11242 && gimple_call_noreturn_p (stmt)) | |
11243 stmts_to_fixup.safe_push (stmt); | |
11244 | |
11245 if (gimple_assign_single_p (stmt)) | |
11246 { | |
11247 tree rhs = gimple_assign_rhs1 (stmt); | |
11248 if (TREE_CODE (rhs) == ADDR_EXPR) | |
11249 recompute_tree_invariant_for_addr_expr (rhs); | |
11250 } | |
11251 } | |
11252 } | |
11253 | |
11254 /* Visit BB successor PHI nodes and replace PHI args. */ | |
11255 FOR_EACH_EDGE (e, ei, bb->succs) | |
11256 { | |
11257 for (gphi_iterator gpi = gsi_start_phis (e->dest); | |
11258 !gsi_end_p (gpi); gsi_next (&gpi)) | |
11259 { | |
11260 gphi *phi = gpi.phi (); | |
11261 use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e); | |
11262 tree arg = USE_FROM_PTR (use_p); | |
11263 if (TREE_CODE (arg) != SSA_NAME | |
11264 || virtual_operand_p (arg)) | |
11265 continue; | |
11266 tree val = op_with_constant_singleton_value_range (arg); | |
11267 if (val && may_propagate_copy (arg, val)) | |
11268 propagate_value (use_p, val); | |
11269 } | |
11270 } | |
11271 | |
11272 bb->flags |= BB_VISITED; | |
11273 | |
11274 return taken_edge; | |
11275 } | |
11276 | |
11277 /* Restore/pop VRs valid only for BB when we leave BB. */ | |
11278 | |
11279 void | |
11280 evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED) | |
11281 { | |
11282 gcc_checking_assert (!stack.is_empty ()); | |
11283 while (stack.last ().first != NULL_TREE) | |
11284 pop_value_range (stack.last ().first); | |
11285 stack.pop (); | |
11286 } | |
11287 | |
11288 /* Push the Value Range of VAR to the stack and update it with new VR. */ | |
11289 | |
11290 void | |
11291 evrp_dom_walker::push_value_range (tree var, value_range *vr) | |
11292 { | |
11293 if (SSA_NAME_VERSION (var) >= num_vr_values) | |
11294 return; | |
11295 if (dump_file && (dump_flags & TDF_DETAILS)) | |
11296 { | |
11297 fprintf (dump_file, "pushing new range for "); | |
11298 print_generic_expr (dump_file, var); | |
11299 fprintf (dump_file, ": "); | |
11300 dump_value_range (dump_file, vr); | |
11301 fprintf (dump_file, "\n"); | |
11302 } | |
11303 stack.safe_push (std::make_pair (var, get_value_range (var))); | |
11304 vr_value[SSA_NAME_VERSION (var)] = vr; | |
11305 } | |
11306 | |
11307 /* Pop the Value Range from the vrp_stack and update VAR with it. */ | |
11308 | |
11309 value_range * | |
11310 evrp_dom_walker::pop_value_range (tree var) | |
11311 { | |
11312 value_range *vr = stack.last ().second; | |
11313 gcc_checking_assert (var == stack.last ().first); | |
11314 if (dump_file && (dump_flags & TDF_DETAILS)) | |
11315 { | |
11316 fprintf (dump_file, "popping range for "); | |
11317 print_generic_expr (dump_file, var); | |
11318 fprintf (dump_file, ", restoring "); | |
11319 dump_value_range (dump_file, vr); | |
11320 fprintf (dump_file, "\n"); | |
11321 } | |
11322 vr_value[SSA_NAME_VERSION (var)] = vr; | |
11323 stack.pop (); | |
11324 return vr; | |
11325 } | |
11326 | |
11327 | |
11328 /* Main entry point for the early vrp pass which is a simplified non-iterative | |
11329 version of vrp where basic blocks are visited in dominance order. Value | |
11330 ranges discovered in early vrp will also be used by ipa-vrp. */ | |
11331 | |
11332 static unsigned int | |
11333 execute_early_vrp () | |
11334 { | |
11335 edge e; | |
11336 edge_iterator ei; | |
11337 basic_block bb; | |
11338 | |
11339 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); | |
11340 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); | |
11341 scev_initialize (); | |
11342 calculate_dominance_info (CDI_DOMINATORS); | |
11343 FOR_EACH_BB_FN (bb, cfun) | |
11344 { | |
11345 bb->flags &= ~BB_VISITED; | |
11346 FOR_EACH_EDGE (e, ei, bb->preds) | |
11347 e->flags |= EDGE_EXECUTABLE; | |
11348 } | |
11349 vrp_initialize_lattice (); | |
11350 | |
11351 /* Walk stmts in dominance order and propagate VRP. */ | |
11352 evrp_dom_walker walker; | |
11353 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun)); | |
11354 | |
11355 if (dump_file) | |
11356 { | |
11357 fprintf (dump_file, "\nValue ranges after Early VRP:\n\n"); | |
11358 dump_all_value_ranges (dump_file); | |
11359 fprintf (dump_file, "\n"); | |
11360 } | |
11361 | |
11362 /* Remove stmts in reverse order to make debug stmt creation possible. */ | |
11363 while (! walker.stmts_to_remove.is_empty ()) | |
11364 { | |
11365 gimple *stmt = walker.stmts_to_remove.pop (); | |
11366 if (dump_file && dump_flags & TDF_DETAILS) | |
11367 { | |
11368 fprintf (dump_file, "Removing dead stmt "); | |
11369 print_gimple_stmt (dump_file, stmt, 0); | |
11370 fprintf (dump_file, "\n"); | |
11371 } | |
11372 gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
11373 if (gimple_code (stmt) == GIMPLE_PHI) | |
11374 remove_phi_node (&gsi, true); | |
11375 else | |
11376 { | |
11377 unlink_stmt_vdef (stmt); | |
11378 gsi_remove (&gsi, true); | |
11379 release_defs (stmt); | |
11380 } | |
11381 } | |
11382 | |
11383 if (!bitmap_empty_p (walker.need_eh_cleanup)) | |
11384 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup); | |
11385 | |
11386 /* Fixup stmts that became noreturn calls. This may require splitting | |
11387 blocks and thus isn't possible during the dominator walk. Do this | |
11388 in reverse order so we don't inadvertedly remove a stmt we want to | |
11389 fixup by visiting a dominating now noreturn call first. */ | |
11390 while (!walker.stmts_to_fixup.is_empty ()) | |
11391 { | |
11392 gimple *stmt = walker.stmts_to_fixup.pop (); | |
11393 fixup_noreturn_call (stmt); | |
11394 } | |
11395 | |
11396 vrp_free_lattice (); | |
11397 scev_finalize (); | |
11398 loop_optimizer_finalize (); | |
11399 return 0; | |
11400 } | |
11401 | |
11402 | 6585 |
11403 /* Main entry point to VRP (Value Range Propagation). This pass is | 6586 /* Main entry point to VRP (Value Range Propagation). This pass is |
11404 loosely based on J. R. C. Patterson, ``Accurate Static Branch | 6587 loosely based on J. R. C. Patterson, ``Accurate Static Branch |
11405 Prediction by Value Range Propagation,'' in SIGPLAN Conference on | 6588 Prediction by Value Range Propagation,'' in SIGPLAN Conference on |
11406 Programming Language Design and Implementation, pp. 67-78, 1995. | 6589 Programming Language Design and Implementation, pp. 67-78, 1995. |
11445 probabilities to aid branch prediction. */ | 6628 probabilities to aid branch prediction. */ |
11446 | 6629 |
11447 static unsigned int | 6630 static unsigned int |
11448 execute_vrp (bool warn_array_bounds_p) | 6631 execute_vrp (bool warn_array_bounds_p) |
11449 { | 6632 { |
11450 int i; | |
11451 edge e; | |
11452 switch_update *su; | |
11453 | 6633 |
11454 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); | 6634 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); |
11455 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); | 6635 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); |
11456 scev_initialize (); | 6636 scev_initialize (); |
11457 | 6637 |
11458 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation. | 6638 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation. |
11459 Inserting assertions may split edges which will invalidate | 6639 Inserting assertions may split edges which will invalidate |
11460 EDGE_DFS_BACK. */ | 6640 EDGE_DFS_BACK. */ |
11461 insert_range_assertions (); | 6641 insert_range_assertions (); |
11462 | 6642 |
11463 to_remove_edges.create (10); | |
11464 to_update_switch_stmts.create (5); | |
11465 threadedge_initialize_values (); | 6643 threadedge_initialize_values (); |
11466 | 6644 |
11467 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */ | 6645 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */ |
11468 mark_dfs_back_edges (); | 6646 mark_dfs_back_edges (); |
11469 | 6647 |
11470 vrp_initialize_lattice (); | 6648 class vrp_prop vrp_prop; |
11471 vrp_initialize (); | 6649 vrp_prop.vrp_initialize (); |
11472 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node); | 6650 vrp_prop.ssa_propagate (); |
11473 vrp_finalize (warn_array_bounds_p); | 6651 vrp_prop.vrp_finalize (warn_array_bounds_p); |
11474 | 6652 |
11475 /* We must identify jump threading opportunities before we release | 6653 /* We must identify jump threading opportunities before we release |
11476 the datastructures built by VRP. */ | 6654 the datastructures built by VRP. */ |
11477 identify_jump_threads (); | 6655 identify_jump_threads (&vrp_prop.vr_values); |
11478 | 6656 |
11479 /* A comparison of an SSA_NAME against a constant where the SSA_NAME | 6657 /* A comparison of an SSA_NAME against a constant where the SSA_NAME |
11480 was set by a type conversion can often be rewritten to use the | 6658 was set by a type conversion can often be rewritten to use the |
11481 RHS of the type conversion. | 6659 RHS of the type conversion. |
11482 | 6660 |
11486 basic_block bb; | 6664 basic_block bb; |
11487 FOR_EACH_BB_FN (bb, cfun) | 6665 FOR_EACH_BB_FN (bb, cfun) |
11488 { | 6666 { |
11489 gimple *last = last_stmt (bb); | 6667 gimple *last = last_stmt (bb); |
11490 if (last && gimple_code (last) == GIMPLE_COND) | 6668 if (last && gimple_code (last) == GIMPLE_COND) |
11491 simplify_cond_using_ranges_2 (as_a <gcond *> (last)); | 6669 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last)); |
11492 } | 6670 } |
11493 | |
11494 vrp_free_lattice (); | |
11495 | 6671 |
11496 free_numbers_of_iterations_estimates (cfun); | 6672 free_numbers_of_iterations_estimates (cfun); |
11497 | 6673 |
11498 /* ASSERT_EXPRs must be removed before finalizing jump threads | 6674 /* ASSERT_EXPRs must be removed before finalizing jump threads |
11499 as finalizing jump threads calls the CFG cleanup code which | 6675 as finalizing jump threads calls the CFG cleanup code which |
11513 | 6689 |
11514 Note the SSA graph update will occur during the normal TODO | 6690 Note the SSA graph update will occur during the normal TODO |
11515 processing by the pass manager. */ | 6691 processing by the pass manager. */ |
11516 thread_through_all_blocks (false); | 6692 thread_through_all_blocks (false); |
11517 | 6693 |
11518 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the | 6694 vrp_prop.vr_values.cleanup_edges_and_switches (); |
11519 CFG in a broken state and requires a cfg_cleanup run. */ | |
11520 FOR_EACH_VEC_ELT (to_remove_edges, i, e) | |
11521 remove_edge (e); | |
11522 /* Update SWITCH_EXPR case label vector. */ | |
11523 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su) | |
11524 { | |
11525 size_t j; | |
11526 size_t n = TREE_VEC_LENGTH (su->vec); | |
11527 tree label; | |
11528 gimple_switch_set_num_labels (su->stmt, n); | |
11529 for (j = 0; j < n; j++) | |
11530 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j)); | |
11531 /* As we may have replaced the default label with a regular one | |
11532 make sure to make it a real default label again. This ensures | |
11533 optimal expansion. */ | |
11534 label = gimple_switch_label (su->stmt, 0); | |
11535 CASE_LOW (label) = NULL_TREE; | |
11536 CASE_HIGH (label) = NULL_TREE; | |
11537 } | |
11538 | |
11539 if (to_remove_edges.length () > 0) | |
11540 { | |
11541 free_dominance_info (CDI_DOMINATORS); | |
11542 loops_state_set (LOOPS_NEED_FIXUP); | |
11543 } | |
11544 | |
11545 to_remove_edges.release (); | |
11546 to_update_switch_stmts.release (); | |
11547 threadedge_finalize_values (); | 6695 threadedge_finalize_values (); |
11548 | 6696 |
11549 scev_finalize (); | 6697 scev_finalize (); |
11550 loop_optimizer_finalize (); | 6698 loop_optimizer_finalize (); |
11551 return 0; | 6699 return 0; |
11594 make_pass_vrp (gcc::context *ctxt) | 6742 make_pass_vrp (gcc::context *ctxt) |
11595 { | 6743 { |
11596 return new pass_vrp (ctxt); | 6744 return new pass_vrp (ctxt); |
11597 } | 6745 } |
11598 | 6746 |
11599 namespace { | 6747 |
11600 | 6748 /* Worker for determine_value_range. */ |
11601 const pass_data pass_data_early_vrp = | 6749 |
11602 { | 6750 static void |
11603 GIMPLE_PASS, /* type */ | 6751 determine_value_range_1 (value_range *vr, tree expr) |
11604 "evrp", /* name */ | 6752 { |
11605 OPTGROUP_NONE, /* optinfo_flags */ | 6753 if (BINARY_CLASS_P (expr)) |
11606 TV_TREE_EARLY_VRP, /* tv_id */ | 6754 { |
11607 PROP_ssa, /* properties_required */ | 6755 value_range vr0, vr1; |
11608 0, /* properties_provided */ | 6756 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0)); |
11609 0, /* properties_destroyed */ | 6757 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1)); |
11610 0, /* todo_flags_start */ | 6758 extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr), |
11611 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ), | 6759 &vr0, &vr1); |
11612 }; | 6760 } |
11613 | 6761 else if (UNARY_CLASS_P (expr)) |
11614 class pass_early_vrp : public gimple_opt_pass | 6762 { |
11615 { | 6763 value_range vr0; |
11616 public: | 6764 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0)); |
11617 pass_early_vrp (gcc::context *ctxt) | 6765 extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr), |
11618 : gimple_opt_pass (pass_data_early_vrp, ctxt) | 6766 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0))); |
11619 {} | 6767 } |
11620 | 6768 else if (TREE_CODE (expr) == INTEGER_CST) |
11621 /* opt_pass methods: */ | 6769 set_value_range_to_value (vr, expr, NULL); |
11622 opt_pass * clone () { return new pass_early_vrp (m_ctxt); } | 6770 else |
11623 virtual bool gate (function *) | 6771 { |
11624 { | 6772 value_range_kind kind; |
11625 return flag_tree_vrp != 0; | 6773 wide_int min, max; |
11626 } | 6774 /* For SSA names try to extract range info computed by VRP. Otherwise |
11627 virtual unsigned int execute (function *) | 6775 fall back to varying. */ |
11628 { return execute_early_vrp (); } | 6776 if (TREE_CODE (expr) == SSA_NAME |
11629 | 6777 && INTEGRAL_TYPE_P (TREE_TYPE (expr)) |
11630 }; // class pass_vrp | 6778 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING) |
11631 } // anon namespace | 6779 set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min), |
11632 | 6780 wide_int_to_tree (TREE_TYPE (expr), max), NULL); |
11633 gimple_opt_pass * | 6781 else |
11634 make_pass_early_vrp (gcc::context *ctxt) | 6782 set_value_range_to_varying (vr); |
11635 { | 6783 } |
11636 return new pass_early_vrp (ctxt); | 6784 } |
11637 } | 6785 |
11638 | 6786 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return |
6787 the determined range type. */ | |
6788 | |
6789 value_range_kind | |
6790 determine_value_range (tree expr, wide_int *min, wide_int *max) | |
6791 { | |
6792 value_range vr; | |
6793 determine_value_range_1 (&vr, expr); | |
6794 if (vr.constant_p ()) | |
6795 { | |
6796 *min = wi::to_wide (vr.min ()); | |
6797 *max = wi::to_wide (vr.max ()); | |
6798 return vr.kind (); | |
6799 } | |
6800 | |
6801 return VR_VARYING; | |
6802 } |