131
|
1 /* Support routines for Value Range Propagation (VRP).
|
145
|
2 Copyright (C) 2005-2020 Free Software Foundation, Inc.
|
131
|
3
|
|
4 This file is part of GCC.
|
|
5
|
|
6 GCC is free software; you can redistribute it and/or modify
|
|
7 it under the terms of the GNU General Public License as published by
|
|
8 the Free Software Foundation; either version 3, or (at your option)
|
|
9 any later version.
|
|
10
|
|
11 GCC is distributed in the hope that it will be useful,
|
|
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
14 GNU General Public License for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with GCC; see the file COPYING3. If not see
|
|
18 <http://www.gnu.org/licenses/>. */
|
|
19
|
|
20 #include "config.h"
|
|
21 #include "system.h"
|
|
22 #include "coretypes.h"
|
|
23 #include "backend.h"
|
|
24 #include "insn-codes.h"
|
|
25 #include "tree.h"
|
|
26 #include "gimple.h"
|
|
27 #include "ssa.h"
|
|
28 #include "optabs-tree.h"
|
|
29 #include "gimple-pretty-print.h"
|
|
30 #include "diagnostic-core.h"
|
|
31 #include "flags.h"
|
|
32 #include "fold-const.h"
|
|
33 #include "calls.h"
|
|
34 #include "cfganal.h"
|
|
35 #include "gimple-fold.h"
|
|
36 #include "gimple-iterator.h"
|
|
37 #include "tree-cfg.h"
|
|
38 #include "tree-ssa-loop-niter.h"
|
|
39 #include "tree-ssa-loop.h"
|
|
40 #include "intl.h"
|
|
41 #include "cfgloop.h"
|
|
42 #include "tree-scalar-evolution.h"
|
|
43 #include "tree-ssa-propagate.h"
|
|
44 #include "tree-chrec.h"
|
|
45 #include "omp-general.h"
|
|
46 #include "case-cfn-macros.h"
|
|
47 #include "alloc-pool.h"
|
|
48 #include "attribs.h"
|
145
|
49 #include "range.h"
|
131
|
50 #include "vr-values.h"
|
|
51 #include "cfghooks.h"
|
145
|
52 #include "range-op.h"
|
131
|
53
|
|
54 /* Set value range VR to a non-negative range of type TYPE. */
|
|
55
|
|
56 static inline void
|
145
|
57 set_value_range_to_nonnegative (value_range_equiv *vr, tree type)
|
131
|
58 {
|
|
59 tree zero = build_int_cst (type, 0);
|
145
|
60 vr->update (zero, vrp_val_max (type));
|
131
|
61 }
|
|
62
|
|
63 /* Set value range VR to a range of a truthvalue of type TYPE. */
|
|
64
|
|
65 static inline void
|
145
|
66 set_value_range_to_truthvalue (value_range_equiv *vr, tree type)
|
131
|
67 {
|
|
68 if (TYPE_PRECISION (type) == 1)
|
145
|
69 vr->set_varying (type);
|
131
|
70 else
|
145
|
71 vr->update (build_int_cst (type, 0), build_int_cst (type, 1));
|
131
|
72 }
|
|
73
|
145
|
/* Return the lattice entry for VAR or NULL if it doesn't exist or cannot
   be initialized.  */

value_range_equiv *
vr_values::get_lattice_entry (const_tree var)
{
  value_range_equiv *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we query the entry for a new SSA name avoid reallocating the lattice
     since we should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return NULL;

  /* Return the cached entry if one was already created for VAR.  */
  vr = vr_value[ver];
  if (vr)
    return vr;

  /* Create a default value range.  */
  vr_value[ver] = vr = vrp_value_range_pool.allocate ();

  /* After propagation finished return varying.  */
  if (values_propagated)
    {
      vr->set_varying (TREE_TYPE (var));
      return vr;
    }

  /* Before propagation the optimistic starting point is UNDEFINED,
     possibly refined below for default definitions.  */
  vr->set_undefined ();

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && (nonnull_arg_p (sym)
		  || get_ptr_nonnull (var)))
	    {
	      vr->set_nonzero (TREE_TYPE (sym));
	      vr->equiv_clear ();
	    }
	  else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
	    {
	      /* Seed the entry from global range info recorded on the
		 SSA name; fall back to VARYING when none exists.  */
	      get_range_info (var, *vr);
	      if (vr->undefined_p ())
		vr->set_varying (TREE_TYPE (sym));
	    }
	  else
	    vr->set_varying (TREE_TYPE (sym));
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	{
	  /* A by-invisible-reference result points at the return slot
	     and is therefore treated as non-NULL here.  */
	  vr->set_nonzero (TREE_TYPE (sym));
	  vr->equiv_clear ();
	}
    }

  return vr;
}
|
|
142
|
145
|
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

const value_range_equiv *
vr_values::get_value_range (const_tree var)
{
  /* If we have no recorded ranges, then return NULL.  */
  if (!vr_value)
    return NULL;

  value_range_equiv *vr = get_lattice_entry (var);

  /* Reallocate the lattice if needed.  */
  if (!vr)
    {
      unsigned int old_sz = num_vr_values;
      /* Grow with ~10% headroom over the current number of SSA names so
	 further late-created names do not force a resize each time.  */
      num_vr_values = num_ssa_names + num_ssa_names / 10;
      vr_value = XRESIZEVEC (value_range_equiv *, vr_value, num_vr_values);
      for ( ; old_sz < num_vr_values; old_sz++)
	vr_value [old_sz] = NULL;

      /* Now that the lattice has been resized, we should never fail.  */
      vr = get_lattice_entry (var);
      gcc_assert (vr);
    }

  return vr;
}
|
|
173
|
|
174 /* Set the lattice entry for DEF to VARYING. */
|
|
175
|
|
176 void
|
|
177 vr_values::set_def_to_varying (const_tree def)
|
|
178 {
|
|
179 value_range_equiv *vr = get_lattice_entry (def);
|
|
180 if (vr)
|
|
181 vr->set_varying (TREE_TYPE (def));
|
|
182 }
|
|
183
|
131
|
/* Set value-ranges of all SSA names defined by STMT to varying.  */

void
vr_values::set_defs_to_varying (gimple *stmt)
{
  ssa_op_iter i;
  tree def;
  /* Drop every SSA definition of STMT to the lattice bottom.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
    set_def_to_varying (def);
}
|
|
194
|
|
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

bool
vr_values::update_value_range (const_tree var, value_range_equiv *new_vr)
{
  value_range_equiv *old_vr;
  bool is_new;

  /* If there is a value-range on the SSA name from earlier analysis
     factor that in.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
    {
      value_range_equiv nr;
      get_range_info (var, nr);
      if (!nr.undefined_p ())
	new_vr->intersect (&nr);
    }

  /* Update the value range, if necessary.  If we cannot allocate a lattice
     entry for VAR keep it at VARYING.  This happens when DOM feeds us stmts
     with SSA names allocated after setting up the lattice.  */
  old_vr = get_lattice_entry (var);
  if (!old_vr)
    return false;
  /* Compare including equivalences; any difference counts as a change.  */
  is_new = !old_vr->equal_p (*new_vr, /*ignore_equivs=*/false);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED.  If old_vr->type is VARYING, we shouldn't be
	 called, if we are anyway, keep it VARYING.  */
      if (old_vr->varying_p ())
	{
	  new_vr->set_varying (TREE_TYPE (var));
	  is_new = false;
	}
      else if (new_vr->undefined_p ())
	{
	  /* Transitioning back to UNDEFINED would move up the lattice;
	     pin both ranges at VARYING instead and report a change.  */
	  old_vr->set_varying (TREE_TYPE (var));
	  new_vr->set_varying (TREE_TYPE (var));
	  return true;
	}
      else
	old_vr->set (new_vr->min (), new_vr->max (), new_vr->equiv (),
		     new_vr->kind ());
    }

  /* Per the contract above, NEW_VR is a temporary: release its
     equivalence storage before returning.  */
  new_vr->equiv_clear ();

  return is_new;
}
|
|
257
|
|
258 /* Return true if value range VR involves exactly one symbol SYM. */
|
|
259
|
|
260 static bool
|
|
261 symbolic_range_based_on_p (value_range *vr, const_tree sym)
|
|
262 {
|
|
263 bool neg, min_has_symbol, max_has_symbol;
|
|
264 tree inv;
|
|
265
|
|
266 if (is_gimple_min_invariant (vr->min ()))
|
|
267 min_has_symbol = false;
|
|
268 else if (get_single_symbol (vr->min (), &neg, &inv) == sym)
|
|
269 min_has_symbol = true;
|
|
270 else
|
|
271 return false;
|
|
272
|
|
273 if (is_gimple_min_invariant (vr->max ()))
|
|
274 max_has_symbol = false;
|
|
275 else if (get_single_symbol (vr->max (), &neg, &inv) == sym)
|
|
276 max_has_symbol = true;
|
|
277 else
|
|
278 return false;
|
|
279
|
|
280 return (min_has_symbol || max_has_symbol);
|
|
281 }
|
|
282
|
|
/* Return true if the result of assignment STMT is known to be non-zero.  */

static bool
gimple_assign_nonzero_p (gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* The fold-const helpers report through this flag whether they relied
     on undefined signed overflow; the answer is not used here.  */
  bool strict_overflow_p;
  /* Dispatch on the shape of the RHS and defer to the matching
     fold-const predicate.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 &strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  &strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No ternary predicate exists; be conservative.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  &strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
|
|
314
|
|
/* Return true if STMT is known to compute a non-zero value.  */

static bool
gimple_stmt_nonzero_p (gimple *stmt)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_p (stmt);
    case GIMPLE_CALL:
      {
	gcall *call_stmt = as_a<gcall *> (stmt);
	/* Non-NULL either because the callee's result is declared
	   nonnull or because it returns one of its nonnull arguments.  */
	return (gimple_call_nonnull_result_p (call_stmt)
		|| gimple_call_nonnull_arg (call_stmt));
      }
    default:
      /* Only assignments and calls are expected here.  */
      gcc_unreachable ();
    }
}
|
|
/* Like tree_expr_nonzero_p, but this function uses value ranges
   obtained so far.  */

bool
vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
{
  /* First try the context-free predicates.  */
  if (gimple_stmt_nonzero_p (stmt))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      poly_int64 bitsize, bitpos;
      tree offset;
      machine_mode mode;
      int unsignedp, reversep, volatilep;
      tree base = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize,
				       &bitpos, &offset, &mode, &unsignedp,
				       &reversep, &volatilep);

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  poly_offset_int off = 0;
	  bool off_cst = false;
	  /* Accumulate the total constant bit offset of the reference
	     (MEM_REF offset plus any constant OFFSET plus BITPOS) when
	     it is fully known.  */
	  if (offset == NULL_TREE || TREE_CODE (offset) == INTEGER_CST)
	    {
	      off = mem_ref_offset (base);
	      if (offset)
		off += poly_offset_int::from (wi::to_poly_wide (offset),
					      SIGNED);
	      off <<= LOG2_BITS_PER_UNIT;
	      off += bitpos;
	      off_cst = true;
	    }
	  /* If &X->a is equal to X and X is ~[0, 0], the result is too.
	     For -fdelete-null-pointer-checks -fno-wrapv-pointer we don't
	     allow going from non-NULL pointer to NULL.  */
	  if ((off_cst && known_eq (off, 0))
	      || (flag_delete_null_pointer_checks
		  && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (expr))))
	    {
	      const value_range_equiv *vr
		= get_value_range (TREE_OPERAND (base, 0));
	      if (!range_includes_zero_p (vr))
		return true;
	    }
	  /* If MEM_REF has a "positive" offset, consider it non-NULL
	     always, for -fdelete-null-pointer-checks also "negative"
	     ones.  Punt for unknown offsets (e.g. variable ones).  */
	  if (!TYPE_OVERFLOW_WRAPS (TREE_TYPE (expr))
	      && off_cst
	      && known_ne (off, 0)
	      && (flag_delete_null_pointer_checks || known_gt (off, 0)))
	    return true;
	}
    }

  return false;
}
|
|
398
|
|
399 /* Returns true if EXPR is a valid value (as expected by compare_values) --
|
|
400 a gimple invariant, or SSA_NAME +- CST. */
|
|
401
|
|
402 static bool
|
|
403 valid_value_p (tree expr)
|
|
404 {
|
|
405 if (TREE_CODE (expr) == SSA_NAME)
|
|
406 return true;
|
|
407
|
|
408 if (TREE_CODE (expr) == PLUS_EXPR
|
|
409 || TREE_CODE (expr) == MINUS_EXPR)
|
|
410 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
|
|
411 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
|
|
412
|
|
413 return is_gimple_min_invariant (expr);
|
|
414 }
|
|
415
|
|
416 /* If OP has a value range with a single constant value return that,
|
|
417 otherwise return NULL_TREE. This returns OP itself if OP is a
|
|
418 constant. */
|
|
419
|
|
420 tree
|
|
421 vr_values::op_with_constant_singleton_value_range (tree op)
|
|
422 {
|
|
423 if (is_gimple_min_invariant (op))
|
|
424 return op;
|
|
425
|
|
426 if (TREE_CODE (op) != SSA_NAME)
|
|
427 return NULL_TREE;
|
|
428
|
145
|
429 tree t;
|
|
430 if (get_value_range (op)->singleton_p (&t))
|
|
431 return t;
|
|
432 return NULL;
|
131
|
433 }
|
|
434
|
|
435 /* Return true if op is in a boolean [0, 1] value-range. */
|
|
436
|
|
437 bool
|
|
438 vr_values::op_with_boolean_value_range_p (tree op)
|
|
439 {
|
145
|
440 const value_range_equiv *vr;
|
131
|
441
|
|
442 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
|
|
443 return true;
|
|
444
|
|
445 if (integer_zerop (op)
|
|
446 || integer_onep (op))
|
|
447 return true;
|
|
448
|
|
449 if (TREE_CODE (op) != SSA_NAME)
|
|
450 return false;
|
|
451
|
|
452 vr = get_value_range (op);
|
|
453 return (vr->kind () == VR_RANGE
|
|
454 && integer_zerop (vr->min ())
|
|
455 && integer_onep (vr->max ()));
|
|
456 }
|
|
457
|
|
/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
   true and store it in *VR_P.  */

void
vr_values::extract_range_for_var_from_comparison_expr (tree var,
						       enum tree_code cond_code,
						       tree op, tree limit,
						       value_range_equiv *vr_p)
{
  tree min, max, type;
  const value_range_equiv *limit_vr;
  type = TREE_TYPE (var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  If we arrive here with unfolded conditions like
     _1 > _1 do not derive anything.  */
  if ((POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
      || limit == var)
    {
      vr_p->set_varying (type);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily. */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (! limit_vr
      || limit_vr->undefined_p ()
      || limit_vr->varying_p ()
      || (limit_vr->symbolic_p ()
	  && ! (limit_vr->kind () == VR_RANGE
		&& (limit_vr->min () == limit_vr->max ()
		    || operand_equal_p (limit_vr->min (),
					limit_vr->max (), 0)))))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv () == NULL);
  vr_p->equiv_add (var, get_value_range (var), &vrp_equiv_obstack);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (op) == NOP_EXPR
      || TREE_CODE (op) == PLUS_EXPR)
    {
      if (TREE_CODE (op) == PLUS_EXPR)
	{
	  /* Undo the +CST: the implied range of VAR is
	     [-CST, LIMIT - CST] (modulo the canonicalization below).  */
	  min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
			     TREE_OPERAND (op, 1));
	  max = int_const_binop (PLUS_EXPR, limit, min);
	  op = TREE_OPERAND (op, 0);
	}
      else
	{
	  min = build_int_cst (TREE_TYPE (var), 0);
	  max = limit;
	}

      /* Make sure to not set TREE_OVERFLOW on the final type
	 conversion.  We are willingly interpreting large positive
	 unsigned values as negative signed values here.  */
      min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
      max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);

      /* We can transform a max, min range to an anti-range or
	 vice-versa.  Use set_and_canonicalize which does this for
	 us.  */
      if (cond_code == LE_EXPR)
	vr_p->set (min, max, vr_p->equiv ());
      else if (cond_code == GT_EXPR)
	vr_p->set (min, max, vr_p->equiv (), VR_ANTI_RANGE)
;
      else
	gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_kind range_kind;

      /* Adopt LIMIT's own range when useful, else the singleton
	 [LIMIT, LIMIT].  */
      if (limit_vr)
	{
	  range_kind = limit_vr->kind ();
	  min = limit_vr->min ();
	  max = limit_vr->max ();
	}
      else
	{
	  range_kind = VR_RANGE;
	  min = limit;
	  max = limit;
	}

      vr_p->update (min, max, range_kind);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
	 SSA name, the new range will also inherit the equivalence set
	 from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
	vr_p->equiv_add (limit, get_value_range (limit), &vrp_equiv_obstack);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
	 this assertion is an inequality (NE_EXPR), then we cannot
	 derive anything from the anti-range.  For instance, if
	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
	 not imply that VAR's range is [0, 0].  So, in the case of
	 anti-ranges, we just assert the inequality using LIMIT and
	 not its anti-range.

	 If LIMIT_VR is a range, we can only use it to build a new
	 anti-range if LIMIT_VR is a single-valued range.  For
	 instance, if LIMIT_VR is [0, 1], the predicate
	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
	 Rather, it means that for value 0 VAR should be ~[0, 0]
	 and for value 1, VAR should be ~[1, 1].  We cannot
	 represent these ranges.

	 The only situation in which we can build a valid
	 anti-range is when LIMIT_VR is a single-valued range
	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
	  && limit_vr->kind () == VR_RANGE
	  && compare_values (limit_vr->min (), limit_vr->max ()) == 0)
	{
	  min = limit_vr->min ();
	  max = limit_vr->max ();
	}
      else
	{
	  /* In any other case, we cannot use LIMIT's range to build a
	     valid anti-range.  */
	  min = max = limit;
	}

      /* If MIN and MAX cover the whole range for their type, then
	 just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (min)
	  && vrp_val_is_max (max))
	min = max = limit;

      vr_p->set (min, max, vr_p->equiv (), VR_ANTI_RANGE);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->kind () == VR_ANTI_RANGE)
	max = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
	     LT_EXPR.  */
	  max = limit_vr->max ();
	}

      /* If the maximum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if (cond_code == LT_EXPR
	  && compare_values (max, min) == 0)
	vr_p->set_varying (TREE_TYPE (min));
      else
	{
	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
	  if (cond_code == LT_EXPR)
	    {
	      /* In a 1-bit signed type the constant 1 is not
		 representable, so express MAX - 1 as MAX + (-1).  */
	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), -1));
	      else
		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), 1));
	      /* Signal to compare_values_warnv this expr doesn't overflow.  */
	      if (EXPR_P (max))
		TREE_NO_WARNING (max) = 1;
	    }

	  vr_p->update (min, max);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->kind () == VR_ANTI_RANGE)
	min = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
	     GT_EXPR.  */
	  min = limit_vr->min ();
	}

      /* If the minimum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if (cond_code == GT_EXPR
	  && compare_values (min, max) == 0)
	vr_p->set_varying (TREE_TYPE (min));
      else
	{
	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
	  if (cond_code == GT_EXPR)
	    {
	      /* Mirror of the LT_EXPR case: in a 1-bit signed type
		 express MIN + 1 as MIN - (-1).  */
	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), -1));
	      else
		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), 1));
	      /* Signal to compare_values_warnv this expr doesn't overflow.  */
	      if (EXPR_P (min))
		TREE_NO_WARNING (min) = 1;
	    }

	  vr_p->update (min, max);
	}
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vr_p->intersect (get_value_range (var));
}
|
|
704
|
|
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  */

void
vr_values::extract_range_from_assert (value_range_equiv *vr_p, tree expr)
{
  tree var = ASSERT_EXPR_VAR (expr);
  tree cond = ASSERT_EXPR_COND (expr);
  tree limit, op;
  enum tree_code cond_code;
  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  A PLUS_EXPR/NOP_EXPR
     LHS is also taken as the VAR side (the range-test encoding
     handled by extract_range_for_var_from_comparison_expr).  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
	 take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      op = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
	 to flip around the comparison code to create the proper range
	 for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      op = TREE_OPERAND (cond, 1);
    }
  extract_range_for_var_from_comparison_expr (var, cond_code, op,
					      limit, vr_p);
}
|
|
740
|
|
741 /* Extract range information from SSA name VAR and store it in VR. If
|
|
742 VAR has an interesting range, use it. Otherwise, create the
|
|
743 range [VAR, VAR] and return it. This is useful in situations where
|
|
744 we may have conditionals testing values of VARYING names. For
|
|
745 instance,
|
|
746
|
|
747 x_3 = y_5;
|
|
748 if (x_3 > y_5)
|
|
749 ...
|
|
750
|
|
751 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
|
|
752 always false. */
|
|
753
|
|
754 void
|
145
|
755 vr_values::extract_range_from_ssa_name (value_range_equiv *vr, tree var)
|
131
|
756 {
|
145
|
757 const value_range_equiv *var_vr = get_value_range (var);
|
131
|
758
|
|
759 if (!var_vr->varying_p ())
|
|
760 vr->deep_copy (var_vr);
|
|
761 else
|
145
|
762 vr->set (var);
|
|
763
|
|
764 if (!vr->undefined_p ())
|
|
765 vr->equiv_add (var, get_value_range (var), &vrp_equiv_obstack);
|
131
|
766 }
|
|
767
|
|
/* Extract range information from a binary expression OP0 CODE OP1 based on
   the ranges of each of its operands with resulting type EXPR_TYPE.
   The resulting range is stored in *VR.  */

void
vr_values::extract_range_from_binary_expr (value_range_equiv *vr,
					   enum tree_code code,
					   tree expr_type, tree op0, tree op1)
{
  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.  */
  value_range vr0, vr1;
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    vr0.set (op0);
  else
    vr0.set_varying (TREE_TYPE (op0));

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    vr1.set (op1);
  else
    vr1.set_varying (TREE_TYPE (op1));

  /* If one argument is varying, we can sometimes still deduce a
     range for the output: any + [3, +INF] is in [MIN+3, +INF].  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
    {
      if (vr0.varying_p () && !vr1.varying_p ())
	vr0 = value_range (vrp_val_min (expr_type), vrp_val_max (expr_type));
      else if (vr1.varying_p () && !vr0.varying_p ())
	vr1 = value_range (vrp_val_min (expr_type), vrp_val_max (expr_type));
    }

  range_fold_binary_expr (vr, code, expr_type, &vr0, &vr1);

  /* Set value_range for n in following sequence:
     def = __builtin_memchr (arg, 0, sz)
     n = def - arg
     Here the range for n can be set to [0, PTRDIFF_MAX - 1].  */

  if (vr->varying_p ()
      && code == POINTER_DIFF_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME)
    {
      tree op0_ptype = TREE_TYPE (TREE_TYPE (op0));
      tree op1_ptype = TREE_TYPE (TREE_TYPE (op1));
      gcall *call_stmt = NULL;

      /* Both pointers must point at char-mode/char-precision objects
	 and OP0 must be the result of memchr (OP1, 0, SZ).  */
      if (TYPE_MODE (op0_ptype) == TYPE_MODE (char_type_node)
	  && TYPE_PRECISION (op0_ptype) == TYPE_PRECISION (char_type_node)
	  && TYPE_MODE (op1_ptype) == TYPE_MODE (char_type_node)
	  && TYPE_PRECISION (op1_ptype) == TYPE_PRECISION (char_type_node)
	  && (call_stmt = dyn_cast<gcall *>(SSA_NAME_DEF_STMT (op0)))
	  && gimple_call_builtin_p (call_stmt, BUILT_IN_MEMCHR)
	  && operand_equal_p (op0, gimple_call_lhs (call_stmt), 0)
	  && operand_equal_p (op1, gimple_call_arg (call_stmt, 0), 0)
	  && integer_zerop (gimple_call_arg (call_stmt, 1)))
	{
	  tree max = vrp_val_max (ptrdiff_type_node);
	  wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
	  tree range_min = build_zero_cst (expr_type);
	  tree range_max = wide_int_to_tree (expr_type, wmax - 1);
	  vr->set (range_min, range_max);
	  return;
	}
    }

  /* Try harder for PLUS and MINUS if the range of one operand is symbolic
     and based on the other operand, for example if it was deduced from a
     symbolic comparison.  When a bound of the range of the first operand
     is invariant, we set the corresponding bound of the new range to INF
     in order to avoid recursing on the range of the second operand.  */
  if (vr->varying_p ()
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op1) == SSA_NAME
      && vr0.kind () == VR_RANGE
      && symbolic_range_based_on_p (&vr0, op1))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range n_vr1;

      /* Try with VR0 and [-INF, OP1].  */
      if (is_gimple_min_invariant (minus_p ? vr0.max () : vr0.min ()))
	n_vr1.set (vrp_val_min (expr_type), op1);

      /* Try with VR0 and [OP1, +INF].  */
      else if (is_gimple_min_invariant (minus_p ? vr0.min () : vr0.max ()))
	n_vr1.set (op1, vrp_val_max (expr_type));

      /* Try with VR0 and [OP1, OP1].  */
      else
	n_vr1.set (op1, op1);

      range_fold_binary_expr (vr, code, expr_type, &vr0, &n_vr1);
    }

  /* Mirror image of the above for a symbolic VR1 based on OP0.  */
  if (vr->varying_p ()
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && vr1.kind () == VR_RANGE
      && symbolic_range_based_on_p (&vr1, op0))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range n_vr0;

      /* Try with [-INF, OP0] and VR1.  */
      if (is_gimple_min_invariant (minus_p ? vr1.max () : vr1.min ()))
	n_vr0.set (vrp_val_min (expr_type), op0);

      /* Try with [OP0, +INF] and VR1.  */
      else if (is_gimple_min_invariant (minus_p ? vr1.min (): vr1.max ()))
	n_vr0.set (op0, vrp_val_max (expr_type));

      /* Try with [OP0, OP0] and VR1.  */
      else
	n_vr0.set (op0);

      range_fold_binary_expr (vr, code, expr_type, &n_vr0, &vr1);
    }

  /* If we didn't derive a range for MINUS_EXPR, and
     op1's range is ~[op0,op0] or vice-versa, then we
     can derive a non-null range.  This happens often for
     pointer subtraction.  */
  if (vr->varying_p ()
      && (code == MINUS_EXPR || code == POINTER_DIFF_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && ((vr0.kind () == VR_ANTI_RANGE
	   && vr0.min () == op1
	   && vr0.min () == vr0.max ())
	  || (vr1.kind () == VR_ANTI_RANGE
	      && vr1.min () == op0
	      && vr1.min () == vr1.max ())))
    {
      vr->set_nonzero (expr_type);
      vr->equiv_clear ();
    }
}
|
|
911
|
|
912 /* Extract range information from a unary expression CODE OP0 based on
|
|
913 the range of its operand with resulting type TYPE.
|
|
914 The resulting range is stored in *VR. */
|
|
915
|
|
916 void
|
145
|
917 vr_values::extract_range_from_unary_expr (value_range_equiv *vr,
|
|
918 enum tree_code code,
|
131
|
919 tree type, tree op0)
|
|
920 {
|
|
921 value_range vr0;
|
|
922
|
|
923 /* Get value ranges for the operand. For constant operands, create
|
|
924 a new value range with the operand to simplify processing. */
|
|
925 if (TREE_CODE (op0) == SSA_NAME)
|
|
926 vr0 = *(get_value_range (op0));
|
|
927 else if (is_gimple_min_invariant (op0))
|
145
|
928 vr0.set (op0);
|
131
|
929 else
|
145
|
930 vr0.set_varying (type);
|
|
931
|
|
932 range_fold_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
|
131
|
933 }
|
|
934
|
|
935
|
|
936 /* Extract range information from a conditional expression STMT based on
|
|
937 the ranges of each of its operands and the expression code. */
|
|
938
|
|
939 void
|
145
|
940 vr_values::extract_range_from_cond_expr (value_range_equiv *vr, gassign *stmt)
|
131
|
941 {
|
|
942 /* Get value ranges for each operand. For constant operands, create
|
|
943 a new value range with the operand to simplify processing. */
|
|
944 tree op0 = gimple_assign_rhs2 (stmt);
|
145
|
945 value_range_equiv tem0;
|
|
946 const value_range_equiv *vr0 = &tem0;
|
131
|
947 if (TREE_CODE (op0) == SSA_NAME)
|
145
|
948 vr0 = get_value_range (op0);
|
131
|
949 else if (is_gimple_min_invariant (op0))
|
145
|
950 tem0.set (op0);
|
131
|
951 else
|
145
|
952 tem0.set_varying (TREE_TYPE (op0));
|
131
|
953
|
|
954 tree op1 = gimple_assign_rhs3 (stmt);
|
145
|
955 value_range_equiv tem1;
|
|
956 const value_range_equiv *vr1 = &tem1;
|
131
|
957 if (TREE_CODE (op1) == SSA_NAME)
|
145
|
958 vr1 = get_value_range (op1);
|
131
|
959 else if (is_gimple_min_invariant (op1))
|
145
|
960 tem1.set (op1);
|
131
|
961 else
|
145
|
962 tem1.set_varying (TREE_TYPE (op1));
|
131
|
963
|
|
964 /* The resulting value range is the union of the operand ranges */
|
145
|
965 vr->deep_copy (vr0);
|
|
966 vr->union_ (vr1);
|
131
|
967 }
|
|
968
|
|
969
|
|
970 /* Extract range information from a comparison expression EXPR based
|
|
971 on the range of its operand and the expression code. */
|
|
972
|
|
973 void
|
145
|
974 vr_values::extract_range_from_comparison (value_range_equiv *vr,
|
|
975 enum tree_code code,
|
131
|
976 tree type, tree op0, tree op1)
|
|
977 {
|
|
978 bool sop;
|
|
979 tree val;
|
|
980
|
|
981 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
|
|
982 NULL);
|
|
983 if (val)
|
|
984 {
|
|
985 /* Since this expression was found on the RHS of an assignment,
|
|
986 its type may be different from _Bool. Convert VAL to EXPR's
|
|
987 type. */
|
|
988 val = fold_convert (type, val);
|
|
989 if (is_gimple_min_invariant (val))
|
145
|
990 vr->set (val);
|
131
|
991 else
|
145
|
992 vr->update (val, val);
|
131
|
993 }
|
|
994 else
|
|
995 /* The result of a comparison is always true or false. */
|
|
996 set_value_range_to_truthvalue (vr, type);
|
|
997 }
|
|
998
|
|
999 /* Helper function for simplify_internal_call_using_ranges and
|
|
1000 extract_range_basic. Return true if OP0 SUBCODE OP1 for
|
|
1001 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
|
|
1002 always overflow. Set *OVF to true if it is known to always
|
|
1003 overflow. */
|
|
1004
|
|
bool
vr_values::check_for_binary_op_overflow (enum tree_code subcode, tree type,
					 tree op0, tree op1, bool *ovf)
{
  /* Fetch ranges for both operands; integer constants become
     singleton ranges and anything else is treated as VARYING.  */
  value_range vr0, vr1;
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *get_value_range (op0);
  else if (TREE_CODE (op0) == INTEGER_CST)
    vr0.set (op0);
  else
    vr0.set_varying (TREE_TYPE (op0));

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *get_value_range (op1);
  else if (TREE_CODE (op1) == INTEGER_CST)
    vr1.set (op1);
  else
    vr1.set_varying (TREE_TYPE (op1));

  /* Fall back to the operand type's full range when the bounds are
     not usable integer constants.  */
  tree vr0min = vr0.min (), vr0max = vr0.max ();
  tree vr1min = vr1.min (), vr1max = vr1.max ();
  if (!range_int_cst_p (&vr0)
      || TREE_OVERFLOW (vr0min)
      || TREE_OVERFLOW (vr0max))
    {
      vr0min = vrp_val_min (TREE_TYPE (op0));
      vr0max = vrp_val_max (TREE_TYPE (op0));
    }
  if (!range_int_cst_p (&vr1)
      || TREE_OVERFLOW (vr1min)
      || TREE_OVERFLOW (vr1max))
    {
      vr1min = vrp_val_min (TREE_TYPE (op1));
      vr1max = vrp_val_max (TREE_TYPE (op1));
    }
  /* Probe the extreme corners of the two ranges.  For MINUS the
     second operand's bounds are swapped so that both probes hit the
     extremes of the result.  If the two probes disagree, the answer
     is neither "never" nor "always" overflows.  */
  *ovf = arith_overflowed_p (subcode, type, vr0min,
			     subcode == MINUS_EXPR ? vr1max : vr1min);
  if (arith_overflowed_p (subcode, type, vr0max,
			  subcode == MINUS_EXPR ? vr1min : vr1max) != *ovf)
    return false;
  /* Multiplication is not monotonic in either operand, so the
     remaining two corners must agree as well.  */
  if (subcode == MULT_EXPR)
    {
      if (arith_overflowed_p (subcode, type, vr0min, vr1max) != *ovf
	  || arith_overflowed_p (subcode, type, vr0max, vr1min) != *ovf)
	return false;
    }
  if (*ovf)
    {
      /* So far we found that there is an overflow on the boundaries.
	 That doesn't prove that there is an overflow even for all values
	 in between the boundaries.  For that compute widest_int range
	 of the result and see if it doesn't overlap the range of
	 type.  */
      widest_int wmin, wmax;
      widest_int w[4];
      int i;
      w[0] = wi::to_widest (vr0min);
      w[1] = wi::to_widest (vr0max);
      w[2] = wi::to_widest (vr1min);
      w[3] = wi::to_widest (vr1max);
      /* Evaluate the operation at all four corner combinations:
	 i&1 selects the vr0 bound, (i&2)/2 selects the vr1 bound.  */
      for (i = 0; i < 4; i++)
	{
	  widest_int wt;
	  switch (subcode)
	    {
	    case PLUS_EXPR:
	      wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MINUS_EXPR:
	      wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    case MULT_EXPR:
	      wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  if (i == 0)
	    {
	      wmin = wt;
	      wmax = wt;
	    }
	  else
	    {
	      wmin = wi::smin (wmin, wt);
	      wmax = wi::smax (wmax, wt);
	    }
	}
      /* The result of op0 CODE op1 is known to be in range
	 [wmin, wmax].  */
      widest_int wtmin = wi::to_widest (vrp_val_min (type));
      widest_int wtmax = wi::to_widest (vrp_val_max (type));
      /* If all values in [wmin, wmax] are smaller than
	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
	 the arithmetic operation will always overflow.  */
      if (wmax < wtmin || wmin > wtmax)
	return true;
      return false;
    }
  return true;
}
|
|
1106
|
|
1107 /* Try to derive a nonnegative or nonzero range out of STMT relying
|
|
1108 primarily on generic routines in fold in conjunction with range data.
|
|
1109 Store the result in *VR */
|
|
1110
|
|
void
vr_values::extract_range_basic (value_range_equiv *vr, gimple *stmt)
{
  bool sop;
  tree type = gimple_expr_type (stmt);

  if (is_gimple_call (stmt))
    {
      tree arg;
      /* MINI/MAXI accumulate the result range for the bit-twiddling
	 builtins below; MINI == -2 is a sentinel meaning "give up".  */
      int mini, maxi, zerov = 0, prec;
      enum tree_code subcode = ERROR_MARK;
      combined_fn cfn = gimple_call_combined_fn (stmt);
      scalar_int_mode mode;

      switch (cfn)
	{
	case CFN_BUILT_IN_CONSTANT_P:
	  /* Resolve calls to __builtin_constant_p after inlining.  */
	  if (cfun->after_inlining)
	    {
	      vr->set_zero (type);
	      vr->equiv_clear ();
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_CFN_FFS:
	CASE_CFN_POPCOUNT:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      const value_range_equiv *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount are non-zero.  */
	      if (range_includes_zero_p (vr0) == 0)
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->kind () == VR_RANGE
		  && TREE_CODE (vr0->max ()) == INTEGER_CST
		  && !operand_less_p (vr0->min (),
				      build_zero_cst (TREE_TYPE (vr0->min ()))))
		maxi = tree_floor_log2 (vr0->max ()) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_CFN_PARITY:
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_CFN_CLZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
	  if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      const value_range_equiv *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->kind () == VR_RANGE
		  && TREE_CODE (vr0->min ()) == INTEGER_CST)
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min ());
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->kind () == VR_ANTI_RANGE
		       && integer_zerop (vr0->min ()))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->kind () == VR_RANGE
		  && TREE_CODE (vr0->max ()) == INTEGER_CST)
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max ());
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_CFN_CTZ:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
	  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      const value_range_equiv *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if ((vr0->kind () == VR_RANGE
		   && integer_nonzerop (vr0->min ()))
		  || (vr0->kind () == VR_ANTI_RANGE
		      && integer_zerop (vr0->min ())))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->kind () == VR_RANGE
		  && TREE_CODE (vr0->max ()) == INTEGER_CST)
		{
		  maxi = tree_floor_log2 (vr0->max ());
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_CFN_CLRSB:
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for all the bit-twiddling builtins above:
	     publish the accumulated [mini, maxi] range.  */
	  vr->set (build_int_cst (type, mini), build_int_cst (type, maxi));
	  return;
	case CFN_UBSAN_CHECK_ADD:
	  subcode = PLUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_SUB:
	  subcode = MINUS_EXPR;
	  break;
	case CFN_UBSAN_CHECK_MUL:
	  subcode = MULT_EXPR;
	  break;
	case CFN_GOACC_DIM_SIZE:
	case CFN_GOACC_DIM_POS:
	  /* Optimizing these two internal functions helps the loop
	     optimizer eliminate outer comparisons.  Size is [1,N]
	     and pos is [0,N-1].  */
	  {
	    bool is_pos = cfn == CFN_GOACC_DIM_POS;
	    int axis = oacc_get_ifn_dim_arg (stmt);
	    int size = oacc_get_fn_dim_size (current_function_decl, axis);

	    if (!size)
	      /* If it's dynamic, the backend might know a hardware
		 limitation.  */
	      size = targetm.goacc.dim_limit (axis);

	    tree type = TREE_TYPE (gimple_call_lhs (stmt));
	    vr->set(build_int_cst (type, is_pos ? 0 : 1),
		    size
		    ? build_int_cst (type, size - is_pos) : vrp_val_max (type));
	  }
	  return;
	case CFN_BUILT_IN_STRLEN:
	  if (tree lhs = gimple_call_lhs (stmt))
	    if (ptrdiff_type_node
		&& (TYPE_PRECISION (ptrdiff_type_node)
		    == TYPE_PRECISION (TREE_TYPE (lhs))))
	      {
		tree type = TREE_TYPE (lhs);
		tree max = vrp_val_max (ptrdiff_type_node);
		wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
		tree range_min = build_zero_cst (type);
		/* To account for the terminating NUL, the maximum length
		   is one less than the maximum array size, which in turn
		   is one less than PTRDIFF_MAX (or SIZE_MAX where it's
		   smaller than the former type).
		   FIXME: Use max_object_size() - 1 here.  */
		tree range_max = wide_int_to_tree (type, wmax - 2);
		vr->set (range_min, range_max);
		return;
	      }
	  break;
	default:
	  break;
	}
      /* UBSAN_CHECK_{ADD,SUB,MUL} from above fall through here with
	 SUBCODE set to the corresponding arithmetic operation.  */
      if (subcode != ERROR_MARK)
	{
	  bool saved_flag_wrapv = flag_wrapv;
	  /* Pretend the arithmetics is wrapping.  If there is
	     any overflow, we'll complain, but will actually do
	     wrapping operation.  */
	  flag_wrapv = 1;
	  extract_range_from_binary_expr (vr, subcode, type,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 1));
	  flag_wrapv = saved_flag_wrapv;

	  /* If for both arguments vrp_valueize returned non-NULL,
	     this should have been already folded and if not, it
	     wasn't folded because of overflow.  Avoid removing the
	     UBSAN_CHECK_* calls in that case.  */
	  if (vr->kind () == VR_RANGE
	      && (vr->min () == vr->max ()
		  || operand_equal_p (vr->min (), vr->max (), 0)))
	    vr->set_varying (vr->type ());
	  return;
	}
    }
  /* Handle extraction of the two results (result of arithmetics and
     a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
     internal function.  Similarly from ATOMIC_COMPARE_EXCHANGE.  */
  else if (is_gimple_assign (stmt)
	   && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
	       || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
	   && INTEGRAL_TYPE_P (type))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      tree op = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
	{
	  gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
	  if (is_gimple_call (g) && gimple_call_internal_p (g))
	    {
	      enum tree_code subcode = ERROR_MARK;
	      switch (gimple_call_internal_fn (g))
		{
		case IFN_ADD_OVERFLOW:
		  subcode = PLUS_EXPR;
		  break;
		case IFN_SUB_OVERFLOW:
		  subcode = MINUS_EXPR;
		  break;
		case IFN_MUL_OVERFLOW:
		  subcode = MULT_EXPR;
		  break;
		case IFN_ATOMIC_COMPARE_EXCHANGE:
		  if (code == IMAGPART_EXPR)
		    {
		      /* This is the boolean return value whether compare and
			 exchange changed anything or not.  */
		      vr->set (build_int_cst (type, 0),
			       build_int_cst (type, 1));
		      return;
		    }
		  break;
		default:
		  break;
		}
	      if (subcode != ERROR_MARK)
		{
		  tree op0 = gimple_call_arg (g, 0);
		  tree op1 = gimple_call_arg (g, 1);
		  if (code == IMAGPART_EXPR)
		    {
		      /* The IMAGPART is the overflow flag; try to prove
			 it constant, otherwise it is [0, 1] (except for
			 1-bit signed types, where "1" is not
			 representable).  */
		      bool ovf = false;
		      if (check_for_binary_op_overflow (subcode, type,
							op0, op1, &ovf))
			vr->set (build_int_cst (type, ovf));
		      else if (TYPE_PRECISION (type) == 1
			       && !TYPE_UNSIGNED (type))
			vr->set_varying (type);
		      else
			vr->set (build_int_cst (type, 0),
				 build_int_cst (type, 1));
		    }
		  else if (types_compatible_p (type, TREE_TYPE (op0))
			   && types_compatible_p (type, TREE_TYPE (op1)))
		    {
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_binary_expr (vr, subcode, type,
						      op0, op1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  else
		    {
		      /* Operand types differ from the result type:
			 convert each operand's range to TYPE first.  */
		      value_range_equiv vr0, vr1;
		      bool saved_flag_wrapv = flag_wrapv;
		      /* Pretend the arithmetics is wrapping.  If there is
			 any overflow, IMAGPART_EXPR will be set.  */
		      flag_wrapv = 1;
		      extract_range_from_unary_expr (&vr0, NOP_EXPR,
						     type, op0);
		      extract_range_from_unary_expr (&vr1, NOP_EXPR,
						     type, op1);
		      range_fold_binary_expr (vr, subcode, type, &vr0, &vr1);
		      flag_wrapv = saved_flag_wrapv;
		    }
		  return;
		}
	    }
	}
    }
  /* Last resort: ask fold whether the statement's result is known
     nonnegative or nonzero.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type);
  else if (vrp_stmt_computes_nonzero (stmt))
    {
      vr->set_nonzero (type);
      vr->equiv_clear ();
    }
  else
    vr->set_varying (type);
}
|
|
1449
|
|
1450
|
|
1451 /* Try to compute a useful range out of assignment STMT and store it
|
|
1452 in *VR. */
|
|
1453
|
|
1454 void
|
145
|
1455 vr_values::extract_range_from_assignment (value_range_equiv *vr, gassign *stmt)
|
131
|
1456 {
|
|
1457 enum tree_code code = gimple_assign_rhs_code (stmt);
|
|
1458
|
|
1459 if (code == ASSERT_EXPR)
|
|
1460 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
|
|
1461 else if (code == SSA_NAME)
|
|
1462 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
|
|
1463 else if (TREE_CODE_CLASS (code) == tcc_binary)
|
|
1464 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
|
|
1465 gimple_expr_type (stmt),
|
|
1466 gimple_assign_rhs1 (stmt),
|
|
1467 gimple_assign_rhs2 (stmt));
|
|
1468 else if (TREE_CODE_CLASS (code) == tcc_unary)
|
|
1469 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
|
|
1470 gimple_expr_type (stmt),
|
|
1471 gimple_assign_rhs1 (stmt));
|
|
1472 else if (code == COND_EXPR)
|
|
1473 extract_range_from_cond_expr (vr, stmt);
|
|
1474 else if (TREE_CODE_CLASS (code) == tcc_comparison)
|
|
1475 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
|
|
1476 gimple_expr_type (stmt),
|
|
1477 gimple_assign_rhs1 (stmt),
|
|
1478 gimple_assign_rhs2 (stmt));
|
|
1479 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
|
|
1480 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
|
145
|
1481 vr->set (gimple_assign_rhs1 (stmt));
|
131
|
1482 else
|
145
|
1483 vr->set_varying (TREE_TYPE (gimple_assign_lhs (stmt)));
|
131
|
1484
|
|
1485 if (vr->varying_p ())
|
|
1486 extract_range_basic (vr, stmt);
|
|
1487 }
|
|
1488
|
|
1489 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
|
|
1490
|
|
1491 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
|
|
1492 all the values in the ranges.
|
|
1493
|
|
1494 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
|
|
1495
|
|
1496 - Return NULL_TREE if it is not always possible to determine the
|
|
1497 value of the comparison.
|
|
1498
|
|
1499 Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
|
|
1500 assumed signed overflow is undefined. */
|
|
1501
|
|
1502
|
|
static tree
compare_ranges (enum tree_code comp, const value_range_equiv *vr0,
		const value_range_equiv *vr1, bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->varying_p ()
      || vr0->undefined_p ()
      || vr1->varying_p ()
      || vr1->undefined_p ())
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->kind () == VR_ANTI_RANGE || vr1->kind () == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->kind () == VR_ANTI_RANGE && vr1->kind () == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->kind () == VR_RANGE)
	/* To simplify processing, make VR0 the anti-range.  */
	std::swap (vr0, vr1);

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* If the anti-range and the range have identical bounds,
	 every value in the range is excluded, so equality is false
	 and inequality is true.  */
      if (compare_values_warnv (vr0->min (), vr1->min (), strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max (), vr1->max (), strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      std::swap (vr0, vr1);
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min (), vr0->max (), strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min (), vr1->max (), strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min (), vr1->min (),
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max (), vr1->max (),
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* -2 appears to mean the values could not be compared at
	     compile time; any other result decides the answer.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min (), vr1->max (),
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min (), vr0->max (),
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max (), vr1->min (), strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min (), vr1->max (), strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min (), vr0->max (),
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min (), vr1->max (),
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min (), vr1->min (),
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max (), vr1->max (),
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max (), vr1->min (), strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	return boolean_true_node;

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min (), vr1->max (), strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT/GE were canonicalized to LT/LE above, so every comparison
     code has been handled by now.  */
  gcc_unreachable ();
}
|
|
1629
|
|
1630 /* Given a value range VR, a value VAL and a comparison code COMP, return
|
|
1631 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
|
|
1632 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
|
|
1633 always returns false. Return NULL_TREE if it is not always
|
|
1634 possible to determine the value of the comparison. Also set
|
|
1635 *STRICT_OVERFLOW_P to indicate whether comparision evaluation
|
|
1636 assumed signed overflow is undefined. */
|
|
1637
|
|
static tree
compare_range_with_value (enum tree_code comp, const value_range_equiv *vr,
			  tree val, bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr->varying_p () || vr->undefined_p ())
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->kind () == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (!vr->may_contain_p (val))
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min (), vr->max (), strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min (), val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  /* Any decidable inequality (-1, 1, or 2) means the
	     singleton differs from VAL.  */
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* VAL lies strictly outside [min, max], so equality is
	 impossible.  */
      else if (compare_values_warnv (val, vr->min (), strict_overflow_p) == -1
	       || compare_values_warnv (vr->max (), val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max (), val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min (), val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min (), vr->max (), strict_overflow_p) == 0
	  && compare_values_warnv (vr->min (), val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max (), val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	return boolean_true_node;

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min (), val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min (), val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	return boolean_true_node;

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max (), val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes callers pass are handled above.  */
  gcc_unreachable ();
}
|
|
1738 /* Given a range VR, a LOOP and a variable VAR, determine whether it
|
|
1739 would be profitable to adjust VR using scalar evolution information
|
|
1740 for VAR. If so, update VR with the new limits. */
|
|
1741
|
|
1742 void
|
145
|
1743 vr_values::adjust_range_with_scev (value_range_equiv *vr, class loop *loop,
|
131
|
1744 gimple *stmt, tree var)
|
|
1745 {
|
|
1746 tree init, step, chrec, tmin, tmax, min, max, type, tem;
|
|
1747 enum ev_direction dir;
|
|
1748
|
|
1749 /* TODO. Don't adjust anti-ranges. An anti-range may provide
|
|
1750 better opportunities than a regular range, but I'm not sure. */
|
|
1751 if (vr->kind () == VR_ANTI_RANGE)
|
|
1752 return;
|
|
1753
|
|
1754 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
|
|
1755
|
|
1756 /* Like in PR19590, scev can return a constant function. */
|
|
1757 if (is_gimple_min_invariant (chrec))
|
|
1758 {
|
145
|
1759 vr->set (chrec);
|
131
|
1760 return;
|
|
1761 }
|
|
1762
|
|
1763 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
|
|
1764 return;
|
|
1765
|
|
1766 init = initial_condition_in_loop_num (chrec, loop->num);
|
|
1767 tem = op_with_constant_singleton_value_range (init);
|
|
1768 if (tem)
|
|
1769 init = tem;
|
|
1770 step = evolution_part_in_loop_num (chrec, loop->num);
|
|
1771 tem = op_with_constant_singleton_value_range (step);
|
|
1772 if (tem)
|
|
1773 step = tem;
|
|
1774
|
|
1775 /* If STEP is symbolic, we can't know whether INIT will be the
|
|
1776 minimum or maximum value in the range. Also, unless INIT is
|
|
1777 a simple expression, compare_values and possibly other functions
|
|
1778 in tree-vrp won't be able to handle it. */
|
|
1779 if (step == NULL_TREE
|
|
1780 || !is_gimple_min_invariant (step)
|
|
1781 || !valid_value_p (init))
|
|
1782 return;
|
|
1783
|
|
1784 dir = scev_direction (chrec);
|
|
1785 if (/* Do not adjust ranges if we do not know whether the iv increases
|
|
1786 or decreases, ... */
|
|
1787 dir == EV_DIR_UNKNOWN
|
|
1788 /* ... or if it may wrap. */
|
|
1789 || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
|
|
1790 get_chrec_loop (chrec), true))
|
|
1791 return;
|
|
1792
|
|
1793 type = TREE_TYPE (var);
|
|
1794 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
|
|
1795 tmin = lower_bound_in_type (type, type);
|
|
1796 else
|
|
1797 tmin = TYPE_MIN_VALUE (type);
|
|
1798 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
|
|
1799 tmax = upper_bound_in_type (type, type);
|
|
1800 else
|
|
1801 tmax = TYPE_MAX_VALUE (type);
|
|
1802
|
|
1803 /* Try to use estimated number of iterations for the loop to constrain the
|
|
1804 final value in the evolution. */
|
|
1805 if (TREE_CODE (step) == INTEGER_CST
|
|
1806 && is_gimple_val (init)
|
|
1807 && (TREE_CODE (init) != SSA_NAME
|
|
1808 || get_value_range (init)->kind () == VR_RANGE))
|
|
1809 {
|
|
1810 widest_int nit;
|
|
1811
|
|
1812 /* We are only entering here for loop header PHI nodes, so using
|
|
1813 the number of latch executions is the correct thing to use. */
|
|
1814 if (max_loop_iterations (loop, &nit))
|
|
1815 {
|
|
1816 signop sgn = TYPE_SIGN (TREE_TYPE (step));
|
|
1817 wi::overflow_type overflow;
|
|
1818
|
|
1819 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
|
|
1820 &overflow);
|
|
1821 /* If the multiplication overflowed we can't do a meaningful
|
|
1822 adjustment. Likewise if the result doesn't fit in the type
|
|
1823 of the induction variable. For a signed type we have to
|
|
1824 check whether the result has the expected signedness which
|
|
1825 is that of the step as number of iterations is unsigned. */
|
|
1826 if (!overflow
|
|
1827 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
|
|
1828 && (sgn == UNSIGNED
|
|
1829 || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
|
|
1830 {
|
145
|
1831 value_range_equiv maxvr;
|
131
|
1832 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
|
|
1833 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
|
|
1834 TREE_TYPE (init), init, tem);
|
|
1835 /* Likewise if the addition did. */
|
|
1836 if (maxvr.kind () == VR_RANGE)
|
|
1837 {
|
|
1838 value_range initvr;
|
|
1839
|
|
1840 if (TREE_CODE (init) == SSA_NAME)
|
|
1841 initvr = *(get_value_range (init));
|
|
1842 else if (is_gimple_min_invariant (init))
|
145
|
1843 initvr.set (init);
|
131
|
1844 else
|
|
1845 return;
|
|
1846
|
|
1847 /* Check if init + nit * step overflows. Though we checked
|
|
1848 scev {init, step}_loop doesn't wrap, it is not enough
|
|
1849 because the loop may exit immediately. Overflow could
|
|
1850 happen in the plus expression in this case. */
|
|
1851 if ((dir == EV_DIR_DECREASES
|
|
1852 && compare_values (maxvr.min (), initvr.min ()) != -1)
|
|
1853 || (dir == EV_DIR_GROWS
|
|
1854 && compare_values (maxvr.max (), initvr.max ()) != 1))
|
|
1855 return;
|
|
1856
|
|
1857 tmin = maxvr.min ();
|
|
1858 tmax = maxvr.max ();
|
|
1859 }
|
|
1860 }
|
|
1861 }
|
|
1862 }
|
|
1863
|
|
1864 if (vr->varying_p () || vr->undefined_p ())
|
|
1865 {
|
|
1866 min = tmin;
|
|
1867 max = tmax;
|
|
1868
|
|
1869 /* For VARYING or UNDEFINED ranges, just about anything we get
|
|
1870 from scalar evolutions should be better. */
|
|
1871
|
|
1872 if (dir == EV_DIR_DECREASES)
|
|
1873 max = init;
|
|
1874 else
|
|
1875 min = init;
|
|
1876 }
|
|
1877 else if (vr->kind () == VR_RANGE)
|
|
1878 {
|
|
1879 min = vr->min ();
|
|
1880 max = vr->max ();
|
|
1881
|
|
1882 if (dir == EV_DIR_DECREASES)
|
|
1883 {
|
|
1884 /* INIT is the maximum value. If INIT is lower than VR->MAX ()
|
|
1885 but no smaller than VR->MIN (), set VR->MAX () to INIT. */
|
|
1886 if (compare_values (init, max) == -1)
|
|
1887 max = init;
|
|
1888
|
|
1889 /* According to the loop information, the variable does not
|
|
1890 overflow. */
|
|
1891 if (compare_values (min, tmin) == -1)
|
|
1892 min = tmin;
|
|
1893
|
|
1894 }
|
|
1895 else
|
|
1896 {
|
|
1897 /* If INIT is bigger than VR->MIN (), set VR->MIN () to INIT. */
|
|
1898 if (compare_values (init, min) == 1)
|
|
1899 min = init;
|
|
1900
|
|
1901 if (compare_values (tmax, max) == -1)
|
|
1902 max = tmax;
|
|
1903 }
|
|
1904 }
|
|
1905 else
|
|
1906 return;
|
|
1907
|
|
1908 /* If we just created an invalid range with the minimum
|
|
1909 greater than the maximum, we fail conservatively.
|
|
1910 This should happen only in unreachable
|
|
1911 parts of code, or for invalid programs. */
|
|
1912 if (compare_values (min, max) == 1)
|
|
1913 return;
|
|
1914
|
|
1915 /* Even for valid range info, sometimes overflow flag will leak in.
|
|
1916 As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
|
|
1917 drop them. */
|
|
1918 if (TREE_OVERFLOW_P (min))
|
|
1919 min = drop_tree_overflow (min);
|
|
1920 if (TREE_OVERFLOW_P (max))
|
|
1921 max = drop_tree_overflow (max);
|
|
1922
|
145
|
1923 vr->update (min, max);
|
131
|
1924 }
|
|
1925
|
|
1926 /* Dump value ranges of all SSA_NAMEs to FILE. */
|
|
1927
|
|
1928 void
|
|
1929 vr_values::dump_all_value_ranges (FILE *file)
|
|
1930 {
|
|
1931 size_t i;
|
|
1932
|
|
1933 for (i = 0; i < num_vr_values; i++)
|
|
1934 {
|
|
1935 if (vr_value[i])
|
|
1936 {
|
|
1937 print_generic_expr (file, ssa_name (i));
|
|
1938 fprintf (file, ": ");
|
|
1939 dump_value_range (file, vr_value[i]);
|
|
1940 fprintf (file, "\n");
|
|
1941 }
|
|
1942 }
|
|
1943
|
|
1944 fprintf (file, "\n");
|
|
1945 }
|
|
1946
|
|
1947 /* Initialize VRP lattice. */
|
|
1948
|
|
1949 vr_values::vr_values () : vrp_value_range_pool ("Tree VRP value ranges")
|
|
1950 {
|
|
1951 values_propagated = false;
|
145
|
1952 num_vr_values = num_ssa_names * 2;
|
|
1953 vr_value = XCNEWVEC (value_range_equiv *, num_vr_values);
|
131
|
1954 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
|
|
1955 bitmap_obstack_initialize (&vrp_equiv_obstack);
|
|
1956 to_remove_edges = vNULL;
|
|
1957 to_update_switch_stmts = vNULL;
|
|
1958 }
|
|
1959
|
|
1960 /* Free VRP lattice. */
|
|
1961
|
|
1962 vr_values::~vr_values ()
|
|
1963 {
|
|
1964 /* Free allocated memory. */
|
|
1965 free (vr_value);
|
|
1966 free (vr_phi_edge_counts);
|
|
1967 bitmap_obstack_release (&vrp_equiv_obstack);
|
|
1968 vrp_value_range_pool.release ();
|
|
1969
|
|
1970 /* So that we can distinguish between VRP data being available
|
|
1971 and not available. */
|
|
1972 vr_value = NULL;
|
|
1973 vr_phi_edge_counts = NULL;
|
|
1974
|
|
1975 /* If there are entries left in TO_REMOVE_EDGES or TO_UPDATE_SWITCH_STMTS
|
|
1976 then an EVRP client did not clean up properly. Catch it now rather
|
|
1977 than seeing something more obscure later. */
|
|
1978 gcc_assert (to_remove_edges.is_empty ()
|
|
1979 && to_update_switch_stmts.is_empty ());
|
|
1980 }
|
|
1981
|
|
1982
|
|
1983 /* A hack. */
|
|
1984 static class vr_values *x_vr_values;
|
|
1985
|
|
1986 /* Return the singleton value-range for NAME or NAME. */
|
|
1987
|
|
1988 static inline tree
|
|
1989 vrp_valueize (tree name)
|
|
1990 {
|
|
1991 if (TREE_CODE (name) == SSA_NAME)
|
|
1992 {
|
145
|
1993 const value_range_equiv *vr = x_vr_values->get_value_range (name);
|
131
|
1994 if (vr->kind () == VR_RANGE
|
|
1995 && (TREE_CODE (vr->min ()) == SSA_NAME
|
|
1996 || is_gimple_min_invariant (vr->min ()))
|
|
1997 && vrp_operand_equal_p (vr->min (), vr->max ()))
|
|
1998 return vr->min ();
|
|
1999 }
|
|
2000 return name;
|
|
2001 }
|
|
2002
|
|
2003 /* Return the singleton value-range for NAME if that is a constant
|
|
2004 but signal to not follow SSA edges. */
|
|
2005
|
|
2006 static inline tree
|
|
2007 vrp_valueize_1 (tree name)
|
|
2008 {
|
|
2009 if (TREE_CODE (name) == SSA_NAME)
|
|
2010 {
|
|
2011 /* If the definition may be simulated again we cannot follow
|
|
2012 this SSA edge as the SSA propagator does not necessarily
|
|
2013 re-visit the use. */
|
|
2014 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
|
|
2015 if (!gimple_nop_p (def_stmt)
|
|
2016 && prop_simulate_again_p (def_stmt))
|
|
2017 return NULL_TREE;
|
145
|
2018 const value_range_equiv *vr = x_vr_values->get_value_range (name);
|
131
|
2019 tree singleton;
|
|
2020 if (vr->singleton_p (&singleton))
|
|
2021 return singleton;
|
|
2022 }
|
|
2023 return name;
|
|
2024 }
|
|
2025
|
|
2026 /* Given STMT, an assignment or call, return its LHS if the type
|
|
2027 of the LHS is suitable for VRP analysis, else return NULL_TREE. */
|
|
2028
|
|
2029 tree
|
|
2030 get_output_for_vrp (gimple *stmt)
|
|
2031 {
|
|
2032 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
|
|
2033 return NULL_TREE;
|
|
2034
|
|
2035 /* We only keep track of ranges in integral and pointer types. */
|
|
2036 tree lhs = gimple_get_lhs (stmt);
|
|
2037 if (TREE_CODE (lhs) == SSA_NAME
|
|
2038 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
|
|
2039 /* It is valid to have NULL MIN/MAX values on a type. See
|
|
2040 build_range_type. */
|
|
2041 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
|
|
2042 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
|
|
2043 || POINTER_TYPE_P (TREE_TYPE (lhs))))
|
|
2044 return lhs;
|
|
2045
|
|
2046 return NULL_TREE;
|
|
2047 }
|
|
2048
|
|
2049 /* Visit assignment STMT. If it produces an interesting range, record
|
|
2050 the range in VR and set LHS to OUTPUT_P. */
|
|
2051
|
|
2052 void
|
|
2053 vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
|
145
|
2054 value_range_equiv *vr)
|
131
|
2055 {
|
|
2056 tree lhs = get_output_for_vrp (stmt);
|
|
2057 *output_p = lhs;
|
|
2058
|
|
2059 /* We only keep track of ranges in integral and pointer types. */
|
|
2060 if (lhs)
|
|
2061 {
|
|
2062 enum gimple_code code = gimple_code (stmt);
|
|
2063
|
|
2064 /* Try folding the statement to a constant first. */
|
|
2065 x_vr_values = this;
|
|
2066 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
|
|
2067 vrp_valueize_1);
|
|
2068 x_vr_values = NULL;
|
|
2069 if (tem)
|
|
2070 {
|
|
2071 if (TREE_CODE (tem) == SSA_NAME
|
|
2072 && (SSA_NAME_IS_DEFAULT_DEF (tem)
|
|
2073 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
|
|
2074 {
|
|
2075 extract_range_from_ssa_name (vr, tem);
|
|
2076 return;
|
|
2077 }
|
|
2078 else if (is_gimple_min_invariant (tem))
|
|
2079 {
|
145
|
2080 vr->set (tem);
|
131
|
2081 return;
|
|
2082 }
|
|
2083 }
|
|
2084 /* Then dispatch to value-range extracting functions. */
|
|
2085 if (code == GIMPLE_CALL)
|
|
2086 extract_range_basic (vr, stmt);
|
|
2087 else
|
|
2088 extract_range_from_assignment (vr, as_a <gassign *> (stmt));
|
|
2089 }
|
|
2090 }
|
|
2091
|
|
2092 /* Helper that gets the value range of the SSA_NAME with version I
|
|
2093 or a symbolic range containing the SSA_NAME only if the value range
|
145
|
2094 is varying or undefined. Uses TEM as storage for the alternate range. */
|
|
2095
|
|
2096 const value_range_equiv *
|
|
2097 vr_values::get_vr_for_comparison (int i, value_range_equiv *tem)
|
131
|
2098 {
|
145
|
2099 /* Shallow-copy equiv bitmap. */
|
|
2100 const value_range_equiv *vr = get_value_range (ssa_name (i));
|
131
|
2101
|
|
2102 /* If name N_i does not have a valid range, use N_i as its own
|
|
2103 range. This allows us to compare against names that may
|
|
2104 have N_i in their ranges. */
|
145
|
2105 if (vr->varying_p () || vr->undefined_p ())
|
|
2106 {
|
|
2107 tem->set (ssa_name (i));
|
|
2108 return tem;
|
|
2109 }
|
131
|
2110
|
|
2111 return vr;
|
|
2112 }
|
|
2113
|
|
2114 /* Compare all the value ranges for names equivalent to VAR with VAL
|
|
2115 using comparison code COMP. Return the same value returned by
|
|
2116 compare_range_with_value, including the setting of
|
|
2117 *STRICT_OVERFLOW_P. */
|
|
2118
|
|
2119 tree
|
|
2120 vr_values::compare_name_with_value (enum tree_code comp, tree var, tree val,
|
|
2121 bool *strict_overflow_p, bool use_equiv_p)
|
|
2122 {
|
|
2123 /* Get the set of equivalences for VAR. */
|
145
|
2124 bitmap e = get_value_range (var)->equiv ();
|
131
|
2125
|
|
2126 /* Start at -1. Set it to 0 if we do a comparison without relying
|
|
2127 on overflow, or 1 if all comparisons rely on overflow. */
|
145
|
2128 int used_strict_overflow = -1;
|
131
|
2129
|
|
2130 /* Compare vars' value range with val. */
|
145
|
2131 value_range_equiv tem_vr;
|
|
2132 const value_range_equiv *equiv_vr
|
|
2133 = get_vr_for_comparison (SSA_NAME_VERSION (var), &tem_vr);
|
|
2134 bool sop = false;
|
|
2135 tree retval = compare_range_with_value (comp, equiv_vr, val, &sop);
|
131
|
2136 if (retval)
|
|
2137 used_strict_overflow = sop ? 1 : 0;
|
|
2138
|
|
2139 /* If the equiv set is empty we have done all work we need to do. */
|
|
2140 if (e == NULL)
|
|
2141 {
|
145
|
2142 if (retval && used_strict_overflow > 0)
|
131
|
2143 *strict_overflow_p = true;
|
|
2144 return retval;
|
|
2145 }
|
|
2146
|
145
|
2147 unsigned i;
|
|
2148 bitmap_iterator bi;
|
131
|
2149 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
|
|
2150 {
|
|
2151 tree name = ssa_name (i);
|
145
|
2152 if (!name)
|
131
|
2153 continue;
|
|
2154
|
145
|
2155 if (!use_equiv_p
|
|
2156 && !SSA_NAME_IS_DEFAULT_DEF (name)
|
131
|
2157 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
|
|
2158 continue;
|
|
2159
|
145
|
2160 equiv_vr = get_vr_for_comparison (i, &tem_vr);
|
131
|
2161 sop = false;
|
145
|
2162 tree t = compare_range_with_value (comp, equiv_vr, val, &sop);
|
131
|
2163 if (t)
|
|
2164 {
|
|
2165 /* If we get different answers from different members
|
|
2166 of the equivalence set this check must be in a dead
|
|
2167 code region. Folding it to a trap representation
|
|
2168 would be correct here. For now just return don't-know. */
|
|
2169 if (retval != NULL
|
|
2170 && t != retval)
|
|
2171 {
|
|
2172 retval = NULL_TREE;
|
|
2173 break;
|
|
2174 }
|
|
2175 retval = t;
|
|
2176
|
|
2177 if (!sop)
|
|
2178 used_strict_overflow = 0;
|
|
2179 else if (used_strict_overflow < 0)
|
|
2180 used_strict_overflow = 1;
|
|
2181 }
|
|
2182 }
|
|
2183
|
145
|
2184 if (retval && used_strict_overflow > 0)
|
131
|
2185 *strict_overflow_p = true;
|
|
2186
|
|
2187 return retval;
|
|
2188 }
|
|
2189
|
|
2190
|
|
2191 /* Given a comparison code COMP and names N1 and N2, compare all the
|
|
2192 ranges equivalent to N1 against all the ranges equivalent to N2
|
|
2193 to determine the value of N1 COMP N2. Return the same value
|
|
2194 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
|
|
2195 whether we relied on undefined signed overflow in the comparison. */
|
|
2196
|
|
2197
|
|
2198 tree
|
|
2199 vr_values::compare_names (enum tree_code comp, tree n1, tree n2,
|
|
2200 bool *strict_overflow_p)
|
|
2201 {
|
|
2202 /* Compare the ranges of every name equivalent to N1 against the
|
|
2203 ranges of every name equivalent to N2. */
|
145
|
2204 bitmap e1 = get_value_range (n1)->equiv ();
|
|
2205 bitmap e2 = get_value_range (n2)->equiv ();
|
131
|
2206
|
|
2207 /* Use the fake bitmaps if e1 or e2 are not available. */
|
145
|
2208 static bitmap s_e1 = NULL, s_e2 = NULL;
|
|
2209 static bitmap_obstack *s_obstack = NULL;
|
131
|
2210 if (s_obstack == NULL)
|
|
2211 {
|
|
2212 s_obstack = XNEW (bitmap_obstack);
|
|
2213 bitmap_obstack_initialize (s_obstack);
|
|
2214 s_e1 = BITMAP_ALLOC (s_obstack);
|
|
2215 s_e2 = BITMAP_ALLOC (s_obstack);
|
|
2216 }
|
|
2217 if (e1 == NULL)
|
|
2218 e1 = s_e1;
|
|
2219 if (e2 == NULL)
|
|
2220 e2 = s_e2;
|
|
2221
|
|
2222 /* Add N1 and N2 to their own set of equivalences to avoid
|
|
2223 duplicating the body of the loop just to check N1 and N2
|
|
2224 ranges. */
|
|
2225 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
|
|
2226 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
|
|
2227
|
|
2228 /* If the equivalence sets have a common intersection, then the two
|
|
2229 names can be compared without checking their ranges. */
|
|
2230 if (bitmap_intersect_p (e1, e2))
|
|
2231 {
|
|
2232 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
|
|
2233 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
|
|
2234
|
|
2235 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
|
|
2236 ? boolean_true_node
|
|
2237 : boolean_false_node;
|
|
2238 }
|
|
2239
|
|
2240 /* Start at -1. Set it to 0 if we do a comparison without relying
|
|
2241 on overflow, or 1 if all comparisons rely on overflow. */
|
145
|
2242 int used_strict_overflow = -1;
|
131
|
2243
|
|
2244 /* Otherwise, compare all the equivalent ranges. First, add N1 and
|
|
2245 N2 to their own set of equivalences to avoid duplicating the body
|
|
2246 of the loop just to check N1 and N2 ranges. */
|
145
|
2247 bitmap_iterator bi1;
|
|
2248 unsigned i1;
|
131
|
2249 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
|
|
2250 {
|
145
|
2251 if (!ssa_name (i1))
|
131
|
2252 continue;
|
|
2253
|
145
|
2254 value_range_equiv tem_vr1;
|
|
2255 const value_range_equiv *vr1 = get_vr_for_comparison (i1, &tem_vr1);
|
|
2256
|
|
2257 tree t = NULL_TREE, retval = NULL_TREE;
|
|
2258 bitmap_iterator bi2;
|
|
2259 unsigned i2;
|
131
|
2260 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
|
|
2261 {
|
145
|
2262 if (!ssa_name (i2))
|
131
|
2263 continue;
|
|
2264
|
|
2265 bool sop = false;
|
|
2266
|
145
|
2267 value_range_equiv tem_vr2;
|
|
2268 const value_range_equiv *vr2 = get_vr_for_comparison (i2, &tem_vr2);
|
|
2269
|
|
2270 t = compare_ranges (comp, vr1, vr2, &sop);
|
131
|
2271 if (t)
|
|
2272 {
|
|
2273 /* If we get different answers from different members
|
|
2274 of the equivalence set this check must be in a dead
|
|
2275 code region. Folding it to a trap representation
|
|
2276 would be correct here. For now just return don't-know. */
|
145
|
2277 if (retval != NULL && t != retval)
|
131
|
2278 {
|
|
2279 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
|
|
2280 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
|
|
2281 return NULL_TREE;
|
|
2282 }
|
|
2283 retval = t;
|
|
2284
|
|
2285 if (!sop)
|
|
2286 used_strict_overflow = 0;
|
|
2287 else if (used_strict_overflow < 0)
|
|
2288 used_strict_overflow = 1;
|
|
2289 }
|
|
2290 }
|
|
2291
|
|
2292 if (retval)
|
|
2293 {
|
|
2294 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
|
|
2295 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
|
|
2296 if (used_strict_overflow > 0)
|
|
2297 *strict_overflow_p = true;
|
|
2298 return retval;
|
|
2299 }
|
|
2300 }
|
|
2301
|
|
2302 /* None of the equivalent ranges are useful in computing this
|
|
2303 comparison. */
|
|
2304 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
|
|
2305 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
|
|
2306 return NULL_TREE;
|
|
2307 }
|
|
2308
|
|
2309 /* Helper function for vrp_evaluate_conditional_warnv & other
|
|
2310 optimizers. */
|
|
2311
|
|
2312 tree
|
|
2313 vr_values::vrp_evaluate_conditional_warnv_with_ops_using_ranges
|
|
2314 (enum tree_code code, tree op0, tree op1, bool * strict_overflow_p)
|
|
2315 {
|
145
|
2316 const value_range_equiv *vr0, *vr1;
|
131
|
2317 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
|
|
2318 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
|
|
2319
|
|
2320 tree res = NULL_TREE;
|
|
2321 if (vr0 && vr1)
|
|
2322 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
|
|
2323 if (!res && vr0)
|
|
2324 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
|
|
2325 if (!res && vr1)
|
|
2326 res = (compare_range_with_value
|
|
2327 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
|
|
2328 return res;
|
|
2329 }
|
|
2330
|
|
2331 /* Helper function for vrp_evaluate_conditional_warnv. */
|
|
2332
|
|
2333 tree
|
|
2334 vr_values::vrp_evaluate_conditional_warnv_with_ops (enum tree_code code,
|
|
2335 tree op0, tree op1,
|
|
2336 bool use_equiv_p,
|
|
2337 bool *strict_overflow_p,
|
|
2338 bool *only_ranges)
|
|
2339 {
|
|
2340 tree ret;
|
|
2341 if (only_ranges)
|
|
2342 *only_ranges = true;
|
|
2343
|
|
2344 /* We only deal with integral and pointer types. */
|
|
2345 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
|
|
2346 && !POINTER_TYPE_P (TREE_TYPE (op0)))
|
|
2347 return NULL_TREE;
|
|
2348
|
|
2349 /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
|
|
2350 as a simple equality test, then prefer that over its current form
|
|
2351 for evaluation.
|
|
2352
|
|
2353 An overflow test which collapses to an equality test can always be
|
|
2354 expressed as a comparison of one argument against zero. Overflow
|
|
2355 occurs when the chosen argument is zero and does not occur if the
|
|
2356 chosen argument is not zero. */
|
|
2357 tree x;
|
|
2358 if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
|
|
2359 {
|
|
2360 wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
|
|
2361 /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
|
|
2362 B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
|
|
2363 B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
|
|
2364 B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
|
|
2365 if (integer_zerop (x))
|
|
2366 {
|
|
2367 op1 = x;
|
|
2368 code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
|
|
2369 }
|
|
2370 /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
|
|
2371 B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
|
|
2372 B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
|
|
2373 B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
|
|
2374 else if (wi::to_wide (x) == max - 1)
|
|
2375 {
|
|
2376 op0 = op1;
|
|
2377 op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
|
|
2378 code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
|
|
2379 }
|
145
|
2380 else
|
|
2381 {
|
|
2382 value_range vro, vri;
|
|
2383 if (code == GT_EXPR || code == GE_EXPR)
|
|
2384 {
|
|
2385 vro.set (TYPE_MIN_VALUE (TREE_TYPE (op0)), x, VR_ANTI_RANGE);
|
|
2386 vri.set (TYPE_MIN_VALUE (TREE_TYPE (op0)), x);
|
|
2387 }
|
|
2388 else if (code == LT_EXPR || code == LE_EXPR)
|
|
2389 {
|
|
2390 vro.set (TYPE_MIN_VALUE (TREE_TYPE (op0)), x);
|
|
2391 vri.set (TYPE_MIN_VALUE (TREE_TYPE (op0)), x, VR_ANTI_RANGE);
|
|
2392 }
|
|
2393 else
|
|
2394 gcc_unreachable ();
|
|
2395 const value_range_equiv *vr0 = get_value_range (op0);
|
|
2396 /* If vro, the range for OP0 to pass the overflow test, has
|
|
2397 no intersection with *vr0, OP0's known range, then the
|
|
2398 overflow test can't pass, so return the node for false.
|
|
2399 If it is the inverted range, vri, that has no
|
|
2400 intersection, then the overflow test must pass, so return
|
|
2401 the node for true. In other cases, we could proceed with
|
|
2402 a simplified condition comparing OP0 and X, with LE_EXPR
|
|
2403 for previously LE_ or LT_EXPR and GT_EXPR otherwise, but
|
|
2404 the comments next to the enclosing if suggest it's not
|
|
2405 generally profitable to do so. */
|
|
2406 vro.intersect (vr0);
|
|
2407 if (vro.undefined_p ())
|
|
2408 return boolean_false_node;
|
|
2409 vri.intersect (vr0);
|
|
2410 if (vri.undefined_p ())
|
|
2411 return boolean_true_node;
|
|
2412 }
|
131
|
2413 }
|
|
2414
|
|
2415 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
|
|
2416 (code, op0, op1, strict_overflow_p)))
|
|
2417 return ret;
|
|
2418 if (only_ranges)
|
|
2419 *only_ranges = false;
|
|
2420 /* Do not use compare_names during propagation, it's quadratic. */
|
|
2421 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
|
|
2422 && use_equiv_p)
|
|
2423 return compare_names (code, op0, op1, strict_overflow_p);
|
|
2424 else if (TREE_CODE (op0) == SSA_NAME)
|
|
2425 return compare_name_with_value (code, op0, op1,
|
|
2426 strict_overflow_p, use_equiv_p);
|
|
2427 else if (TREE_CODE (op1) == SSA_NAME)
|
|
2428 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
|
|
2429 strict_overflow_p, use_equiv_p);
|
|
2430 return NULL_TREE;
|
|
2431 }
|
|
2432
|
|
2433 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
|
145
|
2434 information. Return NULL if the conditional cannot be evaluated.
|
131
|
2435 The ranges of all the names equivalent with the operands in COND
|
|
2436 will be used when trying to compute the value. If the result is
|
|
2437 based on undefined signed overflow, issue a warning if
|
|
2438 appropriate. */
|
|
2439
|
|
2440 tree
|
|
2441 vr_values::vrp_evaluate_conditional (tree_code code, tree op0,
|
|
2442 tree op1, gimple *stmt)
|
|
2443 {
|
|
2444 bool sop;
|
|
2445 tree ret;
|
|
2446 bool only_ranges;
|
|
2447
|
|
2448 /* Some passes and foldings leak constants with overflow flag set
|
|
2449 into the IL. Avoid doing wrong things with these and bail out. */
|
|
2450 if ((TREE_CODE (op0) == INTEGER_CST
|
|
2451 && TREE_OVERFLOW (op0))
|
|
2452 || (TREE_CODE (op1) == INTEGER_CST
|
|
2453 && TREE_OVERFLOW (op1)))
|
|
2454 return NULL_TREE;
|
|
2455
|
|
2456 sop = false;
|
|
2457 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
|
|
2458 &only_ranges);
|
|
2459
|
|
2460 if (ret && sop)
|
|
2461 {
|
|
2462 enum warn_strict_overflow_code wc;
|
|
2463 const char* warnmsg;
|
|
2464
|
|
2465 if (is_gimple_min_invariant (ret))
|
|
2466 {
|
|
2467 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
|
|
2468 warnmsg = G_("assuming signed overflow does not occur when "
|
|
2469 "simplifying conditional to constant");
|
|
2470 }
|
|
2471 else
|
|
2472 {
|
|
2473 wc = WARN_STRICT_OVERFLOW_COMPARISON;
|
|
2474 warnmsg = G_("assuming signed overflow does not occur when "
|
|
2475 "simplifying conditional");
|
|
2476 }
|
|
2477
|
|
2478 if (issue_strict_overflow_warning (wc))
|
|
2479 {
|
|
2480 location_t location;
|
|
2481
|
|
2482 if (!gimple_has_location (stmt))
|
|
2483 location = input_location;
|
|
2484 else
|
|
2485 location = gimple_location (stmt);
|
|
2486 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
|
|
2487 }
|
|
2488 }
|
|
2489
|
|
2490 if (warn_type_limits
|
|
2491 && ret && only_ranges
|
|
2492 && TREE_CODE_CLASS (code) == tcc_comparison
|
|
2493 && TREE_CODE (op0) == SSA_NAME)
|
|
2494 {
|
|
2495 /* If the comparison is being folded and the operand on the LHS
|
|
2496 is being compared against a constant value that is outside of
|
|
2497 the natural range of OP0's type, then the predicate will
|
|
2498 always fold regardless of the value of OP0. If -Wtype-limits
|
|
2499 was specified, emit a warning. */
|
|
2500 tree type = TREE_TYPE (op0);
|
145
|
2501 const value_range_equiv *vr0 = get_value_range (op0);
|
131
|
2502
|
|
2503 if (vr0->kind () == VR_RANGE
|
|
2504 && INTEGRAL_TYPE_P (type)
|
|
2505 && vrp_val_is_min (vr0->min ())
|
|
2506 && vrp_val_is_max (vr0->max ())
|
|
2507 && is_gimple_min_invariant (op1))
|
|
2508 {
|
|
2509 location_t location;
|
|
2510
|
|
2511 if (!gimple_has_location (stmt))
|
|
2512 location = input_location;
|
|
2513 else
|
|
2514 location = gimple_location (stmt);
|
|
2515
|
|
2516 warning_at (location, OPT_Wtype_limits,
|
|
2517 integer_zerop (ret)
|
|
2518 ? G_("comparison always false "
|
|
2519 "due to limited range of data type")
|
|
2520 : G_("comparison always true "
|
|
2521 "due to limited range of data type"));
|
|
2522 }
|
|
2523 }
|
|
2524
|
|
2525 return ret;
|
|
2526 }
|
|
2527
|
|
2528
|
|
2529 /* Visit conditional statement STMT. If we can determine which edge
|
|
2530 will be taken out of STMT's basic block, record it in
|
|
2531 *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
|
|
2532
|
|
2533 void
|
|
2534 vr_values::vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
|
|
2535 {
|
|
2536 tree val;
|
|
2537
|
|
2538 *taken_edge_p = NULL;
|
|
2539
|
|
2540 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2541 {
|
|
2542 tree use;
|
|
2543 ssa_op_iter i;
|
|
2544
|
|
2545 fprintf (dump_file, "\nVisiting conditional with predicate: ");
|
|
2546 print_gimple_stmt (dump_file, stmt, 0);
|
|
2547 fprintf (dump_file, "\nWith known ranges\n");
|
|
2548
|
|
2549 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
|
|
2550 {
|
|
2551 fprintf (dump_file, "\t");
|
|
2552 print_generic_expr (dump_file, use);
|
|
2553 fprintf (dump_file, ": ");
|
|
2554 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
|
|
2555 }
|
|
2556
|
|
2557 fprintf (dump_file, "\n");
|
|
2558 }
|
|
2559
|
|
2560 /* Compute the value of the predicate COND by checking the known
|
|
2561 ranges of each of its operands.
|
|
2562
|
|
2563 Note that we cannot evaluate all the equivalent ranges here
|
|
2564 because those ranges may not yet be final and with the current
|
|
2565 propagation strategy, we cannot determine when the value ranges
|
|
2566 of the names in the equivalence set have changed.
|
|
2567
|
|
2568 For instance, given the following code fragment
|
|
2569
|
|
2570 i_5 = PHI <8, i_13>
|
|
2571 ...
|
|
2572 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
|
|
2573 if (i_14 == 1)
|
|
2574 ...
|
|
2575
|
|
2576 Assume that on the first visit to i_14, i_5 has the temporary
|
|
2577 range [8, 8] because the second argument to the PHI function is
|
|
2578 not yet executable. We derive the range ~[0, 0] for i_14 and the
|
|
2579 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
|
|
2580 the first time, since i_14 is equivalent to the range [8, 8], we
|
|
2581 determine that the predicate is always false.
|
|
2582
|
|
2583 On the next round of propagation, i_13 is determined to be
|
|
2584 VARYING, which causes i_5 to drop down to VARYING. So, another
|
|
2585 visit to i_14 is scheduled. In this second visit, we compute the
|
|
2586 exact same range and equivalence set for i_14, namely ~[0, 0] and
|
|
2587 { i_5 }. But we did not have the previous range for i_5
|
|
2588 registered, so vrp_visit_assignment thinks that the range for
|
|
2589 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
|
|
2590 is not visited again, which stops propagation from visiting
|
|
2591 statements in the THEN clause of that if().
|
|
2592
|
|
2593 To properly fix this we would need to keep the previous range
|
|
2594 value for the names in the equivalence set. This way we would've
|
|
2595 discovered that from one visit to the other i_5 changed from
|
|
2596 range [8, 8] to VR_VARYING.
|
|
2597
|
|
2598 However, fixing this apparent limitation may not be worth the
|
|
2599 additional checking. Testing on several code bases (GCC, DLV,
|
|
2600 MICO, TRAMP3D and SPEC2000) showed that doing this results in
|
|
2601 4 more predicates folded in SPEC. */
|
|
2602
|
|
2603 bool sop;
|
|
2604 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
|
|
2605 gimple_cond_lhs (stmt),
|
|
2606 gimple_cond_rhs (stmt),
|
|
2607 false, &sop, NULL);
|
|
2608 if (val)
|
|
2609 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
|
|
2610
|
|
2611 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2612 {
|
|
2613 fprintf (dump_file, "\nPredicate evaluates to: ");
|
|
2614 if (val == NULL_TREE)
|
|
2615 fprintf (dump_file, "DON'T KNOW\n");
|
|
2616 else
|
|
2617 print_generic_stmt (dump_file, val);
|
|
2618 }
|
|
2619 }
|
|
2620
|
|
2621 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
|
|
used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2 and
|
|
2623 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
|
|
2624 Returns true if the default label is not needed. */
|
|
2625
|
|
static bool
find_case_label_ranges (gswitch *stmt, const value_range_equiv *vr,
			size_t *min_idx1, size_t *max_idx1,
			size_t *min_idx2, size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min (), max = vr->max ();

  /* Callers must have filtered out varying and undefined ranges.  */
  gcc_checking_assert (!vr->varying_p () && !vr->undefined_p ());

  /* Find the case labels intersecting [MIN, MAX]; the default label is
     needed iff some value of [MIN, MAX] lies outside those labels.  */
  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (vr->kind () == VR_RANGE)
    {
      /* For a plain range the labels found above are exactly the
	 reachable ones and form a single contiguous index range.  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* From here on VR is an anti-range ~[MIN, MAX].  Conservatively
     set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  /* No label intersects the excluded values: nothing to prune.  */
  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  Labels only partially covered by the excluded
     range must remain reachable, so shrink [i, j] accordingly.  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* Upper piece [j + 1, n - 1] is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* Lower piece [1, i - 1] is empty: promote the upper piece to the
	 first output range and mark the second output range empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
|
|
2698
|
|
2699 /* Visit switch statement STMT. If we can determine which edge
|
|
2700 will be taken out of STMT's basic block, record it in
|
|
*TAKEN_EDGE_P.  Otherwise, *TAKEN_EDGE_P is set to NULL.  */
|
|
2702
|
|
void
vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
{
  tree op, val;
  const value_range_equiv *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only an SSA name index can have recorded range information.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* An unknown or symbolic range cannot select a unique destination.  */
  if (vr->undefined_p ()
      || vr->varying_p ()
      || vr->symbolic_p ())
    return;

  /* Find the single edge that is taken from the switch expression.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL. If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	  != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return;
	}
      /* All labels in the first index range must share VAL's target.  */
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return;
	    }
	}
      /* Likewise for the second index range ([k, l] is empty unless VR
	 was an anti-range split into two label runs).  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return;
	    }
	}
    }

  /* All reachable labels share one destination block; record its edge.  */
  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (cfun, CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val));
    }
}
|
|
2787
|
|
2788
|
|
2789 /* Evaluate statement STMT. If the statement produces a useful range,
|
|
set VR and corresponding OUTPUT_P.
|
|
2791
|
|
2792 If STMT is a conditional branch and we can determine its truth
|
|
2793 value, the taken edge is recorded in *TAKEN_EDGE_P. */
|
|
2794
|
|
2795 void
|
|
2796 vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
|
145
|
2797 tree *output_p, value_range_equiv *vr)
|
131
|
2798 {
|
|
2799
|
|
2800 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2801 {
|
|
2802 fprintf (dump_file, "\nVisiting statement:\n");
|
|
2803 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
|
|
2804 }
|
|
2805
|
|
2806 if (!stmt_interesting_for_vrp (stmt))
|
|
2807 gcc_assert (stmt_ends_bb_p (stmt));
|
|
2808 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
|
|
2809 vrp_visit_assignment_or_call (stmt, output_p, vr);
|
|
2810 else if (gimple_code (stmt) == GIMPLE_COND)
|
|
2811 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
|
|
2812 else if (gimple_code (stmt) == GIMPLE_SWITCH)
|
|
2813 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
|
|
2814 }
|
|
2815
|
|
2816 /* Visit all arguments for PHI node PHI that flow through executable
|
|
2817 edges. If a valid value range can be derived from all the incoming
|
|
2818 value ranges, set a new range in VR_RESULT. */
|
|
2819
|
|
void
vr_values::extract_range_from_phi_node (gphi *phi,
					value_range_equiv *vr_result)
{
  tree lhs = PHI_RESULT (phi);
  const value_range_equiv *lhs_vr = get_value_range (lhs);
  bool first = true;
  int old_edges;
  class loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  bool may_simulate_backedge_again = false;
  int edges = 0;
  /* Union the ranges of all arguments flowing in over executable
     edges into *VR_RESULT.  */
  for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "    Argument #%d (%d -> %d %sexecutable)\n",
		   (int) i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  /* VR_ARG points either at the argument's own lattice entry or
	     at VR_ARG_TEM when a sanitized local copy is needed.  */
	  value_range_equiv vr_arg_tem;
	  const value_range_equiv *vr_arg = &vr_arg_tem;

	  ++edges;

	  tree arg = PHI_ARG_DEF (phi, i);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      /* See if we are eventually going to change one of the args.  */
	      gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
	      if (! gimple_nop_p (def_stmt)
		  && prop_simulate_again_p (def_stmt)
		  && e->flags & EDGE_DFS_BACK)
		may_simulate_backedge_again = true;

	      const value_range_equiv *vr_arg_ = get_value_range (arg);
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK)
		{
		  if (!vr_arg_->varying_p () && !vr_arg_->undefined_p ())
		    {
		      /* Copy only the bounds, dropping the equivalence
			 set (NULL), and give up on symbolic bounds.  */
		      vr_arg_tem.set (vr_arg_->min (), vr_arg_->max (), NULL,
				      vr_arg_->kind ());
		      if (vr_arg_tem.symbolic_p ())
			vr_arg_tem.set_varying (TREE_TYPE (arg));
		    }
		  else
		    vr_arg = vr_arg_;
		}
	      /* If the non-backedge arguments range is VR_VARYING then
		 we can still try recording a simple equivalence.  */
	      else if (vr_arg_->varying_p ())
		vr_arg_tem.set (arg);
	      else
		vr_arg = vr_arg_;
	    }
	  else
	    {
	      /* Constant argument: use it as a singleton range.  */
	      if (TREE_OVERFLOW_P (arg))
		arg = drop_tree_overflow (arg);

	      vr_arg_tem.set (arg);
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    vr_result->deep_copy (vr_arg);
	  else
	    vr_result->union_ (vr_arg);
	  first = false;

	  /* Once VARYING the union cannot improve; stop early.  */
	  if (vr_result->varying_p ())
	    break;
	}
    }

  if (vr_result->varying_p ())
    goto varying;
  else if (vr_result->undefined_p ())
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  If we will not
     simulate this PHI again via the backedge allow us to iterate.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && !lhs_vr->undefined_p ()
      && may_simulate_backedge_again)
    {
      /* Compare old and new ranges, fall back to varying if the
	 values are not comparable.  */
      int cmp_min = compare_values (lhs_vr->min (), vr_result->min ());
      if (cmp_min == -2)
	goto varying;
      int cmp_max = compare_values (lhs_vr->max (), vr_result->max ());
      if (cmp_max == -2)
	goto varying;

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->kind () != VR_RANGE || vr_result->kind () != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is larger than the previous one
	 retain the old value.  If the new minimum value is smaller
	 than the previous one and not -INF go all the way to -INF + 1.
	 In the first case, to avoid infinite bouncing between different
	 minimums, and in the other case to avoid iterating millions of
	 times to reach -INF.  Going to -INF + 1 also lets the following
	 iteration compute whether there will be any overflow, at the
	 expense of one additional iteration.  */
      tree new_min = vr_result->min ();
      tree new_max = vr_result->max ();
      if (cmp_min < 0)
	new_min = lhs_vr->min ();
      else if (cmp_min > 0
	       && (TREE_CODE (vr_result->min ()) != INTEGER_CST
		   || tree_int_cst_lt (vrp_val_min (vr_result->type ()),
				       vr_result->min ())))
	new_min = int_const_binop (PLUS_EXPR,
				   vrp_val_min (vr_result->type ()),
				   build_int_cst (vr_result->type (), 1));

      /* Similarly for the maximum value.  */
      if (cmp_max > 0)
	new_max = lhs_vr->max ();
      else if (cmp_max < 0
	       && (TREE_CODE (vr_result->max ()) != INTEGER_CST
		   || tree_int_cst_lt (vr_result->max (),
				       vrp_val_max (vr_result->type ()))))
	new_max = int_const_binop (MINUS_EXPR,
				   vrp_val_max (vr_result->type ()),
				   build_int_cst (vr_result->type (), 1));

      vr_result->update (new_min, new_max, vr_result->kind ());

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may know more about its value-range.  */
      if (cmp_min > 0 || cmp_min < 0
	  || cmp_max < 0 || cmp_max > 0)
	goto scev_check;

      goto infinite_check;
    }

  goto update_range;

varying:
  vr_result->set_varying (TREE_TYPE (lhs));

scev_check:
  /* If this is a loop PHI node SCEV may know more about its value-range.
     scev_check can be reached from two paths, one is a fall through from above
     "varying" label, the other is direct goto from code block which tries to
     avoid infinite simulation.  */
  if (scev_initialized_p ()
      && (l = loop_containing_stmt (phi))
      && l->header == gimple_bb (phi))
    adjust_range_with_scev (vr_result, l, phi, lhs);

infinite_check:
  /* If we will end up with a (-INF, +INF) range, set it to
     VARYING.  Same if the previous max value was invalid for
     the type and we end up with vr_result.min > vr_result.max.  */
  if ((!vr_result->varying_p () && !vr_result->undefined_p ())
      && !((vrp_val_is_max (vr_result->max ()) && vrp_val_is_min (vr_result->min ()))
	   || compare_values (vr_result->min (), vr_result->max ()) > 0))
    ;
  else
    vr_result->set_varying (TREE_TYPE (lhs));

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  return;
}
|
|
3028
|
|
3029 /* Simplify boolean operations if the source is known
|
|
3030 to be already a boolean. */
|
|
/* Simplify boolean operations if the source is known
   to be already a boolean.  */
bool
vr_values::simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi,
					    gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to be in [0, 1].  */
  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
  if (rhs_code == EQ_EXPR)
    {
      /* A == C becomes A != (C ^ 1) for a constant boolean C.  */
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = int_const_binop (BIT_XOR_EXPR, op1,
			       build_int_cst (TREE_TYPE (op1), 1));
      else
	return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0), op0);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      tree tem = make_ssa_name (TREE_TYPE (op0));
      gassign *newop
	= gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      /* The XOR of two booleans is still a boolean; record that so
	 later passes know TEM is in [0, 1].  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
	  && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
	set_range_info (tem, VR_RANGE,
			wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
			wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
|
|
3099
|
|
3100 /* Simplify a division or modulo operator to a right shift or bitwise and
|
|
3101 if the first operand is unsigned or is greater than zero and the second
|
|
3102 operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
|
|
3103 constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
|
|
3104 optimize it into just op0 if op0's range is known to be a subset of
|
|
3105 [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
|
|
3106 modulo. */
|
|
3107
|
|
bool
vr_values::simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi,
					     gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op0min = NULL_TREE, op0max = NULL_TREE;
  tree op1min = op1;
  const value_range_equiv *vr = NULL;

  /* Determine constant bounds [OP0MIN, OP0MAX] for the first operand,
     either from the constant itself or from its recorded range.  */
  if (TREE_CODE (op0) == INTEGER_CST)
    {
      op0min = op0;
      op0max = op0;
    }
  else
    {
      vr = get_value_range (op0);
      if (range_int_cst_p (vr))
	{
	  op0min = vr->min ();
	  op0max = vr->max ();
	}
    }

  /* For OP0 % OP1 with an SSA-name OP1, the low bound of OP1's range
     is a conservative stand-in for OP1 in the test below.  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1) == SSA_NAME)
    {
      const value_range_equiv *vr1 = get_value_range (op1);
      if (range_int_cst_p (vr1))
	op1min = vr1->min ();
    }
  /* OP0 % OP1 is a no-op when OP0's range already fits in
     [-OP1MIN + 1, OP1MIN - 1] (signed) or [0, OP1MIN - 1] (unsigned).  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1min) == INTEGER_CST
      && tree_int_cst_sgn (op1min) == 1
      && op0max
      && tree_int_cst_lt (op0max, op1min))
    {
      if (TYPE_UNSIGNED (TREE_TYPE (op0))
	  || tree_int_cst_sgn (op0min) >= 0
	  || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
			      op0min))
	{
	  /* If op0 already has the range op0 % op1 has,
	     then TRUNC_MOD_EXPR won't change anything.  */
	  gimple_assign_set_rhs_from_tree (gsi, op0);
	  return true;
	}
    }

  if (TREE_CODE (op0) != SSA_NAME)
    return false;

  /* The shift/mask transforms below need a power-of-two divisor.  */
  if (!integer_pow2p (op1))
    {
      /* X % -Y can be only optimized into X % Y either if
	 X is not INT_MIN, or Y is not -1.  Fold it now, as after
	 remove_range_assertions the range info might be not available
	 anymore.  */
      if (rhs_code == TRUNC_MOD_EXPR
	  && fold_stmt (gsi, follow_single_use_edges))
	return true;
      return false;
    }

  /* The transforms also need a non-negative dividend; trivially true
     for unsigned types, otherwise prove OP0 >= 0 from its range.  */
  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    val = integer_one_node;
  else
    {
      bool sop = false;

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      /* SOP set means the answer relies on signed overflow being
	 undefined; tell the user if requested.  */
      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* X / 2**N  ->  X >> N.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* X % 2**N  ->  X & (2**N - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      fold_stmt (gsi, follow_single_use_edges);
      return true;
    }

  return false;
}
|
|
3229
|
|
3230 /* Simplify a min or max if the ranges of the two operands are
|
|
3231 disjoint. Return true if we do simplify. */
|
|
3232
|
|
3233 bool
|
|
3234 vr_values::simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi,
|
|
3235 gimple *stmt)
|
|
3236 {
|
|
3237 tree op0 = gimple_assign_rhs1 (stmt);
|
|
3238 tree op1 = gimple_assign_rhs2 (stmt);
|
|
3239 bool sop = false;
|
|
3240 tree val;
|
|
3241
|
|
3242 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
|
|
3243 (LE_EXPR, op0, op1, &sop));
|
|
3244 if (!val)
|
|
3245 {
|
|
3246 sop = false;
|
|
3247 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
|
|
3248 (LT_EXPR, op0, op1, &sop));
|
|
3249 }
|
|
3250
|
|
3251 if (val)
|
|
3252 {
|
|
3253 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
|
|
3254 {
|
|
3255 location_t location;
|
|
3256
|
|
3257 if (!gimple_has_location (stmt))
|
|
3258 location = input_location;
|
|
3259 else
|
|
3260 location = gimple_location (stmt);
|
|
3261 warning_at (location, OPT_Wstrict_overflow,
|
|
3262 "assuming signed overflow does not occur when "
|
|
3263 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
|
|
3264 }
|
|
3265
|
|
3266 /* VAL == TRUE -> OP0 < or <= op1
|
|
3267 VAL == FALSE -> OP0 > or >= op1. */
|
|
3268 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
|
|
3269 == integer_zerop (val)) ? op0 : op1;
|
|
3270 gimple_assign_set_rhs_from_tree (gsi, res);
|
|
3271 return true;
|
|
3272 }
|
|
3273
|
|
3274 return false;
|
|
3275 }
|
|
3276
|
|
3277 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
|
|
3278 ABS_EXPR. If the operand is <= 0, then simplify the
|
|
3279 ABS_EXPR into a NEGATE_EXPR. */
|
|
3280
|
|
3281 bool
|
|
3282 vr_values::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
|
|
3283 {
|
|
3284 tree op = gimple_assign_rhs1 (stmt);
|
145
|
3285 const value_range_equiv *vr = get_value_range (op);
|
131
|
3286
|
|
3287 if (vr)
|
|
3288 {
|
|
3289 tree val = NULL;
|
|
3290 bool sop = false;
|
|
3291
|
|
3292 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
|
|
3293 if (!val)
|
|
3294 {
|
|
3295 /* The range is neither <= 0 nor > 0. Now see if it is
|
|
3296 either < 0 or >= 0. */
|
|
3297 sop = false;
|
|
3298 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
|
|
3299 &sop);
|
|
3300 }
|
|
3301
|
|
3302 if (val)
|
|
3303 {
|
|
3304 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
|
|
3305 {
|
|
3306 location_t location;
|
|
3307
|
|
3308 if (!gimple_has_location (stmt))
|
|
3309 location = input_location;
|
|
3310 else
|
|
3311 location = gimple_location (stmt);
|
|
3312 warning_at (location, OPT_Wstrict_overflow,
|
|
3313 "assuming signed overflow does not occur when "
|
|
3314 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
|
|
3315 }
|
|
3316
|
|
3317 gimple_assign_set_rhs1 (stmt, op);
|
|
3318 if (integer_zerop (val))
|
|
3319 gimple_assign_set_rhs_code (stmt, SSA_NAME);
|
|
3320 else
|
|
3321 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
|
|
3322 update_stmt (stmt);
|
|
3323 fold_stmt (gsi, follow_single_use_edges);
|
|
3324 return true;
|
|
3325 }
|
|
3326 }
|
|
3327
|
|
3328 return false;
|
|
3329 }
|
|
3330
|
145
|
3331 /* value_range wrapper for wi_set_zero_nonzero_bits.
|
|
3332
|
|
3333 Return TRUE if VR was a constant range and we were able to compute
|
|
3334 the bit masks. */
|
|
3335
|
|
3336 static bool
|
|
3337 vr_set_zero_nonzero_bits (const tree expr_type,
|
|
3338 const value_range *vr,
|
|
3339 wide_int *may_be_nonzero,
|
|
3340 wide_int *must_be_nonzero)
|
|
3341 {
|
|
3342 if (range_int_cst_p (vr))
|
|
3343 {
|
|
3344 wi_set_zero_nonzero_bits (expr_type,
|
|
3345 wi::to_wide (vr->min ()),
|
|
3346 wi::to_wide (vr->max ()),
|
|
3347 *may_be_nonzero, *must_be_nonzero);
|
|
3348 return true;
|
|
3349 }
|
|
3350 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
|
|
3351 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
|
|
3352 return false;
|
|
3353 }
|
|
3354
|
131
|
3355 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
|
|
3356 If all the bits that are being cleared by & are already
|
|
3357 known to be zero from VR, or all the bits that are being
|
|
3358 set by | are already known to be one from VR, the bit
|
|
3359 operation is redundant. */
|
|
3360
|
|
bool
vr_values::simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi,
					  gimple *stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range vr0, vr1;
  wide_int may_be_nonzero0, may_be_nonzero1;
  wide_int must_be_nonzero0, must_be_nonzero1;
  wide_int mask;

  /* Obtain a range for each operand: recorded range for SSA names,
     singleton range for invariants, give up otherwise.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    vr0.set (op0);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    vr1.set (op1);
  else
    return false;

  /* Bit masks can only be derived from constant ranges.  */
  if (!vr_set_zero_nonzero_bits (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
				 &must_be_nonzero0))
    return false;
  if (!vr_set_zero_nonzero_bits (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
				 &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* OP0 & OP1 == OP0 when every possibly-set bit of OP0 is known
	 set in OP1 (the AND clears nothing of OP0); and vice versa.  */
      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* OP0 | OP1 == OP1 when every possibly-set bit of OP0 is already
	 known set in OP1 (the OR adds nothing); and vice versa.  */
      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  /* Replace the bit operation with the surviving operand.  */
  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
|
|
3435
|
|
3436 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
|
|
3437 a known value range VR.
|
|
3438
|
|
3439 If there is one and only one value which will satisfy the
|
|
3440 conditional, then return that value. Else return NULL.
|
|
3441
|
|
3442 If signed overflow must be undefined for the value to satisfy
|
|
3443 the conditional, then set *STRICT_OVERFLOW_P to true. */
|
|
3444
|
|
static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, const value_range_equiv *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the conditional as it was
     written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      if (cond_code == LT_EXPR)
	{
	  /* OP0 < OP1 is equivalent to OP0 <= OP1 - 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  /* Signal to compare_values_warnv this expr doesn't overflow.  */
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR)
	{
	  /* OP0 > OP1 is equivalent to OP0 >= OP1 + 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  /* Signal to compare_values_warnv this expr doesn't overflow.  */
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      /* Intersect [MIN, MAX] with OP0's known range.  */
      if (compare_values (vr->min (), min) == 1)
	min = vr->min ();
      if (compare_values (vr->max (), max) == -1)
	max = vr->max ();

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}
|
|
3500
|
|
/* Return whether the value range *VR fits in an integer type specified
   by DEST_PRECISION and DEST_SGN.  */

static bool
range_fits_type_p (const value_range_equiv *vr,
		   unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = vr->type ();
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  In those cases every source value
     is representable in the destination, so no range check is needed.  */
  src_precision = TYPE_PRECISION (vr->type ());
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (!range_int_cst_p (vr))
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (wi::to_wide (vr->min ()), 0)
	  || wi::lts_p (wi::to_wide (vr->max ()), 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  If either bound changes under
     sign- or zero-extension to DEST_PRECISION, the range does not
     fit.  */
  tem = wi::ext (wi::to_widest (vr->min ()), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min ()))
    return false;
  tem = wi::ext (wi::to_widest (vr->max ()), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max ()))
    return false;

  return true;
}
|
|
3552
|
|
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Return true if STMT was changed.  */

bool
vr_values::simplify_cond_using_ranges_1 (gcond *stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  /* Only strict relational comparisons (<, <=, >, >=) of an integral
     SSA name against an invariant are candidates; EQ/NE are already
     equality tests.  */
  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      const value_range_equiv *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->kind () == VR_RANGE)
	{
	  /* If exactly one value of OP0 satisfies the comparison,
	     rewrite the condition as OP0 == <that value>.  */
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  If the inverted
	     condition has a singleton solution, the original is
	     equivalent to OP0 != <that value>.  */
	  new_tree = test_for_singularity
		       (invert_tree_comparison (cond_code, false),
			op0, op1, vr);
	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }
  return false;
}
|
|
3634
|
|
/* STMT is a conditional at the end of a basic block.

   If the conditional is of the form SSA_NAME op constant and the SSA_NAME
   was set via a type conversion, try to replace the SSA_NAME with the RHS
   of the type conversion.  Doing so makes the conversion dead which helps
   subsequent passes.  */

void
vr_values::simplify_cond_using_ranges_2 (gcond *stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      /* OP0 must be defined by a (possibly widening/narrowing)
	 conversion.  */
      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return;

      innerop = gimple_assign_rhs1 (def_stmt);

      /* The conversion source must be a non-pointer SSA name that is
	 safe to reference here (not tied to an abnormal PHI), and the
	 pro/demotion must be considered desirable.  */
      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
	{
	  const value_range_equiv *vr = get_value_range (innerop);

	  /* The rewrite is only valid when INNEROP's whole range
	     survives conversion to OP0's type unchanged and the
	     constant is representable in INNEROP's type.  */
	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop)))
	    {
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      update_stmt (stmt);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Folded into: ");
		  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
		  fprintf (dump_file, "\n");
		}
	    }
	}
    }
}
|
|
3695
|
|
/* Simplify a switch statement using the value range of the switch
   argument.  Case labels are truncated or dropped based on the range;
   the actual statement and CFG edits are queued (in
   TO_UPDATE_SWITCH_STMTS and TO_REMOVE_EDGES) and performed later by
   cleanup_edges_and_switches.  Always returns false since the
   statement itself is not modified here.  */

bool
vr_values::simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (stmt);
  const value_range_equiv *vr = NULL;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the (up to two) label index ranges that
     remain reachable; two ranges are needed for anti-ranges.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if (vr->varying_p ()
	  || vr->undefined_p ()
	  || vr->symbolic_p ())
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* No matching label: make [i, j] empty (i > j).  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  /* Exactly one label matches the constant.  */
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* We can truncate the case label ranges that partially overlap with OP's
     value range.  */
  size_t min_idx = 1, max_idx = 0;
  if (vr != NULL)
    find_case_label_range (stmt, vr->min (), vr->max (), &min_idx, &max_idx);
  /* min_idx <= max_idx only holds when find_case_label_range found
     overlapping labels, so VR is non-NULL inside this block.  */
  if (min_idx <= max_idx)
    {
      tree min_label = gimple_switch_label (stmt, min_idx);
      tree max_label = gimple_switch_label (stmt, max_idx);

      /* Avoid changing the type of the case labels when truncating.  */
      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
      tree vr_min = fold_convert (case_label_type, vr->min ());
      tree vr_max = fold_convert (case_label_type, vr->max ());

      if (vr->kind () == VR_RANGE)
	{
	  /* If OP's value range is [2,8] and the low label range is
	     0 ... 3, truncate the label's range to 2 .. 3.  */
	  if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
	      && CASE_HIGH (min_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
	    CASE_LOW (min_label) = vr_min;

	  /* If OP's value range is [2,8] and the high label range is
	     7 ... 10, truncate the label's range to 7 .. 8.  */
	  if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
	      && CASE_HIGH (max_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
	    CASE_HIGH (max_label) = vr_max;
	}
      else if (vr->kind () == VR_ANTI_RANGE)
	{
	  tree one_cst = build_one_cst (case_label_type);

	  if (min_label == max_label)
	    {
	      /* The anti-range punches a hole in a single label range;
		 shrink it from whichever side the hole touches.  */

	      /* If OP's value range is ~[7,8] and the label's range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
		CASE_LOW (min_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);

	      /* If OP's value range is ~[7,8] and the label's range is
		 5 ... 8, truncate the label's range to 5 ... 6.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
	    }
	  else
	    {
	      /* If OP's value range is ~[2,8] and the low label range is
		 0 ... 3, truncate the label's range to 0 ... 1.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);

	      /* If OP's value range is ~[2,8] and the high label range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
		  && CASE_HIGH (max_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
		CASE_LOW (max_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
	    }
	}

      /* Canonicalize singleton case ranges.  */
      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
	CASE_HIGH (min_label) = NULL_TREE;
      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
	CASE_HIGH (max_label) = NULL_TREE;
    }

  /* We can also eliminate case labels that lie completely outside OP's value
     range.  */

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  The -1 marker distinguishes edges whose
     target label survived from those to be removed.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (cfun,
				     CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
      e->flags |= EDGE_IGNORE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
|
|
3879
|
|
/* Perform the CFG and statement edits queued by
   simplify_switch_using_ranges: remove the edges collected in
   TO_REMOVE_EDGES and install the new case label vectors collected in
   TO_UPDATE_SWITCH_STMTS.  Both vectors are released afterwards.  */

void
vr_values::cleanup_edges_and_switches (void)
{
  int i;
  edge e;
  switch_update *su;

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);

  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates dominators and may leave loops in
     need of fixing.  */
  if (!to_remove_edges.is_empty ())
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
}
|
|
3918
|
|
/* Simplify an integral conversion from an SSA name in STMT.  STMT is
   a conversion whose operand was itself produced by a conversion; if
   dropping the intermediate conversion cannot change the result for
   any value in the inner operand's range, rewrite STMT to convert
   directly from the inner operand.  Return true if STMT changed.  */

static bool
simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree innerop, middleop, finaltype;
  gimple *def_stmt;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  /* The middle operand must itself be defined by a conversion.  */
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  Use get_range_info in
     case innerop was created during substitute-and-fold.  */
  wide_int imin, imax;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
      || get_range_info (innerop, &imin, &imax) != VR_RANGE)
    return false;
  innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
  innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  (The range spanning more values than the middle
     type can represent means distinct inner values collapse.)  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  For unsigned inner
     types probe the value with only the MSB set; for signed, zero.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* If the probe value lies outside (min, max), fall back to min so
     the third comparison below degenerates harmlessly.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
      != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
      != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* Drop the middle conversion: convert INNEROP directly.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  fold_stmt (gsi, follow_single_use_edges);
  return true;
}
|
|
3995
|
|
/* Simplify a conversion from integral SSA name to float in STMT.  If
   the operand's value range fits a (possibly narrower or signed)
   integer mode for which the target supports int-to-float conversion,
   insert a truncation/sign-change and retarget the conversion.
   Return true if STMT changed.  */

bool
vr_values::simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
						   gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  const value_range_equiv *vr = get_value_range (rhs1);
  scalar_float_mode fltmode
    = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  scalar_int_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (!range_int_cst_p (vr))
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = rhs_mode;
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, rhs_mode,
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = NARROWEST_INT_MODE;
      for (;;)
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
	      || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  The new SSA name gets a signed
     nonstandard integer type of MODE's precision.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
|
|
4057
|
|
/* Simplify an internal fn call using ranges if possible.  Handles the
   UBSAN_CHECK_{ADD,SUB,MUL} and {ADD,SUB,MUL}_OVERFLOW internal calls:
   when check_for_binary_op_overflow can decide the overflow outcome
   from the operand ranges, replace the call with plain arithmetic
   (UBSAN case, only when no overflow occurs) or with arithmetic plus a
   constant overflow flag packed in a COMPLEX_EXPR (the *_OVERFLOW
   case).  Return true if STMT was replaced.  */

bool
vr_values::simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi,
						gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);
  tree type;
  if (is_ubsan)
    {
      type = TREE_TYPE (op0);
      if (VECTOR_TYPE_P (type))
	return false;
    }
  else if (gimple_call_lhs (stmt) == NULL_TREE)
    return false;
  else
    /* The *_OVERFLOW ifns return a complex value; the element type is
       the arithmetic type.  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
  /* Bail out unless the overflow outcome is statically known; for
     UBSAN checks we additionally require that no overflow occurs.  */
  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
  else
    {
      /* Perform the arithmetic in an unsigned type when the result
	 overflows or the operand types don't match, so the operation
	 itself has defined wrapping semantics.  */
      int prec = TYPE_PRECISION (type);
      tree utype = type;
      if (ovf
	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
	utype = build_nonstandard_integer_type (prec, 1);
      if (TREE_CODE (op0) == INTEGER_CST)
	op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op0 = gimple_assign_lhs (g);
	}
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op1 = gimple_assign_lhs (g);
	}
      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      if (utype != type)
	{
	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
				   gimple_assign_lhs (g));
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	}
      /* Pack the arithmetic result and the (now constant) overflow
	 flag into the complex value callers expect.  */
      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
			       gimple_assign_lhs (g),
			       build_int_cst (type, ovf));
    }
  gimple_set_location (g, loc);
  gsi_replace (gsi, g, false);
  return true;
}
|
|
4159
|
|
4160 /* Return true if VAR is a two-valued variable. Set a and b with the
|
|
4161 two-values when it is true. Return false otherwise. */
|
|
4162
|
|
4163 bool
|
|
4164 vr_values::two_valued_val_range_p (tree var, tree *a, tree *b)
|
|
4165 {
|
145
|
4166 const value_range_equiv *vr = get_value_range (var);
|
131
|
4167 if (vr->varying_p ()
|
|
4168 || vr->undefined_p ()
|
|
4169 || TREE_CODE (vr->min ()) != INTEGER_CST
|
|
4170 || TREE_CODE (vr->max ()) != INTEGER_CST)
|
|
4171 return false;
|
|
4172
|
|
4173 if (vr->kind () == VR_RANGE
|
|
4174 && wi::to_wide (vr->max ()) - wi::to_wide (vr->min ()) == 1)
|
|
4175 {
|
|
4176 *a = vr->min ();
|
|
4177 *b = vr->max ();
|
|
4178 return true;
|
|
4179 }
|
|
4180
|
|
4181 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
|
|
4182 if (vr->kind () == VR_ANTI_RANGE
|
|
4183 && (wi::to_wide (vr->min ())
|
|
4184 - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
|
|
4185 && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
|
|
4186 - wi::to_wide (vr->max ())) == 1)
|
|
4187 {
|
|
4188 *a = vrp_val_min (TREE_TYPE (var));
|
|
4189 *b = vrp_val_max (TREE_TYPE (var));
|
|
4190 return true;
|
|
4191 }
|
|
4192
|
|
4193 return false;
|
|
4194 }
|
|
4195
|
|
/* Simplify STMT using ranges if possible.  Dispatches to the
   code-specific simplifiers above; returns true if STMT changed.  */

bool
vr_values::simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      tree lhs = gimple_assign_lhs (stmt);
      tree val1 = NULL_TREE, val2 = NULL_TREE;
      use_operand_p use_p;
      gimple *use_stmt;

      /* Convert:
	 LHS = CST BINOP VAR
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)

	 Also handles:
	 LHS = VAR BINOP CST
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */

      if (TREE_CODE_CLASS (rhs_code) == tcc_binary
	  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  && ((TREE_CODE (rhs1) == INTEGER_CST
	       && TREE_CODE (rhs2) == SSA_NAME)
	      || (TREE_CODE (rhs2) == INTEGER_CST
		  && TREE_CODE (rhs1) == SSA_NAME))
	  && single_imm_use (lhs, &use_p, &use_stmt)
	  && gimple_code (use_stmt) == GIMPLE_COND)

	{
	  tree new_rhs1 = NULL_TREE;
	  tree new_rhs2 = NULL_TREE;
	  tree cmp_var = NULL_TREE;

	  if (TREE_CODE (rhs2) == SSA_NAME
	      && two_valued_val_range_p (rhs2, &val1, &val2))
	    {
	      /* Optimize RHS1 OP [VAL1, VAL2].  */
	      new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
	      new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
	      cmp_var = rhs2;
	    }
	  else if (TREE_CODE (rhs1) == SSA_NAME
		   && two_valued_val_range_p (rhs1, &val1, &val2))
	    {
	      /* Optimize [VAL1, VAL2] OP RHS2.  */
	      new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
	      new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
	      cmp_var = rhs1;
	    }

	  /* If we could not find two-vals or the optimization is invalid as
	     in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
	  if (new_rhs1 && new_rhs2)
	    {
	      tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
	      gimple_assign_set_rhs_with_ops (gsi,
					      COND_EXPR, cond,
					      new_rhs1,
					      new_rhs2);
	      update_stmt (gsi_stmt (*gsi));
	      fold_stmt (gsi, follow_single_use_edges);
	      return true;
	    }
	}

      switch (rhs_code)
	{
	case EQ_EXPR:
	case NE_EXPR:
	  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
	     if the RHS is zero or one, and the LHS are known to be boolean
	     values.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_truth_ops_using_ranges (gsi, stmt);
	  break;

	/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
	   and BIT_AND_EXPR respectively if the first operand is greater
	   than zero and the second operand is an exact power of two.
	   Also optimize TRUNC_MOD_EXPR away if the second operand is
	   constant and the first operand already has the right value
	   range.  */
	case TRUNC_DIV_EXPR:
	case TRUNC_MOD_EXPR:
	  if ((TREE_CODE (rhs1) == SSA_NAME
	       || TREE_CODE (rhs1) == INTEGER_CST)
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_div_or_mod_using_ranges (gsi, stmt);
	  break;

	/* Transform ABS (X) into X or -X as appropriate.  */
	case ABS_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_abs_using_ranges (gsi, stmt);
	  break;

	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
	     if all the bits being cleared are already cleared or
	     all the bits being set are already set.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_bit_ops_using_ranges (gsi, stmt);
	  break;

	CASE_CONVERT:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_conversion_using_ranges (gsi, stmt);
	  break;

	case FLOAT_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_float_conversion_using_ranges (gsi, stmt);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  return simplify_min_or_max_using_ranges (gsi, stmt);

	default:
	  break;
	}
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}
|
|
4341
|
145
|
4342 /* Set the lattice entry for VAR to VR. */
|
|
4343
|
131
|
4344 void
|
145
|
4345 vr_values::set_vr_value (tree var, value_range_equiv *vr)
|
131
|
4346 {
|
|
4347 if (SSA_NAME_VERSION (var) >= num_vr_values)
|
|
4348 return;
|
|
4349 vr_value[SSA_NAME_VERSION (var)] = vr;
|
|
4350 }
|
|
4351
|
145
|
4352 /* Swap the lattice entry for VAR with VR and return the old entry. */
|
|
4353
|
|
4354 value_range_equiv *
|
|
4355 vr_values::swap_vr_value (tree var, value_range_equiv *vr)
|
|
4356 {
|
|
4357 if (SSA_NAME_VERSION (var) >= num_vr_values)
|
|
4358 return NULL;
|
|
4359 std::swap (vr_value[SSA_NAME_VERSION (var)], vr);
|
|
4360 return vr;
|
|
4361 }
|