Attachment 'avrp20080316.patch'
Index: tree-vrp.c
2 ===================================================================
3 --- tree-vrp.c (revision 131599)
4 +++ tree-vrp.c (working copy)
5 @@ -79,25 +79,25 @@
6
7 /* If bit I is present, it means that SSA name N_i has a list of
8 assertions that should be inserted in the IL. */
9 -static bitmap need_assert_for;
10 +bitmap need_assert_for;
11
12 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
13 holds a list of ASSERT_LOCUS_T nodes that describe where
14 ASSERT_EXPRs for SSA name N_I should be inserted. */
15 -static assert_locus_t *asserts_for;
16 +assert_locus_t *asserts_for;
17
18 /* Set of blocks visited in find_assert_locations. Used to avoid
19 visiting the same block more than once. */
20 -static sbitmap blocks_visited;
21 +sbitmap blocks_visited;
22
23 /* Value range array. After propagation, VR_VALUE[I] holds the range
24 of values that SSA name N_I may take. */
25 -static value_range_t **vr_value;
26 +value_range_t **vr_value;
27
28 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
29 number of executable edges we saw the last time we visited the
30 node. */
31 -static int *vr_phi_edge_counts;
32 +int *vr_phi_edge_counts;
33
34
35 /* Return whether TYPE should use an overflow infinity distinct from
36 @@ -3825,7 +3825,8 @@
37 /* Traverse the strictly dominated sub-graph rooted at E->DEST
38 to determine if any of the operands in the conditional
39 predicate are used. */
40 - need_assert |= find_assert_locations (e->dest);
41 + if (e->dest != bb)
42 + need_assert |= find_assert_locations (e->dest);
43
44 /* Register the necessary assertions for each operand in the
45 conditional predicate. */
46 Index: tree-ssa-threadupdate.c
47 ===================================================================
48 --- tree-ssa-threadupdate.c (revision 131599)
49 +++ tree-ssa-threadupdate.c (working copy)
50 @@ -1103,3 +1103,118 @@
51 VEC_safe_push (edge, heap, threaded_edges, e);
52 VEC_safe_push (edge, heap, threaded_edges, e2);
53 }
54 +
55 +bool
56 +thread_through_all_blocks_1 (bool may_peel_loop_headers)
57 +{
58 + bool retval = false;
59 + unsigned int i;
60 + bool cfg_altered = false;
61 + bitmap threaded_blocks;
62 + bool tmp;
63 + tmp = may_peel_loop_headers;
64 +
65 + /* We must know about loops in order to preserve them. */
66 + gcc_assert (current_loops != NULL);
67 +
68 + if (threaded_edges == NULL)
69 + return false;
70 +
71 + threaded_blocks = BITMAP_ALLOC (NULL);
72 + initialize_original_copy_tables ();
73 +
74 + for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
75 + {
76 + basic_block bbchild, bbparent, bbtmp;
77 + bitmap_iterator bi1;
78 + unsigned i1;
79 + VEC (basic_block, heap) *blocks = VEC_alloc(basic_block, heap, 20);
80 + edge e = VEC_index (edge, threaded_edges, i);
81 + edge e2 = VEC_index (edge, threaded_edges, i + 1);
82 + bbparent = e->dest;
83 + bbchild = e2->src;
84 + VEC_safe_push(basic_block, heap, blocks, bbchild);
85 + bitmap_set_bit(threaded_blocks, bbchild->index);
86 + bitmap_set_bit(threaded_blocks, bbparent->index);
87 + if (bbchild != bbparent)
88 + while (VEC_length(basic_block, blocks) != 0) {
89 + basic_block bb = VEC_pop (basic_block, blocks);
90 + edge e1;
91 + edge_iterator ei;
92 + FOR_EACH_EDGE (e1, ei, bb->preds) {
93 + if (bitmap_bit_p(threaded_blocks, e1->src->index))
94 + continue;
95 + VEC_safe_push(basic_block, heap, blocks, e1->src);
96 + bitmap_set_bit(threaded_blocks, e1->src->index);
97 + }
98 +
99 + }
100 +
101 + EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i1, bi1)
102 + {
103 + basic_block bb = BASIC_BLOCK(i1);
104 + duplicate_block(bb, NULL, NULL);
105 + cfg_altered = true;
106 + }
107 +
108 + EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i1, bi1)
109 + {
110 + basic_block bb = BASIC_BLOCK(i1);
111 + bbtmp = get_bb_copy(bb);
112 + if (bb == bbparent) {
113 + edge e3;
114 + bbtmp->count = e->count;
115 + bbtmp->frequency = EDGE_FREQUENCY (e);
116 + /* EDGE_SUCC (bbtmp, 0)->count += e->count;
117 + e->dest->frequency -= EDGE_FREQUENCY(e);*/
118 + e3 = redirect_edge_and_branch(e, bbtmp);
119 + flush_pending_stmts (e3);
120 +
121 + }
122 + if (bb == bbchild) {
123 + edge e3;
124 + /*bbtmp->count = 0;*/
125 + remove_ctrl_stmt_and_useless_edges(bbtmp, NULL);
126 + e3 = make_edge (bbtmp, e2->dest, EDGE_FALLTHRU);
127 + /*
128 + if (single_succ_p(bb)){
129 + remove_ctrl_stmt_and_useless_edges(bb, NULL);
130 + }
131 + else {
132 + remove_edge(e2);
133 + }*/
134 +
135 + e3->probability = REG_BR_PROB_BASE;
136 + e3->dest->frequency = EDGE_FREQUENCY(e3);
137 + e3->count = bbtmp->count;
138 + }
139 +
140 + {
141 + edge e1;
142 + edge_iterator ei;
143 + FOR_EACH_EDGE (e1, ei, bbtmp->succs) {
144 + if (bitmap_bit_p(threaded_blocks, e1->dest->index)){
145 + edge e3;
146 + /* e1->dest->frequency -= EDGE_FREQUENCY(e1);*/
147 + e3 = redirect_edge_and_branch(e1, get_bb_copy(e1->dest));
148 + flush_pending_stmts (e3);
149 + e3->dest->frequency += EDGE_FREQUENCY(e3);
150 + }
151 +
152 + }
153 + }
154 +
155 + }
156 +
157 + VEC_free (basic_block, heap, blocks);
158 + bitmap_zero(threaded_blocks);
159 + }
160 +
161 + retval = cfg_altered;
162 + free_original_copy_tables ();
163 + BITMAP_FREE (threaded_blocks);
164 + threaded_blocks = NULL;
165 + VEC_free(edge, heap, threaded_edges);
166 + threaded_edges = NULL;
167 + return retval;
168 +}
169 Index: tree-avrp.c
170 ===================================================================
171 --- tree-avrp.c (revision 0)
172 +++ tree-avrp.c (revision 0)
173 @@ -0,0 +1,5682 @@
174 +/* Support routines for Value Range Propagation (VRP).
175 + Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
176 + Contributed by Diego Novillo <dnovillo@redhat.com>.
177 +
178 +This file is part of GCC.
179 +
180 +GCC is free software; you can redistribute it and/or modify
181 +it under the terms of the GNU General Public License as published by
182 +the Free Software Foundation; either version 3, or (at your option)
183 +any later version.
184 +
185 +GCC is distributed in the hope that it will be useful,
186 +but WITHOUT ANY WARRANTY; without even the implied warranty of
187 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
188 +GNU General Public License for more details.
189 +
190 +You should have received a copy of the GNU General Public License
191 +along with GCC; see the file COPYING3. If not see
192 +<http://www.gnu.org/licenses/>. */
193 +
194 +#include "config.h"
195 +#include "system.h"
196 +#include "coretypes.h"
197 +#include "tm.h"
198 +#include "ggc.h"
199 +#include "flags.h"
200 +#include "tree.h"
201 +#include "basic-block.h"
202 +#include "tree-flow.h"
203 +#include "tree-pass.h"
204 +#include "tree-dump.h"
205 +#include "timevar.h"
206 +#include "diagnostic.h"
207 +#include "toplev.h"
208 +#include "intl.h"
209 +#include "cfgloop.h"
210 +#include "tree-scalar-evolution.h"
211 +#include "tree-ssa-propagate.h"
212 +#include "tree-chrec.h"
213 +
214 +/* Set of SSA names found during the dominator traversal of a
215 + sub-graph in find_assert_locations. */
216 +static sbitmap found_in_subgraph;
217 +
218 +/* Local functions. */
219 +static int compare_values (tree val1, tree val2);
220 +static int compare_values_warnv (tree val1, tree val2, bool *);
221 +static void vrp_meet (value_range_t *, value_range_t *);
222 +static tree vrp_evaluate_conditional_warnv (tree, bool, bool *);
223 +
224 +/* Location information for ASSERT_EXPRs. Each instance of this
225 + structure describes an ASSERT_EXPR for an SSA name. Since a single
226 + SSA name may have more than one assertion associated with it, these
227 + locations are kept in a linked list attached to the corresponding
228 + SSA name. */
229 +struct assert_locus_d
230 +{
231 + /* Basic block where the assertion would be inserted. */
232 + basic_block bb;
233 +
234 + /* Some assertions need to be inserted on an edge (e.g., assertions
235 + generated by COND_EXPRs). In those cases, BB will be NULL. */
236 + edge e;
237 +
238 + /* Pointer to the statement that generated this assertion. */
239 + block_stmt_iterator si;
240 +
241 + /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
242 + enum tree_code comp_code;
243 +
244 + /* Value being compared against. */
245 + tree val;
246 +
247 + /* Next node in the linked list. */
248 + struct assert_locus_d *next;
249 +};
250 +
251 +typedef struct assert_locus_d *assert_locus_t;
252 +
253 +/* If bit I is present, it means that SSA name N_i has a list of
254 + assertions that should be inserted in the IL. */
255 +extern bitmap need_assert_for;
256 +
257 +/* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
258 + holds a list of ASSERT_LOCUS_T nodes that describe where
259 + ASSERT_EXPRs for SSA name N_I should be inserted. */
260 +extern assert_locus_t *asserts_for;
261 +
262 +/* Set of blocks visited in find_assert_locations. Used to avoid
263 + visiting the same block more than once. */
264 +extern sbitmap blocks_visited;
265 +
266 +/* Value range array. After propagation, VR_VALUE[I] holds the range
267 + of values that SSA name N_I may take. */
268 +extern value_range_t **vr_value;
269 +
270 +/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
271 + number of executable edges we saw the last time we visited the
272 + node. */
273 +extern int *vr_phi_edge_counts;
274 +
275 +
276 +/* Return whether TYPE should use an overflow infinity distinct from
277 + TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
278 + represent a signed overflow during VRP computations. An infinity
279 + is distinct from a half-range, which will go from some number to
280 + TYPE_{MIN,MAX}_VALUE. */
281 +
282 +static inline bool
283 +needs_overflow_infinity (const_tree type)
284 +{
285 + return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
286 +}
287 +
288 +/* Return whether TYPE can support our overflow infinity
289 + representation: we use the TREE_OVERFLOW flag, which only exists
290 + for constants. If TYPE doesn't support this, we don't optimize
291 + cases which would require signed overflow--we drop them to
292 + VARYING. */
293 +
294 +static inline bool
295 +supports_overflow_infinity (const_tree type)
296 +{
297 +#ifdef ENABLE_CHECKING
298 + gcc_assert (needs_overflow_infinity (type));
299 +#endif
300 + return (TYPE_MIN_VALUE (type) != NULL_TREE
301 + && CONSTANT_CLASS_P (TYPE_MIN_VALUE (type))
302 + && TYPE_MAX_VALUE (type) != NULL_TREE
303 + && CONSTANT_CLASS_P (TYPE_MAX_VALUE (type)));
304 +}
305 +
306 +/* VAL is the maximum or minimum value of a type. Return a
307 + corresponding overflow infinity. */
308 +
309 +static inline tree
310 +make_overflow_infinity (tree val)
311 +{
312 +#ifdef ENABLE_CHECKING
313 + gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
314 +#endif
315 + val = copy_node (val);
316 + TREE_OVERFLOW (val) = 1;
317 + return val;
318 +}
319 +
320 +/* Return a negative overflow infinity for TYPE. */
321 +
322 +static inline tree
323 +negative_overflow_infinity (tree type)
324 +{
325 +#ifdef ENABLE_CHECKING
326 + gcc_assert (supports_overflow_infinity (type));
327 +#endif
328 + return make_overflow_infinity (TYPE_MIN_VALUE (type));
329 +}
330 +
331 +/* Return a positive overflow infinity for TYPE. */
332 +
333 +static inline tree
334 +positive_overflow_infinity (tree type)
335 +{
336 +#ifdef ENABLE_CHECKING
337 + gcc_assert (supports_overflow_infinity (type));
338 +#endif
339 + return make_overflow_infinity (TYPE_MAX_VALUE (type));
340 +}
341 +
342 +/* Return whether VAL is a negative overflow infinity. */
343 +
344 +static inline bool
345 +is_negative_overflow_infinity (const_tree val)
346 +{
347 + return (needs_overflow_infinity (TREE_TYPE (val))
348 + && CONSTANT_CLASS_P (val)
349 + && TREE_OVERFLOW (val)
350 + && operand_equal_p (val, TYPE_MIN_VALUE (TREE_TYPE (val)), 0));
351 +}
352 +
353 +/* Return whether VAL is a positive overflow infinity. */
354 +
355 +static inline bool
356 +is_positive_overflow_infinity (const_tree val)
357 +{
358 + return (needs_overflow_infinity (TREE_TYPE (val))
359 + && CONSTANT_CLASS_P (val)
360 + && TREE_OVERFLOW (val)
361 + && operand_equal_p (val, TYPE_MAX_VALUE (TREE_TYPE (val)), 0));
362 +}
363 +
364 +/* Return whether VAL is a positive or negative overflow infinity. */
365 +
366 +static inline bool
367 +is_overflow_infinity (const_tree val)
368 +{
369 + return (needs_overflow_infinity (TREE_TYPE (val))
370 + && CONSTANT_CLASS_P (val)
371 + && TREE_OVERFLOW (val)
372 + && (operand_equal_p (val, TYPE_MAX_VALUE (TREE_TYPE (val)), 0)
373 + || operand_equal_p (val, TYPE_MIN_VALUE (TREE_TYPE (val)), 0)));
374 +}
375 +
376 +/* If VAL is now an overflow infinity, return VAL. Otherwise, return
377 + the same value with TREE_OVERFLOW clear. This can be used to avoid
378 + confusing a regular value with an overflow value. */
379 +
380 +static inline tree
381 +avoid_overflow_infinity (tree val)
382 +{
383 + if (!is_overflow_infinity (val))
384 + return val;
385 +
386 + if (operand_equal_p (val, TYPE_MAX_VALUE (TREE_TYPE (val)), 0))
387 + return TYPE_MAX_VALUE (TREE_TYPE (val));
388 + else
389 + {
390 +#ifdef ENABLE_CHECKING
391 + gcc_assert (operand_equal_p (val, TYPE_MIN_VALUE (TREE_TYPE (val)), 0));
392 +#endif
393 + return TYPE_MIN_VALUE (TREE_TYPE (val));
394 + }
395 +}
396 +
397 +
398 +/* Return whether VAL is equal to the maximum value of its type. This
399 + will be true for a positive overflow infinity. We can't do a
400 + simple equality comparison with TYPE_MAX_VALUE because C typedefs
401 + and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
402 + to the integer constant with the same value in the type. */
403 +
404 +static inline bool
405 +vrp_val_is_max (const_tree val)
406 +{
407 + tree type_max = TYPE_MAX_VALUE (TREE_TYPE (val));
408 +
409 + return (val == type_max
410 + || (type_max != NULL_TREE
411 + && operand_equal_p (val, type_max, 0)));
412 +}
413 +
414 +/* Return whether VAL is equal to the minimum value of its type. This
415 + will be true for a negative overflow infinity. */
416 +
417 +static inline bool
418 +vrp_val_is_min (const_tree val)
419 +{
420 + tree type_min = TYPE_MIN_VALUE (TREE_TYPE (val));
421 +
422 + return (val == type_min
423 + || (type_min != NULL_TREE
424 + && operand_equal_p (val, type_min, 0)));
425 +}
426 +
427 +
428 +/* Return true if ARG is marked with the nonnull attribute in the
429 + current function signature. */
430 +
431 +static bool
432 +nonnull_arg_p (const_tree arg)
433 +{
434 + tree t, attrs, fntype;
435 + unsigned HOST_WIDE_INT arg_num;
436 +
437 + gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
438 +
439 + /* The static chain decl is always non null. */
440 + if (arg == cfun->static_chain_decl)
441 + return true;
442 +
443 + fntype = TREE_TYPE (current_function_decl);
444 + attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
445 +
446 + /* If "nonnull" wasn't specified, we know nothing about the argument. */
447 + if (attrs == NULL_TREE)
448 + return false;
449 +
450 + /* If "nonnull" applies to all the arguments, then ARG is non-null. */
451 + if (TREE_VALUE (attrs) == NULL_TREE)
452 + return true;
453 +
454 + /* Get the position number for ARG in the function signature. */
455 + for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
456 + t;
457 + t = TREE_CHAIN (t), arg_num++)
458 + {
459 + if (t == arg)
460 + break;
461 + }
462 +
463 + gcc_assert (t == arg);
464 +
465 + /* Now see if ARG_NUM is mentioned in the nonnull list. */
466 + for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
467 + {
468 + if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
469 + return true;
470 + }
471 +
472 + return false;
473 +}
474 +
475 +
476 +/* Set value range VR to {T, MIN, MAX, EQUIV}. */
477 +
478 +static void
479 +set_value_range (value_range_t *vr, enum value_range_type t, tree min,
480 + tree max, bitmap equiv)
481 +{
482 +#if defined ENABLE_CHECKING
483 + /* Check the validity of the range. */
484 + if (t == VR_RANGE || t == VR_ANTI_RANGE)
485 + {
486 + int cmp;
487 +
488 + gcc_assert (min && max);
489 +
490 + if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
491 + gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
492 +
493 + cmp = compare_values (min, max);
494 + gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
495 +
496 + if (needs_overflow_infinity (TREE_TYPE (min)))
497 + gcc_assert (!is_overflow_infinity (min)
498 + || !is_overflow_infinity (max));
499 + }
500 +
501 + if (t == VR_UNDEFINED || t == VR_VARYING)
502 + gcc_assert (min == NULL_TREE && max == NULL_TREE);
503 +
504 + if (t == VR_UNDEFINED || t == VR_VARYING)
505 + gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
506 +#endif
507 +
508 + vr->type = t;
509 + vr->min = min;
510 + vr->max = max;
511 +
512 + /* Since updating the equivalence set involves deep copying the
513 + bitmaps, only do it if absolutely necessary. */
514 + if (vr->equiv == NULL
515 + && equiv != NULL)
516 + vr->equiv = BITMAP_ALLOC (NULL);
517 +
518 + if (equiv != vr->equiv)
519 + {
520 + if (equiv && !bitmap_empty_p (equiv))
521 + bitmap_copy (vr->equiv, equiv);
522 + else
523 + bitmap_clear (vr->equiv);
524 + }
525 +}
526 +
527 +
528 +/* Copy value range FROM into value range TO. */
529 +
530 +static inline void
531 +copy_value_range (value_range_t *to, value_range_t *from)
532 +{
533 + set_value_range (to, from->type, from->min, from->max, from->equiv);
534 +}
535 +
536 +
537 +/* Set value range VR to VR_VARYING. */
538 +
539 +static inline void
540 +set_value_range_to_varying (value_range_t *vr)
541 +{
542 + vr->type = VR_VARYING;
543 + vr->min = vr->max = NULL_TREE;
544 + if (vr->equiv)
545 + bitmap_clear (vr->equiv);
546 +}
547 +
548 +/* Set value range VR to a single value. This function is only called
549 + with values we get from statements, and exists to clear the
550 + TREE_OVERFLOW flag so that we don't think we have an overflow
551 + infinity when we shouldn't. */
552 +
553 +static inline void
554 +set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
555 +{
556 + gcc_assert (is_gimple_min_invariant (val));
557 + val = avoid_overflow_infinity (val);
558 + set_value_range (vr, VR_RANGE, val, val, equiv);
559 +}
560 +
561 +/* Set value range VR to a non-negative range of type TYPE.
562 + OVERFLOW_INFINITY indicates whether to use an overflow infinity
563 + rather than TYPE_MAX_VALUE; this should be true if we determine
564 + that the range is nonnegative based on the assumption that signed
565 + overflow does not occur. */
566 +
567 +static inline void
568 +set_value_range_to_nonnegative (value_range_t *vr, tree type,
569 + bool overflow_infinity)
570 +{
571 + tree zero;
572 +
573 + if (overflow_infinity && !supports_overflow_infinity (type))
574 + {
575 + set_value_range_to_varying (vr);
576 + return;
577 + }
578 +
579 + zero = build_int_cst (type, 0);
580 + set_value_range (vr, VR_RANGE, zero,
581 + (overflow_infinity
582 + ? positive_overflow_infinity (type)
583 + : TYPE_MAX_VALUE (type)),
584 + vr->equiv);
585 +}
586 +
587 +/* Set value range VR to a non-NULL range of type TYPE. */
588 +
589 +static inline void
590 +set_value_range_to_nonnull (value_range_t *vr, tree type)
591 +{
592 + tree zero = build_int_cst (type, 0);
593 + set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
594 +}
595 +
596 +
597 +/* Set value range VR to a NULL range of type TYPE. */
598 +
599 +static inline void
600 +set_value_range_to_null (value_range_t *vr, tree type)
601 +{
602 + set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
603 +}
604 +
605 +
606 +/* Set value range VR to a range of a truthvalue of type TYPE. */
607 +
608 +static inline void
609 +set_value_range_to_truthvalue (value_range_t *vr, tree type)
610 +{
611 + if (TYPE_PRECISION (type) == 1)
612 + set_value_range_to_varying (vr);
613 + else
614 + set_value_range (vr, VR_RANGE,
615 + build_int_cst (type, 0), build_int_cst (type, 1),
616 + vr->equiv);
617 +}
618 +
619 +
620 +/* Set value range VR to VR_UNDEFINED. */
621 +
622 +static inline void
623 +set_value_range_to_undefined (value_range_t *vr)
624 +{
625 + vr->type = VR_UNDEFINED;
626 + vr->min = vr->max = NULL_TREE;
627 + if (vr->equiv)
628 + bitmap_clear (vr->equiv);
629 +}
630 +
631 +
632 +/* Return value range information for VAR.
633 +
634 + If we have no values ranges recorded (ie, VRP is not running), then
635 + return NULL. Otherwise create an empty range if none existed for VAR. */
636 +
637 +static value_range_t *
638 +get_value_range (const_tree var)
639 +{
640 + value_range_t *vr;
641 + tree sym;
642 + unsigned ver = SSA_NAME_VERSION (var);
643 +
644 + /* If we have no recorded ranges, then return NULL. */
645 + if (! vr_value)
646 + return NULL;
647 +
648 + vr = vr_value[ver];
649 + if (vr)
650 + return vr;
651 +
652 + /* Create a default value range. */
653 + vr_value[ver] = vr = XCNEW (value_range_t);
654 +
655 + /* Defer allocating the equivalence set. */
656 + vr->equiv = NULL;
657 +
658 + /* If VAR is a default definition, the variable can take any value
659 + in VAR's type. */
660 + sym = SSA_NAME_VAR (var);
661 + if (SSA_NAME_IS_DEFAULT_DEF (var))
662 + {
663 + /* Try to use the "nonnull" attribute to create ~[0, 0]
664 + anti-ranges for pointers. Note that this is only valid with
665 + default definitions of PARM_DECLs. */
666 + if (TREE_CODE (sym) == PARM_DECL
667 + && POINTER_TYPE_P (TREE_TYPE (sym))
668 + && nonnull_arg_p (sym))
669 + set_value_range_to_nonnull (vr, TREE_TYPE (sym));
670 + else
671 + set_value_range_to_varying (vr);
672 + }
673 +
674 + return vr;
675 +}
676 +
677 +/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
678 +
679 +static inline bool
680 +vrp_operand_equal_p (const_tree val1, const_tree val2)
681 +{
682 + if (val1 == val2)
683 + return true;
684 + if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
685 + return false;
686 + if (is_overflow_infinity (val1))
687 + return is_overflow_infinity (val2);
688 + return true;
689 +}
690 +
691 +/* Return true, if the bitmaps B1 and B2 are equal. */
692 +
693 +static inline bool
694 +vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
695 +{
696 + return (b1 == b2
697 + || (b1 && b2
698 + && bitmap_equal_p (b1, b2)));
699 +}
700 +
701 +/* Update the value range and equivalence set for variable VAR to
702 + NEW_VR. Return true if NEW_VR is different from VAR's previous
703 + value.
704 +
705 + NOTE: This function assumes that NEW_VR is a temporary value range
706 + object created for the sole purpose of updating VAR's range. The
707 + storage used by the equivalence set from NEW_VR will be freed by
708 + this function. Do not call update_value_range when NEW_VR
709 + is the range object associated with another SSA name. */
710 +
711 +static inline bool
712 +update_value_range (const_tree var, value_range_t *new_vr)
713 +{
714 + value_range_t *old_vr;
715 + bool is_new;
716 +
717 + /* Update the value range, if necessary. */
718 + old_vr = get_value_range (var);
719 + is_new = old_vr->type != new_vr->type
720 + || !vrp_operand_equal_p (old_vr->min, new_vr->min)
721 + || !vrp_operand_equal_p (old_vr->max, new_vr->max)
722 + || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
723 +
724 + if (is_new)
725 + set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
726 + new_vr->equiv);
727 +
728 + BITMAP_FREE (new_vr->equiv);
729 +
730 + return is_new;
731 +}
732 +
733 +
734 +/* Add VAR and VAR's equivalence set to EQUIV. This is the central
735 + point where equivalence processing can be turned on/off. */
736 +
737 +static void
738 +add_equivalence (bitmap *equiv, const_tree var)
739 +{
740 + unsigned ver = SSA_NAME_VERSION (var);
741 + value_range_t *vr = vr_value[ver];
742 +
743 + if (*equiv == NULL)
744 + *equiv = BITMAP_ALLOC (NULL);
745 + bitmap_set_bit (*equiv, ver);
746 + if (vr && vr->equiv)
747 + bitmap_ior_into (*equiv, vr->equiv);
748 +}
749 +
750 +
751 +/* Return true if VR is ~[0, 0]. */
752 +
753 +static inline bool
754 +range_is_nonnull (value_range_t *vr)
755 +{
756 + return vr->type == VR_ANTI_RANGE
757 + && integer_zerop (vr->min)
758 + && integer_zerop (vr->max);
759 +}
760 +
761 +
762 +/* Return true if VR is [0, 0]. */
763 +
764 +static inline bool
765 +range_is_null (value_range_t *vr)
766 +{
767 + return vr->type == VR_RANGE
768 + && integer_zerop (vr->min)
769 + && integer_zerop (vr->max);
770 +}
771 +
772 +
773 +/* Return true if value range VR involves at least one symbol. */
774 +
775 +static inline bool
776 +symbolic_range_p (value_range_t *vr)
777 +{
778 + return (!is_gimple_min_invariant (vr->min)
779 + || !is_gimple_min_invariant (vr->max));
780 +}
781 +
782 +/* Return true if value range VR uses an overflow infinity. */
783 +
784 +static inline bool
785 +overflow_infinity_range_p (value_range_t *vr)
786 +{
787 + return (vr->type == VR_RANGE
788 + && (is_overflow_infinity (vr->min)
789 + || is_overflow_infinity (vr->max)));
790 +}
791 +
792 +/* Return false if we can not make a valid comparison based on VR;
793 + this will be the case if it uses an overflow infinity and overflow
794 + is not undefined (i.e., -fno-strict-overflow is in effect).
795 + Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
796 + uses an overflow infinity. */
797 +
798 +static bool
799 +usable_range_p (value_range_t *vr, bool *strict_overflow_p)
800 +{
801 + gcc_assert (vr->type == VR_RANGE);
802 + if (is_overflow_infinity (vr->min))
803 + {
804 + *strict_overflow_p = true;
805 + if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
806 + return false;
807 + }
808 + if (is_overflow_infinity (vr->max))
809 + {
810 + *strict_overflow_p = true;
811 + if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
812 + return false;
813 + }
814 + return true;
815 +}
816 +
817 +
818 +/* Like tree_expr_nonnegative_warnv_p, but this function uses value
819 + ranges obtained so far. */
820 +
821 +static bool
822 +vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
823 +{
824 + return tree_expr_nonnegative_warnv_p (expr, strict_overflow_p);
825 +}
826 +
827 +/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
828 + obtained so far. */
829 +
830 +static bool
831 +vrp_expr_computes_nonzero (tree expr, bool *strict_overflow_p)
832 +{
833 + if (tree_expr_nonzero_warnv_p (expr, strict_overflow_p))
834 + return true;
835 +
836 + /* If we have an expression of the form &X->a, then the expression
837 + is nonnull if X is nonnull. */
838 + if (TREE_CODE (expr) == ADDR_EXPR)
839 + {
840 + tree base = get_base_address (TREE_OPERAND (expr, 0));
841 +
842 + if (base != NULL_TREE
843 + && TREE_CODE (base) == INDIRECT_REF
844 + && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
845 + {
846 + value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
847 + if (range_is_nonnull (vr))
848 + return true;
849 + }
850 + }
851 +
852 + return false;
853 +}
854 +
855 +/* Returns true if EXPR is a valid value (as expected by compare_values) --
856 + a gimple invariant, or SSA_NAME +- CST. */
857 +
858 +static bool
859 +valid_value_p (tree expr)
860 +{
861 + if (TREE_CODE (expr) == SSA_NAME)
862 + return true;
863 +
864 + if (TREE_CODE (expr) == PLUS_EXPR
865 + || TREE_CODE (expr) == MINUS_EXPR)
866 + return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
867 + && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
868 +
869 + return is_gimple_min_invariant (expr);
870 +}
871 +
872 +/* Return
873 + 1 if VAL < VAL2
874 + 0 if !(VAL < VAL2)
875 + -2 if those are incomparable. */
876 +static inline int
877 +operand_less_p (tree val, tree val2)
878 +{
879 + /* LT is folded faster than GE and others. Inline the common case. */
880 + if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
881 + {
882 + if (TYPE_UNSIGNED (TREE_TYPE (val)))
883 + return INT_CST_LT_UNSIGNED (val, val2);
884 + else
885 + {
886 + if (INT_CST_LT (val, val2))
887 + return 1;
888 + }
889 + }
890 + else
891 + {
892 + tree tcmp;
893 +
894 + fold_defer_overflow_warnings ();
895 +
896 + tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
897 +
898 + fold_undefer_and_ignore_overflow_warnings ();
899 +
900 + if (!tcmp)
901 + return -2;
902 +
903 + if (!integer_zerop (tcmp))
904 + return 1;
905 + }
906 +
907 + /* val >= val2, not considering overflow infinity. */
908 + if (is_negative_overflow_infinity (val))
909 + return is_negative_overflow_infinity (val2) ? 0 : 1;
910 + else if (is_positive_overflow_infinity (val2))
911 + return is_positive_overflow_infinity (val) ? 0 : 1;
912 +
913 + return 0;
914 +}
915 +
916 +/* Compare two values VAL1 and VAL2. Return
917 +
918 + -2 if VAL1 and VAL2 cannot be compared at compile-time,
919 + -1 if VAL1 < VAL2,
920 + 0 if VAL1 == VAL2,
921 + +1 if VAL1 > VAL2, and
922 + +2 if VAL1 != VAL2
923 +
924 + This is similar to tree_int_cst_compare but supports pointer values
925 + and values that cannot be compared at compile time.
926 +
927 + If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
928 + true if the return value is only valid if we assume that signed
929 + overflow is undefined. */
930 +
931 +static int
932 +compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
933 +{
934 + if (val1 == val2)
935 + return 0;
936 +
937 + /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
938 + both integers. */
939 + gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
940 + == POINTER_TYPE_P (TREE_TYPE (val2)));
941 + /* Convert the two values into the same type. This is needed because
942 + sizetype causes sign extension even for unsigned types. */
943 + val2 = fold_convert (TREE_TYPE (val1), val2);
944 + STRIP_USELESS_TYPE_CONVERSION (val2);
945 +
946 + if ((TREE_CODE (val1) == SSA_NAME
947 + || TREE_CODE (val1) == PLUS_EXPR
948 + || TREE_CODE (val1) == MINUS_EXPR)
949 + && (TREE_CODE (val2) == SSA_NAME
950 + || TREE_CODE (val2) == PLUS_EXPR
951 + || TREE_CODE (val2) == MINUS_EXPR))
952 + {
953 + tree n1, c1, n2, c2;
954 + enum tree_code code1, code2;
955 +
956 + /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
957 + return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
958 + same name, return -2. */
959 + if (TREE_CODE (val1) == SSA_NAME)
960 + {
961 + code1 = SSA_NAME;
962 + n1 = val1;
963 + c1 = NULL_TREE;
964 + }
965 + else
966 + {
967 + code1 = TREE_CODE (val1);
968 + n1 = TREE_OPERAND (val1, 0);
969 + c1 = TREE_OPERAND (val1, 1);
970 + if (tree_int_cst_sgn (c1) == -1)
971 + {
972 + if (is_negative_overflow_infinity (c1))
973 + return -2;
974 + c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
975 + if (!c1)
976 + return -2;
977 + code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
978 + }
979 + }
980 +
981 + if (TREE_CODE (val2) == SSA_NAME)
982 + {
983 + code2 = SSA_NAME;
984 + n2 = val2;
985 + c2 = NULL_TREE;
986 + }
987 + else
988 + {
989 + code2 = TREE_CODE (val2);
990 + n2 = TREE_OPERAND (val2, 0);
991 + c2 = TREE_OPERAND (val2, 1);
992 + if (tree_int_cst_sgn (c2) == -1)
993 + {
994 + if (is_negative_overflow_infinity (c2))
995 + return -2;
996 + c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
997 + if (!c2)
998 + return -2;
999 + code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1000 + }
1001 + }
1002 +
1003 + /* Both values must use the same name. */
1004 + if (n1 != n2)
1005 + return -2;
1006 +
1007 + if (code1 == SSA_NAME
1008 + && code2 == SSA_NAME)
1009 + /* NAME == NAME */
1010 + return 0;
1011 +
1012 + /* If overflow is defined we cannot simplify more. */
1013 + if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1014 + return -2;
1015 +
1016 + if (strict_overflow_p != NULL
1017 + && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1018 + && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1019 + *strict_overflow_p = true;
1020 +
1021 + if (code1 == SSA_NAME)
1022 + {
1023 + if (code2 == PLUS_EXPR)
1024 + /* NAME < NAME + CST */
1025 + return -1;
1026 + else if (code2 == MINUS_EXPR)
1027 + /* NAME > NAME - CST */
1028 + return 1;
1029 + }
1030 + else if (code1 == PLUS_EXPR)
1031 + {
1032 + if (code2 == SSA_NAME)
1033 + /* NAME + CST > NAME */
1034 + return 1;
1035 + else if (code2 == PLUS_EXPR)
1036 + /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1037 + return compare_values_warnv (c1, c2, strict_overflow_p);
1038 + else if (code2 == MINUS_EXPR)
1039 + /* NAME + CST1 > NAME - CST2 */
1040 + return 1;
1041 + }
1042 + else if (code1 == MINUS_EXPR)
1043 + {
1044 + if (code2 == SSA_NAME)
1045 + /* NAME - CST < NAME */
1046 + return -1;
1047 + else if (code2 == PLUS_EXPR)
1048 + /* NAME - CST1 < NAME + CST2 */
1049 + return -1;
1050 + else if (code2 == MINUS_EXPR)
1051 + /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1052 + C1 and C2 are swapped in the call to compare_values. */
1053 + return compare_values_warnv (c2, c1, strict_overflow_p);
1054 + }
1055 +
1056 + gcc_unreachable ();
1057 + }
1058 +
1059 + /* We cannot compare non-constants. */
1060 + if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1061 + return -2;
1062 +
1063 + if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1064 + {
1065 + /* We cannot compare overflowed values, except for overflow
1066 + infinities. */
1067 + if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1068 + {
1069 + if (strict_overflow_p != NULL)
1070 + *strict_overflow_p = true;
1071 + if (is_negative_overflow_infinity (val1))
1072 + return is_negative_overflow_infinity (val2) ? 0 : -1;
1073 + else if (is_negative_overflow_infinity (val2))
1074 + return 1;
1075 + else if (is_positive_overflow_infinity (val1))
1076 + return is_positive_overflow_infinity (val2) ? 0 : 1;
1077 + else if (is_positive_overflow_infinity (val2))
1078 + return -1;
1079 + return -2;
1080 + }
1081 +
1082 + return tree_int_cst_compare (val1, val2);
1083 + }
1084 + else
1085 + {
1086 + tree t;
1087 +
1088 + /* First see if VAL1 and VAL2 are not the same. */
1089 + if (val1 == val2 || operand_equal_p (val1, val2, 0))
1090 + return 0;
1091 +
1092 + /* If VAL1 is a lower address than VAL2, return -1. */
1093 + if (operand_less_p (val1, val2) == 1)
1094 + return -1;
1095 +
1096 + /* If VAL1 is a higher address than VAL2, return +1. */
1097 + if (operand_less_p (val2, val1) == 1)
1098 + return 1;
1099 +
1100 + /* If VAL1 is different than VAL2, return +2.
1101 + For integer constants we either have already returned -1 or 1
1102 + or they are equivalent. We still might succeed in proving
1103 + something about non-trivial operands. */
1104 + if (TREE_CODE (val1) != INTEGER_CST
1105 + || TREE_CODE (val2) != INTEGER_CST)
1106 + {
1107 + t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1108 + if (t && tree_expr_nonzero_p (t))
1109 + return 2;
1110 + }
1111 +
1112 + return -2;
1113 + }
1114 +}
1115 +
1116 +/* Compare values like compare_values_warnv, but treat comparisons of
1117 + nonconstants which rely on undefined overflow as incomparable. */
1118 +
1119 +static int
1120 +compare_values (tree val1, tree val2)
1121 +{
1122 + bool sop;
1123 + int ret;
1124 +
1125 + sop = false;
1126 + ret = compare_values_warnv (val1, val2, &sop);
1127 + if (sop
1128 + && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1129 + ret = -2;
1130 + return ret;
1131 +}
1132 +
1133 +
1134 +/* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1135 + 0 if VAL is not inside VR,
1136 + -2 if we cannot tell either way.
1137 +
1138 + FIXME, the current semantics of this function are a bit quirky
1139 + when taken in the context of VRP. In here we do not care
1140 + about VR's type. If VR is the anti-range ~[3, 5] the call
1141 + value_inside_range (4, VR) will return 1.
1142 +
1143 + This is counter-intuitive in a strict sense, but the callers
1144 + currently expect this. They are calling the function
1145 + merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1146 + callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1147 + themselves.
1148 +
1149 + This also applies to value_ranges_intersect_p and
1150 + range_includes_zero_p. The semantics of VR_RANGE and
1151 + VR_ANTI_RANGE should be encoded here, but that also means
1152 + adapting the users of these functions to the new semantics.
1153 +
1154 + Benchmark compile/20001226-1.c compilation time after changing this
1155 + function. */
1156 +
1157 +static inline int
1158 +value_inside_range (tree val, value_range_t * vr)
1159 +{
1160 + int cmp1, cmp2;
1161 +
1162 + cmp1 = operand_less_p (val, vr->min);
1163 + if (cmp1 == -2)
1164 + return -2;
1165 + if (cmp1 == 1)
1166 + return 0;
1167 +
1168 + cmp2 = operand_less_p (vr->max, val);
1169 + if (cmp2 == -2)
1170 + return -2;
1171 +
1172 + return !cmp2;
1173 +}
1174 +
1175 +
1176 +/* Return true if value ranges VR0 and VR1 have a non-empty
1177 + intersection.
1178 +
1179 + Benchmark compile/20001226-1.c compilation time after changing this
1180 + function.
1181 + */
1182 +
1183 +static inline bool
1184 +value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1185 +{
1186 + /* The value ranges do not intersect if the maximum of the first range is
1187 + less than the minimum of the second range or vice versa.
1188 + When those relations are unknown, we can't do any better. */
1189 + if (operand_less_p (vr0->max, vr1->min) != 0)
1190 + return false;
1191 + if (operand_less_p (vr1->max, vr0->min) != 0)
1192 + return false;
1193 + return true;
1194 +}
1195 +
1196 +
1197 +/* Return true if VR includes the value zero, false otherwise. FIXME,
1198 + currently this will return false for an anti-range like ~[-4, 3].
1199 + This will be wrong when the semantics of value_inside_range are
1200 + modified (currently the users of this function expect these
1201 + semantics). */
1202 +
1203 +static inline bool
1204 +range_includes_zero_p (value_range_t *vr)
1205 +{
1206 + tree zero;
1207 +
1208 + gcc_assert (vr->type != VR_UNDEFINED
1209 + && vr->type != VR_VARYING
1210 + && !symbolic_range_p (vr));
1211 +
1212 + zero = build_int_cst (TREE_TYPE (vr->min), 0);
1213 + return (value_inside_range (zero, vr) == 1);
1214 +}
1215 +
1216 +/* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1217 + false otherwise or if no value range information is available. */
1218 +
1219 +
1220 +
1221 +/* Extract value range information from an ASSERT_EXPR EXPR and store
1222 + it in *VR_P. */
1223 +
1224 +static void
1225 +extract_range_from_assert (value_range_t *vr_p, tree expr)
1226 +{
1227 + tree var, cond, limit, min, max, type;
1228 + value_range_t *var_vr, *limit_vr;
1229 + enum tree_code cond_code;
1230 +
1231 + var = ASSERT_EXPR_VAR (expr);
1232 + cond = ASSERT_EXPR_COND (expr);
1233 +
1234 + gcc_assert (COMPARISON_CLASS_P (cond));
1235 +
1236 + /* Find VAR in the ASSERT_EXPR conditional. */
1237 + if (var == TREE_OPERAND (cond, 0))
1238 + {
1239 + /* If the predicate is of the form VAR COMP LIMIT, then we just
1240 + take LIMIT from the RHS and use the same comparison code. */
1241 + limit = TREE_OPERAND (cond, 1);
1242 + cond_code = TREE_CODE (cond);
1243 + }
1244 + else
1245 + {
1246 + /* If the predicate is of the form LIMIT COMP VAR, then we need
1247 + to flip around the comparison code to create the proper range
1248 + for VAR. */
1249 + limit = TREE_OPERAND (cond, 0);
1250 + cond_code = swap_tree_comparison (TREE_CODE (cond));
1251 + }
1252 +
1253 + limit = avoid_overflow_infinity (limit);
1254 +
1255 + type = TREE_TYPE (limit);
1256 + gcc_assert (limit != var);
1257 +
1258 + /* For pointer arithmetic, we only keep track of pointer equality
1259 + and inequality. */
1260 + if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1261 + {
1262 + set_value_range_to_varying (vr_p);
1263 + return;
1264 + }
1265 +
1266 + /* If LIMIT is another SSA name and LIMIT has a range of its own,
1267 + try to use LIMIT's range to avoid creating symbolic ranges
1268 + unnecessarily. */
1269 + limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1270 +
1271 + /* LIMIT's range is only interesting if it has any useful information. */
1272 + if (limit_vr
1273 + && (limit_vr->type == VR_UNDEFINED
1274 + || limit_vr->type == VR_VARYING
1275 + || symbolic_range_p (limit_vr)))
1276 + limit_vr = NULL;
1277 +
1278 + /* Initially, the new range has the same set of equivalences of
1279 + VAR's range. This will be revised before returning the final
1280 + value. Since assertions may be chained via mutually exclusive
1281 + predicates, we will need to trim the set of equivalences before
1282 + we are done. */
1283 + gcc_assert (vr_p->equiv == NULL);
1284 + add_equivalence (&vr_p->equiv, var);
1285 +
1286 + /* Extract a new range based on the asserted comparison for VAR and
1287 + LIMIT's value range. Notice that if LIMIT has an anti-range, we
1288 + will only use it for equality comparisons (EQ_EXPR). For any
1289 + other kind of assertion, we cannot derive a range from LIMIT's
1290 + anti-range that can be used to describe the new range. For
1291 + instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1292 + then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1293 + no single range for x_2 that could describe LE_EXPR, so we might
1294 + as well build the range [b_4, +INF] for it. */
1295 + if (cond_code == EQ_EXPR)
1296 + {
1297 + enum value_range_type range_type;
1298 +
1299 + if (limit_vr)
1300 + {
1301 + range_type = limit_vr->type;
1302 + min = limit_vr->min;
1303 + max = limit_vr->max;
1304 + }
1305 + else
1306 + {
1307 + range_type = VR_RANGE;
1308 + min = limit;
1309 + max = limit;
1310 + }
1311 +
1312 + set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1313 +
1314 + /* When asserting the equality VAR == LIMIT and LIMIT is another
1315 + SSA name, the new range will also inherit the equivalence set
1316 + from LIMIT. */
1317 + if (TREE_CODE (limit) == SSA_NAME)
1318 + add_equivalence (&vr_p->equiv, limit);
1319 + }
1320 + else if (cond_code == NE_EXPR)
1321 + {
1322 + /* As described above, when LIMIT's range is an anti-range and
1323 + this assertion is an inequality (NE_EXPR), then we cannot
1324 + derive anything from the anti-range. For instance, if
1325 + LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1326 + not imply that VAR's range is [0, 0]. So, in the case of
1327 + anti-ranges, we just assert the inequality using LIMIT and
1328 + not its anti-range.
1329 +
1330 + If LIMIT_VR is a range, we can only use it to build a new
1331 + anti-range if LIMIT_VR is a single-valued range. For
1332 + instance, if LIMIT_VR is [0, 1], the predicate
1333 + VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1334 + Rather, it means that for value 0 VAR should be ~[0, 0]
1335 + and for value 1, VAR should be ~[1, 1]. We cannot
1336 + represent these ranges.
1337 +
1338 + The only situation in which we can build a valid
1339 + anti-range is when LIMIT_VR is a single-valued range
1340 + (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1341 + build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1342 + if (limit_vr
1343 + && limit_vr->type == VR_RANGE
1344 + && compare_values (limit_vr->min, limit_vr->max) == 0)
1345 + {
1346 + min = limit_vr->min;
1347 + max = limit_vr->max;
1348 + }
1349 + else
1350 + {
1351 + /* In any other case, we cannot use LIMIT's range to build a
1352 + valid anti-range. */
1353 + min = max = limit;
1354 + }
1355 +
1356 + /* If MIN and MAX cover the whole range for their type, then
1357 + just use the original LIMIT. */
1358 + if (INTEGRAL_TYPE_P (type)
1359 + && vrp_val_is_min (min)
1360 + && vrp_val_is_max (max))
1361 + min = max = limit;
1362 +
1363 + set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1364 + }
1365 + else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1366 + {
1367 + min = TYPE_MIN_VALUE (type);
1368 +
1369 + if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1370 + max = limit;
1371 + else
1372 + {
1373 + /* If LIMIT_VR is of the form [N1, N2], we need to build the
1374 + range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1375 + LT_EXPR. */
1376 + max = limit_vr->max;
1377 + }
1378 +
1379 + /* If the maximum value forces us to be out of bounds, simply punt.
1380 + It would be pointless to try and do anything more since this
1381 + all should be optimized away above us. */
1382 + if ((cond_code == LT_EXPR
1383 + && compare_values (max, min) == 0)
1384 + || is_overflow_infinity (max))
1385 + set_value_range_to_varying (vr_p);
1386 + else
1387 + {
1388 + /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1389 + if (cond_code == LT_EXPR)
1390 + {
1391 + tree one = build_int_cst (type, 1);
1392 + max = fold_build2 (MINUS_EXPR, type, max, one);
1393 + if (EXPR_P (max))
1394 + TREE_NO_WARNING (max) = 1;
1395 + }
1396 +
1397 + set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1398 + }
1399 + }
1400 + else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1401 + {
1402 + max = TYPE_MAX_VALUE (type);
1403 +
1404 + if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1405 + min = limit;
1406 + else
1407 + {
1408 + /* If LIMIT_VR is of the form [N1, N2], we need to build the
1409 + range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1410 + GT_EXPR. */
1411 + min = limit_vr->min;
1412 + }
1413 +
1414 + /* If the minimum value forces us to be out of bounds, simply punt.
1415 + It would be pointless to try and do anything more since this
1416 + all should be optimized away above us. */
1417 + if ((cond_code == GT_EXPR
1418 + && compare_values (min, max) == 0)
1419 + || is_overflow_infinity (min))
1420 + set_value_range_to_varying (vr_p);
1421 + else
1422 + {
1423 + /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1424 + if (cond_code == GT_EXPR)
1425 + {
1426 + tree one = build_int_cst (type, 1);
1427 + min = fold_build2 (PLUS_EXPR, type, min, one);
1428 + if (EXPR_P (min))
1429 + TREE_NO_WARNING (min) = 1;
1430 + }
1431 +
1432 + set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1433 + }
1434 + }
1435 + else
1436 + gcc_unreachable ();
1437 +
1438 + /* If VAR already had a known range, it may happen that the new
1439 + range we have computed and VAR's range are not compatible. For
1440 + instance,
1441 +
1442 + if (p_5 == NULL)
1443 + p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1444 + x_7 = p_6->fld;
1445 + p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1446 +
1447 + While the above comes from a faulty program, it will cause an ICE
1448 + later because p_8 and p_6 will have incompatible ranges and at
1449 + the same time will be considered equivalent. A similar situation
1450 + would arise from
1451 +
1452 + if (i_5 > 10)
1453 + i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1454 + if (i_5 < 5)
1455 + i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1456 +
1457 + Again i_6 and i_7 will have incompatible ranges. It would be
1458 + pointless to try and do anything with i_7's range because
1459 + anything dominated by 'if (i_5 < 5)' will be optimized away.
1460 + Note, due to the way in which simulation proceeds, the statement
1461 + i_7 = ASSERT_EXPR <...> would never be visited because the
1462 + conditional 'if (i_5 < 5)' always evaluates to false. However,
1463 + this extra check does not hurt and may protect against future
1464 + changes to VRP that may get into a situation similar to the
1465 + NULL pointer dereference example.
1466 +
1467 + Note that these compatibility tests are only needed when dealing
1468 + with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1469 + are both anti-ranges, they will always be compatible, because two
1470 + anti-ranges will always have a non-empty intersection. */
1471 +
1472 + var_vr = get_value_range (var);
1473 +
1474 + /* We may need to make adjustments when VR_P and VAR_VR are numeric
1475 + ranges or anti-ranges. */
1476 + if (vr_p->type == VR_VARYING
1477 + || vr_p->type == VR_UNDEFINED
1478 + || var_vr->type == VR_VARYING
1479 + || var_vr->type == VR_UNDEFINED
1480 + || symbolic_range_p (vr_p)
1481 + || symbolic_range_p (var_vr))
1482 + return;
1483 +
1484 + if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1485 + {
1486 + /* If the two ranges have a non-empty intersection, we can
1487 + refine the resulting range. Since the assert expression
1488 + creates an equivalency and at the same time it asserts a
1489 + predicate, we can take the intersection of the two ranges to
1490 + get better precision. */
1491 + if (value_ranges_intersect_p (var_vr, vr_p))
1492 + {
1493 + /* Use the larger of the two minimums. */
1494 + if (compare_values (vr_p->min, var_vr->min) == -1)
1495 + min = var_vr->min;
1496 + else
1497 + min = vr_p->min;
1498 +
1499 + /* Use the smaller of the two maximums. */
1500 + if (compare_values (vr_p->max, var_vr->max) == 1)
1501 + max = var_vr->max;
1502 + else
1503 + max = vr_p->max;
1504 +
1505 + set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1506 + }
1507 + else
1508 + {
1509 + /* The two ranges do not intersect, set the new range to
1510 + VARYING, because we will not be able to do anything
1511 + meaningful with it. */
1512 + set_value_range_to_varying (vr_p);
1513 + }
1514 + }
1515 + else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1516 + || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1517 + {
1518 + /* A range and an anti-range will cancel each other only if
1519 + their ends are the same. For instance, in the example above,
1520 + p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1521 + so VR_P should be set to VR_VARYING. */
1522 + if (compare_values (var_vr->min, vr_p->min) == 0
1523 + && compare_values (var_vr->max, vr_p->max) == 0)
1524 + set_value_range_to_varying (vr_p);
1525 + else
1526 + {
1527 + tree min, max, anti_min, anti_max, real_min, real_max;
1528 + int cmp;
1529 +
1530 + /* We want to compute the logical AND of the two ranges;
1531 + there are three cases to consider.
1532 +
1533 +
1534 + 1. The VR_ANTI_RANGE range is completely within the
1535 + VR_RANGE and the endpoints of the ranges are
1536 + different. In that case the resulting range
1537 + should be whichever range is more precise.
1538 + Typically that will be the VR_RANGE.
1539 +
1540 + 2. The VR_ANTI_RANGE is completely disjoint from
1541 + the VR_RANGE. In this case the resulting range
1542 + should be the VR_RANGE.
1543 +
1544 + 3. There is some overlap between the VR_ANTI_RANGE
1545 + and the VR_RANGE.
1546 +
1547 + 3a. If the high limit of the VR_ANTI_RANGE resides
1548 + within the VR_RANGE, then the result is a new
1549 + VR_RANGE starting at the high limit of the
1550 + VR_ANTI_RANGE + 1 and extending to the
1551 + high limit of the original VR_RANGE.
1552 +
1553 + 3b. If the low limit of the VR_ANTI_RANGE resides
1554 + within the VR_RANGE, then the result is a new
1555 + VR_RANGE starting at the low limit of the original
1556 + VR_RANGE and extending to the low limit of the
1557 + VR_ANTI_RANGE - 1. */
1558 + if (vr_p->type == VR_ANTI_RANGE)
1559 + {
1560 + anti_min = vr_p->min;
1561 + anti_max = vr_p->max;
1562 + real_min = var_vr->min;
1563 + real_max = var_vr->max;
1564 + }
1565 + else
1566 + {
1567 + anti_min = var_vr->min;
1568 + anti_max = var_vr->max;
1569 + real_min = vr_p->min;
1570 + real_max = vr_p->max;
1571 + }
1572 +
1573 +
1574 + /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1575 + not including any endpoints. */
1576 + if (compare_values (anti_max, real_max) == -1
1577 + && compare_values (anti_min, real_min) == 1)
1578 + {
1579 + set_value_range (vr_p, VR_RANGE, real_min,
1580 + real_max, vr_p->equiv);
1581 + }
1582 + /* Case 2, VR_ANTI_RANGE completely disjoint from
1583 + VR_RANGE. */
1584 + else if (compare_values (anti_min, real_max) == 1
1585 + || compare_values (anti_max, real_min) == -1)
1586 + {
1587 + set_value_range (vr_p, VR_RANGE, real_min,
1588 + real_max, vr_p->equiv);
1589 + }
1590 + /* Case 3a, the anti-range extends into the low
1591 + part of the real range. Thus creating a new
1592 + low for the real range. */
1593 + else if (((cmp = compare_values (anti_max, real_min)) == 1
1594 + || cmp == 0)
1595 + && compare_values (anti_max, real_max) == -1)
1596 + {
1597 + gcc_assert (!is_positive_overflow_infinity (anti_max));
1598 + if (needs_overflow_infinity (TREE_TYPE (anti_max))
1599 + && vrp_val_is_max (anti_max))
1600 + {
1601 + if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1602 + {
1603 + set_value_range_to_varying (vr_p);
1604 + return;
1605 + }
1606 + min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1607 + }
1608 + else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1609 + min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1610 + anti_max,
1611 + build_int_cst (TREE_TYPE (var_vr->min), 1));
1612 + else
1613 + min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1614 + anti_max, size_int (1));
1615 + max = real_max;
1616 + set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1617 + }
1618 + /* Case 3b, the anti-range extends into the high
1619 + part of the real range. Thus creating a new
1620 + high for the real range. Thus creating a new
1621 + else if (compare_values (anti_min, real_min) == 1
1622 + && ((cmp = compare_values (anti_min, real_max)) == -1
1623 + || cmp == 0))
1624 + {
1625 + gcc_assert (!is_negative_overflow_infinity (anti_min));
1626 + if (needs_overflow_infinity (TREE_TYPE (anti_min))
1627 + && vrp_val_is_min (anti_min))
1628 + {
1629 + if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1630 + {
1631 + set_value_range_to_varying (vr_p);
1632 + return;
1633 + }
1634 + max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1635 + }
1636 + else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1637 + max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1638 + anti_min,
1639 + build_int_cst (TREE_TYPE (var_vr->min), 1));
1640 + else
1641 + max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1642 + anti_min,
1643 + size_int (-1));
1644 + min = real_min;
1645 + set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1646 + }
1647 + }
1648 + }
1649 +}
1650 +
1651 +
1652 +/* Extract range information from SSA name VAR and store it in VR. If
1653 + VAR has an interesting range, use it. Otherwise, create the
1654 + range [VAR, VAR] and return it. This is useful in situations where
1655 + we may have conditionals testing values of VARYING names. For
1656 + instance,
1657 +
1658 + x_3 = y_5;
1659 + if (x_3 > y_5)
1660 + ...
1661 +
1662 + Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1663 + always false. */
1664 +
1665 +static void
1666 +extract_range_from_ssa_name (value_range_t *vr, tree var)
1667 +{
1668 + value_range_t *var_vr = get_value_range (var);
1669 +
1670 + if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1671 + copy_value_range (vr, var_vr);
1672 + else
1673 + set_value_range (vr, VR_RANGE, var, var, NULL);
1674 +
1675 + add_equivalence (&vr->equiv, var);
1676 +}
1677 +
1678 +
1679 +/* Wrapper around int_const_binop. If the operation overflows and we
1680 + are not using wrapping arithmetic, then adjust the result to be
1681 + -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1682 + NULL_TREE if we need to use an overflow infinity representation but
1683 + the type does not support it. */
1684 +
1685 +static tree
1686 +vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1687 +{
1688 + tree res;
1689 +
1690 + res = int_const_binop (code, val1, val2, 0);
1691 +
1692 + /* If we are not using wrapping arithmetic, operate symbolically
1693 + on -INF and +INF. */
1694 + if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1695 + {
1696 + int checkz = compare_values (res, val1);
1697 + bool overflow = false;
1698 +
1699 + /* Ensure that res = val1 [+*] val2 >= val1
1700 + or that res = val1 - val2 <= val1. */
1701 + if ((code == PLUS_EXPR
1702 + && !(checkz == 1 || checkz == 0))
1703 + || (code == MINUS_EXPR
1704 + && !(checkz == 0 || checkz == -1)))
1705 + {
1706 + overflow = true;
1707 + }
1708 + /* Checking for multiplication overflow is done by dividing the
1709 + output of the multiplication by the first input of the
1710 + multiplication. If the result of that division operation is
1711 + not equal to the second input of the multiplication, then the
1712 + multiplication overflowed. */
1713 + else if (code == MULT_EXPR && !integer_zerop (val1))
1714 + {
1715 + tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1716 + res,
1717 + val1, 0);
1718 + int check = compare_values (tmp, val2);
1719 +
1720 + if (check != 0)
1721 + overflow = true;
1722 + }
1723 +
1724 + if (overflow)
1725 + {
1726 + res = copy_node (res);
1727 + TREE_OVERFLOW (res) = 1;
1728 + }
1729 +
1730 + }
1731 + else if ((TREE_OVERFLOW (res)
1732 + && !TREE_OVERFLOW (val1)
1733 + && !TREE_OVERFLOW (val2))
1734 + || is_overflow_infinity (val1)
1735 + || is_overflow_infinity (val2))
1736 + {
1737 + /* If the operation overflowed but neither VAL1 nor VAL2 are
1738 + overflown, return -INF or +INF depending on the operation
1739 + and the combination of signs of the operands. */
1740 + int sgn1 = tree_int_cst_sgn (val1);
1741 + int sgn2 = tree_int_cst_sgn (val2);
1742 +
1743 + if (needs_overflow_infinity (TREE_TYPE (res))
1744 + && !supports_overflow_infinity (TREE_TYPE (res)))
1745 + return NULL_TREE;
1746 +
1747 + /* We have to punt on adding infinities of different signs,
1748 + since we can't tell what the sign of the result should be.
1749 + Likewise for subtracting infinities of the same sign. */
1750 + if (((code == PLUS_EXPR && sgn1 != sgn2)
1751 + || (code == MINUS_EXPR && sgn1 == sgn2))
1752 + && is_overflow_infinity (val1)
1753 + && is_overflow_infinity (val2))
1754 + return NULL_TREE;
1755 +
1756 + /* Don't try to handle division or shifting of infinities. */
1757 + if ((code == TRUNC_DIV_EXPR
1758 + || code == FLOOR_DIV_EXPR
1759 + || code == CEIL_DIV_EXPR
1760 + || code == EXACT_DIV_EXPR
1761 + || code == ROUND_DIV_EXPR
1762 + || code == RSHIFT_EXPR)
1763 + && (is_overflow_infinity (val1)
1764 + || is_overflow_infinity (val2)))
1765 + return NULL_TREE;
1766 +
1767 + /* Notice that we only need to handle the restricted set of
1768 + operations handled by extract_range_from_binary_expr.
1769 + Among them, only multiplication, addition and subtraction
1770 + can yield overflow without overflown operands because we
1771 + are working with integral types only... except in the
1772 + case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1773 + for division too. */
1774 +
1775 + /* For multiplication, the sign of the overflow is given
1776 + by the comparison of the signs of the operands. */
1777 + if ((code == MULT_EXPR && sgn1 == sgn2)
1778 + /* For addition, the operands must be of the same sign
1779 + to yield an overflow. Its sign is therefore that
1780 + of one of the operands, for example the first. For
1781 + infinite operands X + -INF is negative, not positive. */
1782 + || (code == PLUS_EXPR
1783 + && (sgn1 >= 0
1784 + ? !is_negative_overflow_infinity (val2)
1785 + : is_positive_overflow_infinity (val2)))
1786 + /* For subtraction, non-infinite operands must be of
1787 + different signs to yield an overflow. Its sign is
1788 + therefore that of the first operand or the opposite of
1789 + that of the second operand. A first operand of 0 counts
1790 + as positive here, for the corner case 0 - (-INF), which
1791 + overflows, but must yield +INF. For infinite operands 0
1792 + - INF is negative, not positive. */
1793 + || (code == MINUS_EXPR
1794 + && (sgn1 >= 0
1795 + ? !is_positive_overflow_infinity (val2)
1796 + : is_negative_overflow_infinity (val2)))
1797 + /* We only get in here with positive shift count, so the
1798 + overflow direction is the same as the sign of val1.
1799 + Actually rshift does not overflow at all, but we only
1800 + handle the case of shifting overflowed -INF and +INF. */
1801 + || (code == RSHIFT_EXPR
1802 + && sgn1 >= 0)
1803 + /* For division, the only case is -INF / -1 = +INF. */
1804 + || code == TRUNC_DIV_EXPR
1805 + || code == FLOOR_DIV_EXPR
1806 + || code == CEIL_DIV_EXPR
1807 + || code == EXACT_DIV_EXPR
1808 + || code == ROUND_DIV_EXPR)
1809 + return (needs_overflow_infinity (TREE_TYPE (res))
1810 + ? positive_overflow_infinity (TREE_TYPE (res))
1811 + : TYPE_MAX_VALUE (TREE_TYPE (res)));
1812 + else
1813 + return (needs_overflow_infinity (TREE_TYPE (res))
1814 + ? negative_overflow_infinity (TREE_TYPE (res))
1815 + : TYPE_MIN_VALUE (TREE_TYPE (res)));
1816 + }
1817 +
1818 + return res;
1819 +}
1820 +
1821 +
1822 +/* Extract range information from a binary expression EXPR based on
1823 + the ranges of each of its operands and the expression code. */
1824 +
1825 +static void
1826 +extract_range_from_binary_expr (value_range_t *vr, tree expr)
1827 +{
1828 + enum tree_code code = TREE_CODE (expr);
1829 + enum value_range_type type;
1830 + tree op0, op1, min, max;
1831 + int cmp;
1832 + value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
1833 + value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
1834 +
1835 + /* Not all binary expressions can be applied to ranges in a
1836 + meaningful way. Handle only arithmetic operations. */
1837 + if (code != PLUS_EXPR
1838 + && code != MINUS_EXPR
1839 + && code != POINTER_PLUS_EXPR
1840 + && code != MULT_EXPR
1841 + && code != TRUNC_DIV_EXPR
1842 + && code != FLOOR_DIV_EXPR
1843 + && code != CEIL_DIV_EXPR
1844 + && code != EXACT_DIV_EXPR
1845 + && code != ROUND_DIV_EXPR
1846 + && code != RSHIFT_EXPR
1847 + && code != MIN_EXPR
1848 + && code != MAX_EXPR
1849 + && code != BIT_AND_EXPR
1850 + && code != TRUTH_ANDIF_EXPR
1851 + && code != TRUTH_ORIF_EXPR
1852 + && code != TRUTH_AND_EXPR
1853 + && code != TRUTH_OR_EXPR)
1854 + {
1855 + set_value_range_to_varying (vr);
1856 + return;
1857 + }
1858 +
1859 + /* Get value ranges for each operand. For constant operands, create
1860 + a new value range with the operand to simplify processing. */
1861 + op0 = TREE_OPERAND (expr, 0);
1862 + if (TREE_CODE (op0) == SSA_NAME)
1863 + vr0 = *(get_value_range (op0));
1864 + else if (is_gimple_min_invariant (op0))
1865 + set_value_range_to_value (&vr0, op0, NULL);
1866 + else
1867 + set_value_range_to_varying (&vr0);
1868 +
1869 + op1 = TREE_OPERAND (expr, 1);
1870 + if (TREE_CODE (op1) == SSA_NAME)
1871 + vr1 = *(get_value_range (op1));
1872 + else if (is_gimple_min_invariant (op1))
1873 + set_value_range_to_value (&vr1, op1, NULL);
1874 + else
1875 + set_value_range_to_varying (&vr1);
1876 +
1877 + /* If either range is UNDEFINED, so is the result. */
1878 + if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
1879 + {
1880 + set_value_range_to_undefined (vr);
1881 + return;
1882 + }
1883 +
1884 + /* The type of the resulting value range defaults to VR0.TYPE. */
1885 + type = vr0.type;
1886 +
1887 + /* Refuse to operate on VARYING ranges, ranges of different kinds
1888 + and symbolic ranges. As an exception, we allow BIT_AND_EXPR
1889 + because we may be able to derive a useful range even if one of
1890 + the operands is VR_VARYING or symbolic range. TODO, we may be
1891 + able to derive anti-ranges in some cases. */
1892 + if (code != BIT_AND_EXPR
1893 + && code != TRUTH_AND_EXPR
1894 + && code != TRUTH_OR_EXPR
1895 + && (vr0.type == VR_VARYING
1896 + || vr1.type == VR_VARYING
1897 + || vr0.type != vr1.type
1898 + || symbolic_range_p (&vr0)
1899 + || symbolic_range_p (&vr1)))
1900 + {
1901 + set_value_range_to_varying (vr);
1902 + return;
1903 + }
1904 +
1905 + /* Now evaluate the expression to determine the new range. */
1906 + if (POINTER_TYPE_P (TREE_TYPE (expr))
1907 + || POINTER_TYPE_P (TREE_TYPE (op0))
1908 + || POINTER_TYPE_P (TREE_TYPE (op1)))
1909 + {
1910 + if (code == MIN_EXPR || code == MAX_EXPR)
1911 + {
1912 + /* For MIN/MAX expressions with pointers, we only care about
1913 + nullness, if both are non null, then the result is nonnull.
1914 + If both are null, then the result is null. Otherwise they
1915 + are varying. */
1916 + if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
1917 + set_value_range_to_nonnull (vr, TREE_TYPE (expr));
1918 + else if (range_is_null (&vr0) && range_is_null (&vr1))
1919 + set_value_range_to_null (vr, TREE_TYPE (expr));
1920 + else
1921 + set_value_range_to_varying (vr);
1922 +
1923 + return;
1924 + }
1925 + gcc_assert (code == POINTER_PLUS_EXPR);
1926 + /* For pointer types, we are really only interested in asserting
1927 + whether the expression evaluates to non-NULL. */
1928 + if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
1929 + set_value_range_to_nonnull (vr, TREE_TYPE (expr));
1930 + else if (range_is_null (&vr0) && range_is_null (&vr1))
1931 + set_value_range_to_null (vr, TREE_TYPE (expr));
1932 + else
1933 + set_value_range_to_varying (vr);
1934 +
1935 + return;
1936 + }
1937 +
1938 + /* For integer ranges, apply the operation to each end of the
1939 + range and see what we end up with. */
1940 + if (code == TRUTH_ANDIF_EXPR
1941 + || code == TRUTH_ORIF_EXPR
1942 + || code == TRUTH_AND_EXPR
1943 + || code == TRUTH_OR_EXPR)
1944 + {
1945 + /* If one of the operands is zero, we know that the whole
1946 + expression evaluates to zero. */
1947 + if (code == TRUTH_AND_EXPR
1948 + && ((vr0.type == VR_RANGE
1949 + && integer_zerop (vr0.min)
1950 + && integer_zerop (vr0.max))
1951 + || (vr1.type == VR_RANGE
1952 + && integer_zerop (vr1.min)
1953 + && integer_zerop (vr1.max))))
1954 + {
1955 + type = VR_RANGE;
1956 + min = max = build_int_cst (TREE_TYPE (expr), 0);
1957 + }
1958 + /* If one of the operands is one, we know that the whole
1959 + expression evaluates to one. */
1960 + else if (code == TRUTH_OR_EXPR
1961 + && ((vr0.type == VR_RANGE
1962 + && integer_onep (vr0.min)
1963 + && integer_onep (vr0.max))
1964 + || (vr1.type == VR_RANGE
1965 + && integer_onep (vr1.min)
1966 + && integer_onep (vr1.max))))
1967 + {
1968 + type = VR_RANGE;
1969 + min = max = build_int_cst (TREE_TYPE (expr), 1);
1970 + }
1971 + else if (vr0.type != VR_VARYING
1972 + && vr1.type != VR_VARYING
1973 + && vr0.type == vr1.type
1974 + && !symbolic_range_p (&vr0)
1975 + && !overflow_infinity_range_p (&vr0)
1976 + && !symbolic_range_p (&vr1)
1977 + && !overflow_infinity_range_p (&vr1))
1978 + {
1979 + /* Boolean expressions cannot be folded with int_const_binop. */
1980 + min = fold_binary (code, TREE_TYPE (expr), vr0.min, vr1.min);
1981 + max = fold_binary (code, TREE_TYPE (expr), vr0.max, vr1.max);
1982 + }
1983 + else
1984 + {
1985 + /* The result of a TRUTH_*_EXPR is always true or false. */
1986 + set_value_range_to_truthvalue (vr, TREE_TYPE (expr));
1987 + return;
1988 + }
1989 + }
1990 + else if (code == PLUS_EXPR
1991 + || code == MIN_EXPR
1992 + || code == MAX_EXPR)
1993 + {
1994 + /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
1995 + VR_VARYING. It would take more effort to compute a precise
1996 + range for such a case. For example, if we have op0 == 1 and
1997 + op1 == -1 with their ranges both being ~[0,0], we would have
1998 + op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
1999 + Note that we are guaranteed to have vr0.type == vr1.type at
2000 + this point. */
2001 + if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2002 + {
2003 + set_value_range_to_varying (vr);
2004 + return;
2005 + }
2006 +
2007 + /* For operations that make the resulting range directly
2008 + proportional to the original ranges, apply the operation to
2009 + the same end of each range. */
2010 + min = vrp_int_const_binop (code, vr0.min, vr1.min);
2011 + max = vrp_int_const_binop (code, vr0.max, vr1.max);
2012 + }
2013 + else if (code == MULT_EXPR
2014 + || code == TRUNC_DIV_EXPR
2015 + || code == FLOOR_DIV_EXPR
2016 + || code == CEIL_DIV_EXPR
2017 + || code == EXACT_DIV_EXPR
2018 + || code == ROUND_DIV_EXPR
2019 + || code == RSHIFT_EXPR)
2020 + {
2021 + tree val[4];
2022 + size_t i;
2023 + bool sop;
2024 +
2025 + /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2026 + drop to VR_VARYING. It would take more effort to compute a
2027 + precise range for such a case. For example, if we have
2028 + op0 == 65536 and op1 == 65536 with their ranges both being
2029 + ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2030 + we cannot claim that the product is in ~[0,0]. Note that we
2031 + are guaranteed to have vr0.type == vr1.type at this
2032 + point. */
2033 + if (code == MULT_EXPR
2034 + && vr0.type == VR_ANTI_RANGE
2035 + && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2036 + {
2037 + set_value_range_to_varying (vr);
2038 + return;
2039 + }
2040 +
2041 + /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2042 + then drop to VR_VARYING. Outside of this range we get undefined
2043 + behavior from the shift operation. We cannot even trust
2044 + SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2045 + shifts, and the operation at the tree level may be widened. */
2046 + if (code == RSHIFT_EXPR)
2047 + {
2048 + if (vr1.type == VR_ANTI_RANGE
2049 + || !vrp_expr_computes_nonnegative (op1, &sop)
2050 + || (operand_less_p
2051 + (build_int_cst (TREE_TYPE (vr1.max),
2052 + TYPE_PRECISION (TREE_TYPE (expr)) - 1),
2053 + vr1.max) != 0))
2054 + {
2055 + set_value_range_to_varying (vr);
2056 + return;
2057 + }
2058 + }
2059 +
2060 + /* Multiplications and divisions are a bit tricky to handle,
2061 + depending on the mix of signs we have in the two ranges, we
2062 + need to operate on different values to get the minimum and
2063 + maximum values for the new range. One approach is to figure
2064 + out all the variations of range combinations and do the
2065 + operations.
2066 +
2067 + However, this involves several calls to compare_values and it
2068 + is pretty convoluted. It's simpler to do the 4 operations
2069 + (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2070 + MAX1) and then figure the smallest and largest values to form
2071 + the new range. */
2072 +
2073 + /* Divisions by zero result in a VARYING value. */
2074 + else if (code != MULT_EXPR
2075 + && (vr0.type == VR_ANTI_RANGE || range_includes_zero_p (&vr1)))
2076 + {
2077 + set_value_range_to_varying (vr);
2078 + return;
2079 + }
2080 +
2081 + /* Compute the 4 cross operations. */
2082 + sop = false;
2083 + val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2084 + if (val[0] == NULL_TREE)
2085 + sop = true;
2086 +
2087 + if (vr1.max == vr1.min)
2088 + val[1] = NULL_TREE;
2089 + else
2090 + {
2091 + val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2092 + if (val[1] == NULL_TREE)
2093 + sop = true;
2094 + }
2095 +
2096 + if (vr0.max == vr0.min)
2097 + val[2] = NULL_TREE;
2098 + else
2099 + {
2100 + val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2101 + if (val[2] == NULL_TREE)
2102 + sop = true;
2103 + }
2104 +
2105 + if (vr0.min == vr0.max || vr1.min == vr1.max)
2106 + val[3] = NULL_TREE;
2107 + else
2108 + {
2109 + val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2110 + if (val[3] == NULL_TREE)
2111 + sop = true;
2112 + }
2113 +
2114 + if (sop)
2115 + {
2116 + set_value_range_to_varying (vr);
2117 + return;
2118 + }
2119 +
2120 + /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2121 + of VAL[i]. */
2122 + min = val[0];
2123 + max = val[0];
2124 + for (i = 1; i < 4; i++)
2125 + {
2126 + if (!is_gimple_min_invariant (min)
2127 + || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2128 + || !is_gimple_min_invariant (max)
2129 + || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2130 + break;
2131 +
2132 + if (val[i])
2133 + {
2134 + if (!is_gimple_min_invariant (val[i])
2135 + || (TREE_OVERFLOW (val[i])
2136 + && !is_overflow_infinity (val[i])))
2137 + {
2138 + /* If we found an overflowed value, set MIN and MAX
2139 + to it so that we set the resulting range to
2140 + VARYING. */
2141 + min = max = val[i];
2142 + break;
2143 + }
2144 +
2145 + if (compare_values (val[i], min) == -1)
2146 + min = val[i];
2147 +
2148 + if (compare_values (val[i], max) == 1)
2149 + max = val[i];
2150 + }
2151 + }
2152 + }
2153 + else if (code == MINUS_EXPR)
2154 + {
2155 + /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2156 + VR_VARYING. It would take more effort to compute a precise
2157 + range for such a case. For example, if we have op0 == 1 and
2158 + op1 == 1 with their ranges both being ~[0,0], we would have
2159 + op0 - op1 == 0, so we cannot claim that the difference is in
2160 + ~[0,0]. Note that we are guaranteed to have
2161 + vr0.type == vr1.type at this point. */
2162 + if (vr0.type == VR_ANTI_RANGE)
2163 + {
2164 + set_value_range_to_varying (vr);
2165 + return;
2166 + }
2167 +
2168 + /* For MINUS_EXPR, apply the operation to the opposite ends of
2169 + each range. */
2170 + min = vrp_int_const_binop (code, vr0.min, vr1.max);
2171 + max = vrp_int_const_binop (code, vr0.max, vr1.min);
2172 + }
2173 + else if (code == BIT_AND_EXPR)
2174 + {
2175 + if (vr0.type == VR_RANGE
2176 + && vr0.min == vr0.max
2177 + && TREE_CODE (vr0.max) == INTEGER_CST
2178 + && !TREE_OVERFLOW (vr0.max)
2179 + && tree_int_cst_sgn (vr0.max) >= 0)
2180 + {
2181 + min = build_int_cst (TREE_TYPE (expr), 0);
2182 + max = vr0.max;
2183 + }
2184 + else if (vr1.type == VR_RANGE
2185 + && vr1.min == vr1.max
2186 + && TREE_CODE (vr1.max) == INTEGER_CST
2187 + && !TREE_OVERFLOW (vr1.max)
2188 + && tree_int_cst_sgn (vr1.max) >= 0)
2189 + {
2190 + type = VR_RANGE;
2191 + min = build_int_cst (TREE_TYPE (expr), 0);
2192 + max = vr1.max;
2193 + }
2194 + else
2195 + {
2196 + set_value_range_to_varying (vr);
2197 + return;
2198 + }
2199 + }
2200 + else
2201 + gcc_unreachable ();
2202 +
2203 + /* If either MIN or MAX overflowed, then set the resulting range to
2204 + VARYING. But we do accept an overflow infinity
2205 + representation. */
2206 + if (min == NULL_TREE
2207 + || !is_gimple_min_invariant (min)
2208 + || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2209 + || max == NULL_TREE
2210 + || !is_gimple_min_invariant (max)
2211 + || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2212 + {
2213 + set_value_range_to_varying (vr);
2214 + return;
2215 + }
2216 +
2217 + /* We punt if:
2218 + 1) [-INF, +INF]
2219 + 2) [-INF, +-INF(OVF)]
2220 + 3) [+-INF(OVF), +INF]
2221 + 4) [+-INF(OVF), +-INF(OVF)]
2222 + We learn nothing when we have INF and INF(OVF) on both sides.
2223 + Note that we do accept [-INF, -INF] and [+INF, +INF] without
2224 + overflow. */
2225 + if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2226 + && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2227 + {
2228 + set_value_range_to_varying (vr);
2229 + return;
2230 + }
2231 +
2232 + cmp = compare_values (min, max);
2233 + if (cmp == -2 || cmp == 1)
2234 + {
2235 + /* If the new range has its limits swapped around (MIN > MAX),
2236 + then the operation caused one of them to wrap around, mark
2237 + the new range VARYING. */
2238 + set_value_range_to_varying (vr);
2239 + }
2240 + else
2241 + set_value_range (vr, type, min, max, NULL);
2242 +}
2243 +
2244 +
2245 +/* Extract range information from a unary expression EXPR based on
2246 + the range of its operand and the expression code. */
2247 +
2248 +static void
2249 +extract_range_from_unary_expr (value_range_t *vr, tree expr)
2250 +{
2251 + enum tree_code code = TREE_CODE (expr);
2252 + tree min, max, op0;
2253 + int cmp;
2254 + value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2255 +
2256 + /* Refuse to operate on certain unary expressions for which we
2257 + cannot easily determine a resulting range. */
2258 + if (code == FIX_TRUNC_EXPR
2259 + || code == FLOAT_EXPR
2260 + || code == BIT_NOT_EXPR
2261 + || code == NON_LVALUE_EXPR
2262 + || code == CONJ_EXPR)
2263 + {
2264 + set_value_range_to_varying (vr);
2265 + return;
2266 + }
2267 +
2268 + /* Get value ranges for the operand. For constant operands, create
2269 + a new value range with the operand to simplify processing. */
2270 + op0 = TREE_OPERAND (expr, 0);
2271 + if (TREE_CODE (op0) == SSA_NAME)
2272 + vr0 = *(get_value_range (op0));
2273 + else if (is_gimple_min_invariant (op0))
2274 + set_value_range_to_value (&vr0, op0, NULL);
2275 + else
2276 + set_value_range_to_varying (&vr0);
2277 +
2278 + /* If VR0 is UNDEFINED, so is the result. */
2279 + if (vr0.type == VR_UNDEFINED)
2280 + {
2281 + set_value_range_to_undefined (vr);
2282 + return;
2283 + }
2284 +
2285 + /* Refuse to operate on symbolic ranges, or if neither operand is
2286 + a pointer or integral type. */
2287 + if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2288 + && !POINTER_TYPE_P (TREE_TYPE (op0)))
2289 + || (vr0.type != VR_VARYING
2290 + && symbolic_range_p (&vr0)))
2291 + {
2292 + set_value_range_to_varying (vr);
2293 + return;
2294 + }
2295 +
2296 + /* If the expression involves pointers, we are only interested in
2297 + determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2298 + if (POINTER_TYPE_P (TREE_TYPE (expr)) || POINTER_TYPE_P (TREE_TYPE (op0)))
2299 + {
2300 + bool sop;
2301 +
2302 + sop = false;
2303 + if (range_is_nonnull (&vr0)
2304 + || (tree_expr_nonzero_warnv_p (expr, &sop)
2305 + && !sop))
2306 + set_value_range_to_nonnull (vr, TREE_TYPE (expr));
2307 + else if (range_is_null (&vr0))
2308 + set_value_range_to_null (vr, TREE_TYPE (expr));
2309 + else
2310 + set_value_range_to_varying (vr);
2311 +
2312 + return;
2313 + }
2314 +
2315 + /* Handle unary expressions on integer ranges. */
2316 + if (code == NOP_EXPR || code == CONVERT_EXPR)
2317 + {
2318 + tree inner_type = TREE_TYPE (op0);
2319 + tree outer_type = TREE_TYPE (expr);
2320 +
2321 + /* If VR0 represents a simple range, then try to convert
2322 + the min and max values for the range to the same type
2323 + as OUTER_TYPE. If the results compare equal to VR0's
2324 + min and max values and the new min is still less than
2325 + or equal to the new max, then we can safely use the newly
2326 + computed range for EXPR. This allows us to compute
2327 + accurate ranges through many casts. */
2328 + if ((vr0.type == VR_RANGE
2329 + && !overflow_infinity_range_p (&vr0))
2330 + || (vr0.type == VR_VARYING
2331 + && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)))
2332 + {
2333 + tree new_min, new_max, orig_min, orig_max;
2334 +
2335 + /* Convert the input operand min/max to OUTER_TYPE. If
2336 + the input has no range information, then use the min/max
2337 + for the input's type. */
2338 + if (vr0.type == VR_RANGE)
2339 + {
2340 + orig_min = vr0.min;
2341 + orig_max = vr0.max;
2342 + }
2343 + else
2344 + {
2345 + orig_min = TYPE_MIN_VALUE (inner_type);
2346 + orig_max = TYPE_MAX_VALUE (inner_type);
2347 + }
2348 +
2349 + new_min = fold_convert (outer_type, orig_min);
2350 + new_max = fold_convert (outer_type, orig_max);
2351 +
2352 + /* Verify the new min/max values are gimple values and
2353 + that they compare equal to the original input's
2354 + min/max values. */
2355 + if (is_gimple_val (new_min)
2356 + && is_gimple_val (new_max)
2357 + && tree_int_cst_equal (new_min, orig_min)
2358 + && tree_int_cst_equal (new_max, orig_max)
2359 + && (!is_overflow_infinity (new_min)
2360 + || !is_overflow_infinity (new_max))
2361 + && (cmp = compare_values (new_min, new_max)) <= 0
2362 + && cmp >= -1)
2363 + {
2364 + set_value_range (vr, VR_RANGE, new_min, new_max, vr->equiv);
2365 + return;
2366 + }
2367 + }
2368 +
2369 + /* When converting types of different sizes, set the result to
2370 + VARYING. Things like sign extensions and precision loss may
2371 + change the range. For instance, if x_3 is of type 'long long
2372 + int' and 'y_5 = (unsigned short) x_3', if x_3 is ~[0, 0], it
2373 + is impossible to know at compile time whether y_5 will be
2374 + ~[0, 0]. */
2375 + if (TYPE_SIZE (inner_type) != TYPE_SIZE (outer_type)
2376 + || TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type))
2377 + {
2378 + set_value_range_to_varying (vr);
2379 + return;
2380 + }
2381 + }
2382 +
2383 + /* Conversion of a VR_VARYING value to a wider type can result
2384 + in a usable range. So wait until after we've handled conversions
2385 + before dropping the result to VR_VARYING if we had a source
2386 + operand that is VR_VARYING. */
2387 + if (vr0.type == VR_VARYING)
2388 + {
2389 + set_value_range_to_varying (vr);
2390 + return;
2391 + }
2392 +
2393 + /* Apply the operation to each end of the range and see what we end
2394 + up with. */
2395 + if (code == NEGATE_EXPR
2396 + && !TYPE_UNSIGNED (TREE_TYPE (expr)))
2397 + {
2398 + /* NEGATE_EXPR flips the range around. We need to treat
2399 + TYPE_MIN_VALUE specially. */
2400 + if (is_positive_overflow_infinity (vr0.max))
2401 + min = negative_overflow_infinity (TREE_TYPE (expr));
2402 + else if (is_negative_overflow_infinity (vr0.max))
2403 + min = positive_overflow_infinity (TREE_TYPE (expr));
2404 + else if (!vrp_val_is_min (vr0.max))
2405 + min = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.max);
2406 + else if (needs_overflow_infinity (TREE_TYPE (expr)))
2407 + {
2408 + if (supports_overflow_infinity (TREE_TYPE (expr))
2409 + && !is_overflow_infinity (vr0.min)
2410 + && !vrp_val_is_min (vr0.min))
2411 + min = positive_overflow_infinity (TREE_TYPE (expr));
2412 + else
2413 + {
2414 + set_value_range_to_varying (vr);
2415 + return;
2416 + }
2417 + }
2418 + else
2419 + min = TYPE_MIN_VALUE (TREE_TYPE (expr));
2420 +
2421 + if (is_positive_overflow_infinity (vr0.min))
2422 + max = negative_overflow_infinity (TREE_TYPE (expr));
2423 + else if (is_negative_overflow_infinity (vr0.min))
2424 + max = positive_overflow_infinity (TREE_TYPE (expr));
2425 + else if (!vrp_val_is_min (vr0.min))
2426 + max = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.min);
2427 + else if (needs_overflow_infinity (TREE_TYPE (expr)))
2428 + {
2429 + if (supports_overflow_infinity (TREE_TYPE (expr)))
2430 + max = positive_overflow_infinity (TREE_TYPE (expr));
2431 + else
2432 + {
2433 + set_value_range_to_varying (vr);
2434 + return;
2435 + }
2436 + }
2437 + else
2438 + max = TYPE_MIN_VALUE (TREE_TYPE (expr));
2439 + }
2440 + else if (code == NEGATE_EXPR
2441 + && TYPE_UNSIGNED (TREE_TYPE (expr)))
2442 + {
2443 + if (!range_includes_zero_p (&vr0))
2444 + {
2445 + max = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.min);
2446 + min = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.max);
2447 + }
2448 + else
2449 + {
2450 + if (range_is_null (&vr0))
2451 + set_value_range_to_null (vr, TREE_TYPE (expr));
2452 + else
2453 + set_value_range_to_varying (vr);
2454 + return;
2455 + }
2456 + }
2457 + else if (code == ABS_EXPR
2458 + && !TYPE_UNSIGNED (TREE_TYPE (expr)))
2459 + {
2460 + /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2461 + useful range. */
2462 + if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (expr))
2463 + && ((vr0.type == VR_RANGE
2464 + && vrp_val_is_min (vr0.min))
2465 + || (vr0.type == VR_ANTI_RANGE
2466 + && !vrp_val_is_min (vr0.min)
2467 + && !range_includes_zero_p (&vr0))))
2468 + {
2469 + set_value_range_to_varying (vr);
2470 + return;
2471 + }
2472 +
2473 + /* ABS_EXPR may flip the range around, if the original range
2474 + included negative values. */
2475 + if (is_overflow_infinity (vr0.min))
2476 + min = positive_overflow_infinity (TREE_TYPE (expr));
2477 + else if (!vrp_val_is_min (vr0.min))
2478 + min = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.min);
2479 + else if (!needs_overflow_infinity (TREE_TYPE (expr)))
2480 + min = TYPE_MAX_VALUE (TREE_TYPE (expr));
2481 + else if (supports_overflow_infinity (TREE_TYPE (expr)))
2482 + min = positive_overflow_infinity (TREE_TYPE (expr));
2483 + else
2484 + {
2485 + set_value_range_to_varying (vr);
2486 + return;
2487 + }
2488 +
2489 + if (is_overflow_infinity (vr0.max))
2490 + max = positive_overflow_infinity (TREE_TYPE (expr));
2491 + else if (!vrp_val_is_min (vr0.max))
2492 + max = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.max);
2493 + else if (!needs_overflow_infinity (TREE_TYPE (expr)))
2494 + max = TYPE_MAX_VALUE (TREE_TYPE (expr));
2495 + else if (supports_overflow_infinity (TREE_TYPE (expr)))
2496 + max = positive_overflow_infinity (TREE_TYPE (expr));
2497 + else
2498 + {
2499 + set_value_range_to_varying (vr);
2500 + return;
2501 + }
2502 +
2503 + cmp = compare_values (min, max);
2504 +
2505 + /* If a VR_ANTI_RANGE contains zero, then we have
2506 + ~[-INF, min(MIN, MAX)]. */
2507 + if (vr0.type == VR_ANTI_RANGE)
2508 + {
2509 + if (range_includes_zero_p (&vr0))
2510 + {
2511 + /* Take the lower of the two values. */
2512 + if (cmp != 1)
2513 + max = min;
2514 +
2515 + /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2516 + or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2517 + flag_wrapv is set and the original anti-range doesn't include
2518 + TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2519 + if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (expr)))
2520 + {
2521 + tree type_min_value = TYPE_MIN_VALUE (TREE_TYPE (expr));
2522 +
2523 + min = (vr0.min != type_min_value
2524 + ? int_const_binop (PLUS_EXPR, type_min_value,
2525 + integer_one_node, 0)
2526 + : type_min_value);
2527 + }
2528 + else
2529 + {
2530 + if (overflow_infinity_range_p (&vr0))
2531 + min = negative_overflow_infinity (TREE_TYPE (expr));
2532 + else
2533 + min = TYPE_MIN_VALUE (TREE_TYPE (expr));
2534 + }
2535 + }
2536 + else
2537 + {
2538 + /* All else has failed, so create the range [0, INF], even for
2539 + flag_wrapv since TYPE_MIN_VALUE is in the original
2540 + anti-range. */
2541 + vr0.type = VR_RANGE;
2542 + min = build_int_cst (TREE_TYPE (expr), 0);
2543 + if (needs_overflow_infinity (TREE_TYPE (expr)))
2544 + {
2545 + if (supports_overflow_infinity (TREE_TYPE (expr)))
2546 + max = positive_overflow_infinity (TREE_TYPE (expr));
2547 + else
2548 + {
2549 + set_value_range_to_varying (vr);
2550 + return;
2551 + }
2552 + }
2553 + else
2554 + max = TYPE_MAX_VALUE (TREE_TYPE (expr));
2555 + }
2556 + }
2557 +
2558 + /* If the range contains zero then we know that the minimum value in the
2559 + range will be zero. */
2560 + else if (range_includes_zero_p (&vr0))
2561 + {
2562 + if (cmp == 1)
2563 + max = min;
2564 + min = build_int_cst (TREE_TYPE (expr), 0);
2565 + }
2566 + else
2567 + {
2568 + /* If the range was reversed, swap MIN and MAX. */
2569 + if (cmp == 1)
2570 + {
2571 + tree t = min;
2572 + min = max;
2573 + max = t;
2574 + }
2575 + }
2576 + }
2577 + else
2578 + {
2579 + /* Otherwise, operate on each end of the range. */
2580 + min = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.min);
2581 + max = fold_unary_to_constant (code, TREE_TYPE (expr), vr0.max);
2582 +
2583 + if (needs_overflow_infinity (TREE_TYPE (expr)))
2584 + {
2585 + gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
2586 +
2587 + /* If both sides have overflowed, we don't know
2588 + anything. */
2589 + if ((is_overflow_infinity (vr0.min)
2590 + || TREE_OVERFLOW (min))
2591 + && (is_overflow_infinity (vr0.max)
2592 + || TREE_OVERFLOW (max)))
2593 + {
2594 + set_value_range_to_varying (vr);
2595 + return;
2596 + }
2597 +
2598 + if (is_overflow_infinity (vr0.min))
2599 + min = vr0.min;
2600 + else if (TREE_OVERFLOW (min))
2601 + {
2602 + if (supports_overflow_infinity (TREE_TYPE (expr)))
2603 + min = (tree_int_cst_sgn (min) >= 0
2604 + ? positive_overflow_infinity (TREE_TYPE (min))
2605 + : negative_overflow_infinity (TREE_TYPE (min)));
2606 + else
2607 + {
2608 + set_value_range_to_varying (vr);
2609 + return;
2610 + }
2611 + }
2612 +
2613 + if (is_overflow_infinity (vr0.max))
2614 + max = vr0.max;
2615 + else if (TREE_OVERFLOW (max))
2616 + {
2617 + if (supports_overflow_infinity (TREE_TYPE (expr)))
2618 + max = (tree_int_cst_sgn (max) >= 0
2619 + ? positive_overflow_infinity (TREE_TYPE (max))
2620 + : negative_overflow_infinity (TREE_TYPE (max)));
2621 + else
2622 + {
2623 + set_value_range_to_varying (vr);
2624 + return;
2625 + }
2626 + }
2627 + }
2628 + }
2629 +
2630 + cmp = compare_values (min, max);
2631 + if (cmp == -2 || cmp == 1)
2632 + {
2633 + /* If the new range has its limits swapped around (MIN > MAX),
2634 + then the operation caused one of them to wrap around, mark
2635 + the new range VARYING. */
2636 + set_value_range_to_varying (vr);
2637 + }
2638 + else
2639 + set_value_range (vr, vr0.type, min, max, NULL);
2640 +}
2641 +
2642 +
2643 +/* Extract range information from a conditional expression EXPR based on
2644 + the ranges of each of its operands and the expression code. */
2645 +
2646 +static void
2647 +extract_range_from_cond_expr (value_range_t *vr, tree expr)
2648 +{
2649 + tree op0, op1;
2650 + value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2651 + value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2652 +
2653 + /* Get value ranges for each operand. For constant operands, create
2654 + a new value range with the operand to simplify processing. */
2655 + op0 = COND_EXPR_THEN (expr);
2656 + if (TREE_CODE (op0) == SSA_NAME)
2657 + vr0 = *(get_value_range (op0));
2658 + else if (is_gimple_min_invariant (op0))
2659 + set_value_range_to_value (&vr0, op0, NULL);
2660 + else
2661 + set_value_range_to_varying (&vr0);
2662 +
2663 + op1 = COND_EXPR_ELSE (expr);
2664 + if (TREE_CODE (op1) == SSA_NAME)
2665 + vr1 = *(get_value_range (op1));
2666 + else if (is_gimple_min_invariant (op1))
2667 + set_value_range_to_value (&vr1, op1, NULL);
2668 + else
2669 + set_value_range_to_varying (&vr1);
2670 +
2671 + /* The resulting value range is the union of the operand ranges */
2672 + vrp_meet (&vr0, &vr1);
2673 + copy_value_range (vr, &vr0);
2674 +}
2675 +
2676 +
2677 +/* Extract range information from a comparison expression EXPR based
2678 + on the range of its operand and the expression code. */
2679 +
2680 +static void
2681 +extract_range_from_comparison (value_range_t *vr, tree expr)
2682 +{
2683 + bool sop = false;
2684 + tree val = vrp_evaluate_conditional_warnv (expr, false, &sop);
2685 +
2686 + /* A disadvantage of using a special infinity as an overflow
2687 + representation is that we lose the ability to record overflow
2688 + when we don't have an infinity. So we have to ignore a result
2689 + which relies on overflow. */
2690 +
2691 + if (val && !is_overflow_infinity (val) && !sop)
2692 + {
2693 + /* Since this expression was found on the RHS of an assignment,
2694 + its type may be different from _Bool. Convert VAL to EXPR's
2695 + type. */
2696 + val = fold_convert (TREE_TYPE (expr), val);
2697 + if (is_gimple_min_invariant (val))
2698 + set_value_range_to_value (vr, val, vr->equiv);
2699 + else
2700 + set_value_range (vr, VR_RANGE, val, val, vr->equiv);
2701 + }
2702 + else
2703 + /* The result of a comparison is always true or false. */
2704 + set_value_range_to_truthvalue (vr, TREE_TYPE (expr));
2705 +}
2706 +
2707 +
2708 +/* Try to compute a useful range out of expression EXPR and store it
2709 + in *VR. */
2710 +
2711 +static void
2712 +extract_range_from_expr (value_range_t *vr, tree expr)
2713 +{
2714 + enum tree_code code = TREE_CODE (expr);
2715 +
2716 + if (code == ASSERT_EXPR)
2717 + extract_range_from_assert (vr, expr);
2718 + else if (code == SSA_NAME)
2719 + extract_range_from_ssa_name (vr, expr);
2720 + else if (TREE_CODE_CLASS (code) == tcc_binary
2721 + || code == TRUTH_ANDIF_EXPR
2722 + || code == TRUTH_ORIF_EXPR
2723 + || code == TRUTH_AND_EXPR
2724 + || code == TRUTH_OR_EXPR
2725 + || code == TRUTH_XOR_EXPR)
2726 + extract_range_from_binary_expr (vr, expr);
2727 + else if (TREE_CODE_CLASS (code) == tcc_unary)
2728 + extract_range_from_unary_expr (vr, expr);
2729 + else if (code == COND_EXPR)
2730 + extract_range_from_cond_expr (vr, expr);
2731 + else if (TREE_CODE_CLASS (code) == tcc_comparison)
2732 + extract_range_from_comparison (vr, expr);
2733 + else if (is_gimple_min_invariant (expr))
2734 + set_value_range_to_value (vr, expr, NULL);
2735 + else
2736 + set_value_range_to_varying (vr);
2737 +
2738 + /* If we got a varying range from the tests above, try a final
2739 + time to derive a nonnegative or nonzero range. This time
2740 + relying primarily on generic routines in fold in conjunction
2741 + with range data. */
2742 + if (vr->type == VR_VARYING)
2743 + {
2744 + bool sop = false;
2745 +
2746 + if (INTEGRAL_TYPE_P (TREE_TYPE (expr))
2747 + && vrp_expr_computes_nonnegative (expr, &sop))
2748 + set_value_range_to_nonnegative (vr, TREE_TYPE (expr),
2749 + sop || is_overflow_infinity (expr));
2750 + else if (vrp_expr_computes_nonzero (expr, &sop)
2751 + && !sop)
2752 + set_value_range_to_nonnull (vr, TREE_TYPE (expr));
2753 + }
2754 +}
2755 +
2756 +/* Given a range VR, a LOOP and a variable VAR, determine whether it
2757 + would be profitable to adjust VR using scalar evolution information
2758 + for VAR. If so, update VR with the new limits. */
2759 +
2760 +static void
2761 +adjust_range_with_scev (value_range_t *vr, struct loop *loop, tree stmt,
2762 + tree var)
2763 +{
2764 + tree init, step, chrec, tmin, tmax, min, max, type;
2765 + enum ev_direction dir;
2766 +
2767 + /* TODO. Don't adjust anti-ranges. An anti-range may provide
2768 + better opportunities than a regular range, but I'm not sure. */
2769 + if (vr->type == VR_ANTI_RANGE)
2770 + return;
2771 +
2772 + /* Ensure that there are not values in the scev cache based on assumptions
2773 + on ranges of ssa names that were changed
2774 + (in set_value_range/set_value_range_to_varying). Preserve cached numbers
2775 + of iterations, that were computed before the start of VRP (we do not
2776 + recompute these each time to save the compile time). */
2777 + scev_reset_except_niters ();
2778 +
2779 + chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
2780 +
2781 + /* Like in PR19590, scev can return a constant function. */
2782 + if (is_gimple_min_invariant (chrec))
2783 + {
2784 + set_value_range_to_value (vr, chrec, vr->equiv);
2785 + return;
2786 + }
2787 +
2788 + if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
2789 + return;
2790 +
2791 + init = initial_condition_in_loop_num (chrec, loop->num);
2792 + step = evolution_part_in_loop_num (chrec, loop->num);
2793 +
2794 + /* If STEP is symbolic, we can't know whether INIT will be the
2795 + minimum or maximum value in the range. Also, unless INIT is
2796 + a simple expression, compare_values and possibly other functions
2797 + in tree-vrp won't be able to handle it. */
2798 + if (step == NULL_TREE
2799 + || !is_gimple_min_invariant (step)
2800 + || !valid_value_p (init))
2801 + return;
2802 +
2803 + dir = scev_direction (chrec);
2804 + if (/* Do not adjust ranges if we do not know whether the iv increases
2805 + or decreases, ... */
2806 + dir == EV_DIR_UNKNOWN
2807 + /* ... or if it may wrap. */
2808 + || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
2809 + true))
2810 + return;
2811 +
2812 + /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
2813 + negative_overflow_infinity and positive_overflow_infinity,
2814 + because we have concluded that the loop probably does not
2815 + wrap. */
2816 +
2817 + type = TREE_TYPE (var);
2818 + if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
2819 + tmin = lower_bound_in_type (type, type);
2820 + else
2821 + tmin = TYPE_MIN_VALUE (type);
2822 + if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
2823 + tmax = upper_bound_in_type (type, type);
2824 + else
2825 + tmax = TYPE_MAX_VALUE (type);
2826 +
2827 + if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
2828 + {
2829 + min = tmin;
2830 + max = tmax;
2831 +
2832 + /* For VARYING or UNDEFINED ranges, just about anything we get
2833 + from scalar evolutions should be better. */
2834 +
2835 + if (dir == EV_DIR_DECREASES)
2836 + max = init;
2837 + else
2838 + min = init;
2839 +
2840 + /* If we would create an invalid range, then just assume we
2841 + know absolutely nothing. This may be over-conservative,
2842 + but it's clearly safe, and should happen only in unreachable
2843 + parts of code, or for invalid programs. */
2844 + if (compare_values (min, max) == 1)
2845 + return;
2846 +
2847 + set_value_range (vr, VR_RANGE, min, max, vr->equiv);
2848 + }
2849 + else if (vr->type == VR_RANGE)
2850 + {
2851 + min = vr->min;
2852 + max = vr->max;
2853 +
2854 + if (dir == EV_DIR_DECREASES)
2855 + {
2856 + /* INIT is the maximum value. If INIT is lower than VR->MAX
2857 + but no smaller than VR->MIN, set VR->MAX to INIT. */
2858 + if (compare_values (init, max) == -1)
2859 + {
2860 + max = init;
2861 +
2862 + /* If we just created an invalid range with the minimum
2863 + greater than the maximum, we fail conservatively.
2864 + This should happen only in unreachable
2865 + parts of code, or for invalid programs. */
2866 + if (compare_values (min, max) == 1)
2867 + return;
2868 + }
2869 +
2870 + /* According to the loop information, the variable does not
2871 + overflow. If we think it does, probably because of an
2872 + overflow due to arithmetic on a different INF value,
2873 + reset now. */
2874 + if (is_negative_overflow_infinity (min))
2875 + min = tmin;
2876 + }
2877 + else
2878 + {
2879 + /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
2880 + if (compare_values (init, min) == 1)
2881 + {
2882 + min = init;
2883 +
2884 + /* Again, avoid creating invalid range by failing. */
2885 + if (compare_values (min, max) == 1)
2886 + return;
2887 + }
2888 +
2889 + if (is_positive_overflow_infinity (max))
2890 + max = tmax;
2891 + }
2892 +
2893 + set_value_range (vr, VR_RANGE, min, max, vr->equiv);
2894 + }
2895 +}
2896 +
2897 +/* Return true if VAR may overflow at STMT. This checks any available
2898 + loop information to see if we can determine that VAR does not
2899 + overflow. */
2900 +
2901 +static bool
2902 +vrp_var_may_overflow (tree var, tree stmt)
2903 +{
2904 + struct loop *l;
2905 + tree chrec, init, step;
2906 +
2907 + if (current_loops == NULL)
2908 + return true;
2909 +
2910 + l = loop_containing_stmt (stmt);
2911 + if (l == NULL)
2912 + return true;
2913 +
2914 + chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
2915 + if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
2916 + return true;
2917 +
2918 + init = initial_condition_in_loop_num (chrec, l->num);
2919 + step = evolution_part_in_loop_num (chrec, l->num);
2920 +
2921 + if (step == NULL_TREE
2922 + || !is_gimple_min_invariant (step)
2923 + || !valid_value_p (init))
2924 + return true;
2925 +
2926 + /* If we get here, we know something useful about VAR based on the
2927 + loop information. If it wraps, it may overflow. */
2928 +
2929 + if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
2930 + true))
2931 + return true;
2932 +
2933 + if (dump_file && (dump_flags & TDF_DETAILS) != 0)
2934 + {
2935 + print_generic_expr (dump_file, var, 0);
2936 + fprintf (dump_file, ": loop information indicates does not overflow\n");
2937 + }
2938 +
2939 + return false;
2940 +}
2941 +
2942 +
2943 +/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
2944 +
2945 + - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
2946 + all the values in the ranges.
2947 +
2948 + - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
2949 +
2950 + - Return NULL_TREE if it is not always possible to determine the
2951 + value of the comparison.
2952 +
2953 + Also set *STRICT_OVERFLOW_P to indicate whether a range with an
2954 + overflow infinity was used in the test. */
2955 +
2956 +
2957 +static tree
2958 +compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
2959 + bool *strict_overflow_p)
2960 +{
2961 + /* VARYING or UNDEFINED ranges cannot be compared. */
2962 + if (vr0->type == VR_VARYING
2963 + || vr0->type == VR_UNDEFINED
2964 + || vr1->type == VR_VARYING
2965 + || vr1->type == VR_UNDEFINED)
2966 + return NULL_TREE;
2967 +
2968 + /* Anti-ranges need to be handled separately. */
2969 + if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
2970 + {
2971 + /* If both are anti-ranges, then we cannot compute any
2972 + comparison. */
2973 + if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
2974 + return NULL_TREE;
2975 +
2976 + /* These comparisons are never statically computable. */
2977 + if (comp == GT_EXPR
2978 + || comp == GE_EXPR
2979 + || comp == LT_EXPR
2980 + || comp == LE_EXPR)
2981 + return NULL_TREE;
2982 +
2983 + /* Equality can be computed only between a range and an
2984 + anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
2985 + if (vr0->type == VR_RANGE)
2986 + {
2987 + /* To simplify processing, make VR0 the anti-range. */
2988 + value_range_t *tmp = vr0;
2989 + vr0 = vr1;
2990 + vr1 = tmp;
2991 + }
2992 +
2993 + gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
2994 +
2995 + if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
2996 + && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
2997 + return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
2998 +
2999 + return NULL_TREE;
3000 + }
3001 +
3002 + if (!usable_range_p (vr0, strict_overflow_p)
3003 + || !usable_range_p (vr1, strict_overflow_p))
3004 + return NULL_TREE;
3005 +
3006 + /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3007 + operands around and change the comparison code. */
3008 + if (comp == GT_EXPR || comp == GE_EXPR)
3009 + {
3010 + value_range_t *tmp;
3011 + comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3012 + tmp = vr0;
3013 + vr0 = vr1;
3014 + vr1 = tmp;
3015 + }
3016 +
3017 + if (comp == EQ_EXPR)
3018 + {
3019 + /* Equality may only be computed if both ranges represent
3020 + exactly one value. */
3021 + if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3022 + && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3023 + {
3024 + int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3025 + strict_overflow_p);
3026 + int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3027 + strict_overflow_p);
3028 + if (cmp_min == 0 && cmp_max == 0)
3029 + return boolean_true_node;
3030 + else if (cmp_min != -2 && cmp_max != -2)
3031 + return boolean_false_node;
3032 + }
 3033 + /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
3034 + else if (compare_values_warnv (vr0->min, vr1->max,
3035 + strict_overflow_p) == 1
3036 + || compare_values_warnv (vr1->min, vr0->max,
3037 + strict_overflow_p) == 1)
3038 + return boolean_false_node;
3039 +
3040 + return NULL_TREE;
3041 + }
3042 + else if (comp == NE_EXPR)
3043 + {
3044 + int cmp1, cmp2;
3045 +
3046 + /* If VR0 is completely to the left or completely to the right
3047 + of VR1, they are always different. Notice that we need to
3048 + make sure that both comparisons yield similar results to
3049 + avoid comparing values that cannot be compared at
3050 + compile-time. */
3051 + cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3052 + cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3053 + if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3054 + return boolean_true_node;
3055 +
3056 + /* If VR0 and VR1 represent a single value and are identical,
3057 + return false. */
3058 + else if (compare_values_warnv (vr0->min, vr0->max,
3059 + strict_overflow_p) == 0
3060 + && compare_values_warnv (vr1->min, vr1->max,
3061 + strict_overflow_p) == 0
3062 + && compare_values_warnv (vr0->min, vr1->min,
3063 + strict_overflow_p) == 0
3064 + && compare_values_warnv (vr0->max, vr1->max,
3065 + strict_overflow_p) == 0)
3066 + return boolean_false_node;
3067 +
3068 + /* Otherwise, they may or may not be different. */
3069 + else
3070 + return NULL_TREE;
3071 + }
3072 + else if (comp == LT_EXPR || comp == LE_EXPR)
3073 + {
3074 + int tst;
3075 +
3076 + /* If VR0 is to the left of VR1, return true. */
3077 + tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3078 + if ((comp == LT_EXPR && tst == -1)
3079 + || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3080 + {
3081 + if (overflow_infinity_range_p (vr0)
3082 + || overflow_infinity_range_p (vr1))
3083 + *strict_overflow_p = true;
3084 + return boolean_true_node;
3085 + }
3086 +
3087 + /* If VR0 is to the right of VR1, return false. */
3088 + tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3089 + if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3090 + || (comp == LE_EXPR && tst == 1))
3091 + {
3092 + if (overflow_infinity_range_p (vr0)
3093 + || overflow_infinity_range_p (vr1))
3094 + *strict_overflow_p = true;
3095 + return boolean_false_node;
3096 + }
3097 +
3098 + /* Otherwise, we don't know. */
3099 + return NULL_TREE;
3100 + }
3101 +
3102 + gcc_unreachable ();
3103 +}
3104 +
3105 +
3106 +/* Given a value range VR, a value VAL and a comparison code COMP, return
3107 + BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3108 + values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3109 + always returns false. Return NULL_TREE if it is not always
3110 + possible to determine the value of the comparison. Also set
3111 + *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3112 + infinity was used in the test. */
3113 +
3114 +static tree
3115 +compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3116 + bool *strict_overflow_p)
3117 +{
3118 + if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3119 + return NULL_TREE;
3120 +
3121 + /* Anti-ranges need to be handled separately. */
3122 + if (vr->type == VR_ANTI_RANGE)
3123 + {
3124 + /* For anti-ranges, the only predicates that we can compute at
3125 + compile time are equality and inequality. */
3126 + if (comp == GT_EXPR
3127 + || comp == GE_EXPR
3128 + || comp == LT_EXPR
3129 + || comp == LE_EXPR)
3130 + return NULL_TREE;
3131 +
3132 + /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3133 + if (value_inside_range (val, vr) == 1)
3134 + return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3135 +
3136 + return NULL_TREE;
3137 + }
3138 +
3139 + if (!usable_range_p (vr, strict_overflow_p))
3140 + return NULL_TREE;
3141 +
3142 + if (comp == EQ_EXPR)
3143 + {
3144 + /* EQ_EXPR may only be computed if VR represents exactly
3145 + one value. */
3146 + if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3147 + {
3148 + int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3149 + if (cmp == 0)
3150 + return boolean_true_node;
3151 + else if (cmp == -1 || cmp == 1 || cmp == 2)
3152 + return boolean_false_node;
3153 + }
3154 + else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3155 + || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3156 + return boolean_false_node;
3157 +
3158 + return NULL_TREE;
3159 + }
3160 + else if (comp == NE_EXPR)
3161 + {
3162 + /* If VAL is not inside VR, then they are always different. */
3163 + if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3164 + || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3165 + return boolean_true_node;
3166 +
3167 + /* If VR represents exactly one value equal to VAL, then return
3168 + false. */
3169 + if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3170 + && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3171 + return boolean_false_node;
3172 +
3173 + /* Otherwise, they may or may not be different. */
3174 + return NULL_TREE;
3175 + }
3176 + else if (comp == LT_EXPR || comp == LE_EXPR)
3177 + {
3178 + int tst;
3179 +
3180 + /* If VR is to the left of VAL, return true. */
3181 + tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3182 + if ((comp == LT_EXPR && tst == -1)
3183 + || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3184 + {
3185 + if (overflow_infinity_range_p (vr))
3186 + *strict_overflow_p = true;
3187 + return boolean_true_node;
3188 + }
3189 +
3190 + /* If VR is to the right of VAL, return false. */
3191 + tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3192 + if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3193 + || (comp == LE_EXPR && tst == 1))
3194 + {
3195 + if (overflow_infinity_range_p (vr))
3196 + *strict_overflow_p = true;
3197 + return boolean_false_node;
3198 + }
3199 +
3200 + /* Otherwise, we don't know. */
3201 + return NULL_TREE;
3202 + }
3203 + else if (comp == GT_EXPR || comp == GE_EXPR)
3204 + {
3205 + int tst;
3206 +
3207 + /* If VR is to the right of VAL, return true. */
3208 + tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3209 + if ((comp == GT_EXPR && tst == 1)
3210 + || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3211 + {
3212 + if (overflow_infinity_range_p (vr))
3213 + *strict_overflow_p = true;
3214 + return boolean_true_node;
3215 + }
3216 +
3217 + /* If VR is to the left of VAL, return false. */
3218 + tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3219 + if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3220 + || (comp == GE_EXPR && tst == -1))
3221 + {
3222 + if (overflow_infinity_range_p (vr))
3223 + *strict_overflow_p = true;
3224 + return boolean_false_node;
3225 + }
3226 +
3227 + /* Otherwise, we don't know. */
3228 + return NULL_TREE;
3229 + }
3230 +
3231 + gcc_unreachable ();
3232 +}
3233 +
3234 +
3235 +/* Debugging dumps. */
3236 +
3237 +void dump_value_range (FILE *, value_range_t *);
3238 +void debug_value_range (value_range_t *);
3239 +void dump_all_value_ranges (FILE *);
3240 +void debug_all_value_ranges (void);
3241 +void dump_vr_equiv (FILE *, bitmap);
3242 +void debug_vr_equiv (bitmap);
3243 +
3244 +
3245 +
3246 +/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3247 + create a new SSA name N and return the assertion assignment
3248 + 'V = ASSERT_EXPR <V, V OP W>'. */
3249 +
3250 +static tree
3251 +build_assert_expr_for (tree cond, tree v)
3252 +{
3253 + tree n, assertion;
3254 +
3255 + gcc_assert (TREE_CODE (v) == SSA_NAME);
3256 + n = duplicate_ssa_name (v, NULL_TREE);
3257 +
3258 + if (COMPARISON_CLASS_P (cond))
3259 + {
3260 + tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3261 + assertion = build_gimple_modify_stmt (n, a);
3262 + }
3263 + else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3264 + {
3265 + /* Given !V, build the assignment N = false. */
3266 + tree op0 = TREE_OPERAND (cond, 0);
3267 + gcc_assert (op0 == v);
3268 + assertion = build_gimple_modify_stmt (n, boolean_false_node);
3269 + }
3270 + else if (TREE_CODE (cond) == SSA_NAME)
3271 + {
3272 + /* Given V, build the assignment N = true. */
3273 + gcc_assert (v == cond);
3274 + assertion = build_gimple_modify_stmt (n, boolean_true_node);
3275 + }
3276 + else
3277 + gcc_unreachable ();
3278 +
3279 + SSA_NAME_DEF_STMT (n) = assertion;
3280 +
3281 + /* The new ASSERT_EXPR, creates a new SSA name that replaces the
3282 + operand of the ASSERT_EXPR. Register the new name and the old one
3283 + in the replacement table so that we can fix the SSA web after
3284 + adding all the ASSERT_EXPRs. */
3285 + register_new_name_mapping (n, v);
3286 +
3287 + return assertion;
3288 +}
3289 +
3290 +
3291 +/* Return false if EXPR is a predicate expression involving floating
3292 + point values. */
3293 +
3294 +static inline bool
3295 +fp_predicate (const_tree expr)
3296 +{
3297 + return (COMPARISON_CLASS_P (expr)
3298 + && FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))));
3299 +}
3300 +
3301 +
3302 +/* If the range of values taken by OP can be inferred after STMT executes,
3303 + return the comparison code (COMP_CODE_P) and value (VAL_P) that
3304 + describes the inferred range. Return true if a range could be
3305 + inferred. */
3306 +
3307 +static bool
3308 +infer_value_range (tree stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
3309 +{
3310 + *val_p = NULL_TREE;
3311 + *comp_code_p = ERROR_MARK;
3312 +
3313 + /* Do not attempt to infer anything in names that flow through
3314 + abnormal edges. */
3315 + if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
3316 + return false;
3317 +
3318 + /* Similarly, don't infer anything from statements that may throw
3319 + exceptions. */
3320 + if (tree_could_throw_p (stmt))
3321 + return false;
3322 +
3323 + /* If STMT is the last statement of a basic block with no
3324 + successors, there is no point inferring anything about any of its
3325 + operands. We would not be able to find a proper insertion point
3326 + for the assertion, anyway. */
3327 + if (stmt_ends_bb_p (stmt) && EDGE_COUNT (bb_for_stmt (stmt)->succs) == 0)
3328 + return false;
3329 +
3330 + /* We can only assume that a pointer dereference will yield
3331 + non-NULL if -fdelete-null-pointer-checks is enabled. */
3332 + if (flag_delete_null_pointer_checks && POINTER_TYPE_P (TREE_TYPE (op)))
3333 + {
3334 + unsigned num_uses, num_loads, num_stores;
3335 +
3336 + count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
3337 + if (num_loads + num_stores > 0)
3338 + {
3339 + *val_p = build_int_cst (TREE_TYPE (op), 0);
3340 + *comp_code_p = NE_EXPR;
3341 + return true;
3342 + }
3343 + }
3344 +
3345 + return false;
3346 +}
3347 +
3348 +
3349 +void dump_asserts_for (FILE *, tree);
3350 +void debug_asserts_for (tree);
3351 +void dump_all_asserts (FILE *);
3352 +void debug_all_asserts (void);
3353 +
3354 +/* Dump all the registered assertions for NAME to FILE. */
3355 +
3356 +/* If NAME doesn't have an ASSERT_EXPR registered for asserting
3357 + 'NAME COMP_CODE VAL' at a location that dominates block BB or
3358 + E->DEST, then register this location as a possible insertion point
3359 + for ASSERT_EXPR <NAME, NAME COMP_CODE VAL>.
3360 +
3361 + BB, E and SI provide the exact insertion point for the new
3362 + ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
3363 + on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
3364 + BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
3365 + must not be NULL. */
3366 +
3367 +static void
3368 +register_new_assert_for (tree name,
3369 + enum tree_code comp_code,
3370 + tree val,
3371 + basic_block bb,
3372 + edge e,
3373 + block_stmt_iterator si)
3374 +{
3375 + assert_locus_t n, loc, last_loc;
3376 + bool found;
3377 + basic_block dest_bb;
3378 +
3379 +#if defined ENABLE_CHECKING
3380 + gcc_assert (bb == NULL || e == NULL);
3381 +
3382 + if (e == NULL)
3383 + gcc_assert (TREE_CODE (bsi_stmt (si)) != COND_EXPR
3384 + && TREE_CODE (bsi_stmt (si)) != SWITCH_EXPR);
3385 +#endif
3386 +
3387 + /* The new assertion A will be inserted at BB or E. We need to
3388 + determine if the new location is dominated by a previously
3389 + registered location for A. If we are doing an edge insertion,
3390 + assume that A will be inserted at E->DEST. Note that this is not
3391 + necessarily true.
3392 +
3393 + If E is a critical edge, it will be split. But even if E is
3394 + split, the new block will dominate the same set of blocks that
3395 + E->DEST dominates.
3396 +
3397 + The reverse, however, is not true, blocks dominated by E->DEST
3398 + will not be dominated by the new block created to split E. So,
3399 + if the insertion location is on a critical edge, we will not use
3400 + the new location to move another assertion previously registered
3401 + at a block dominated by E->DEST. */
3402 + dest_bb = (bb) ? bb : e->dest;
3403 +
3404 + /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
3405 + VAL at a block dominating DEST_BB, then we don't need to insert a new
3406 + one. Similarly, if the same assertion already exists at a block
3407 + dominated by DEST_BB and the new location is not on a critical
3408 + edge, then update the existing location for the assertion (i.e.,
3409 + move the assertion up in the dominance tree).
3410 +
3411 + Note, this is implemented as a simple linked list because there
3412 + should not be more than a handful of assertions registered per
3413 + name. If this becomes a performance problem, a table hashed by
3414 + COMP_CODE and VAL could be implemented. */
3415 + loc = asserts_for[SSA_NAME_VERSION (name)];
3416 + last_loc = loc;
3417 + found = false;
3418 + while (loc)
3419 + {
3420 + if (loc->comp_code == comp_code
3421 + && (loc->val == val
3422 + || operand_equal_p (loc->val, val, 0)))
3423 + {
3424 + /* If the assertion NAME COMP_CODE VAL has already been
3425 + registered at a basic block that dominates DEST_BB, then
3426 + we don't need to insert the same assertion again. Note
3427 + that we don't check strict dominance here to avoid
3428 + replicating the same assertion inside the same basic
3429 + block more than once (e.g., when a pointer is
3430 + dereferenced several times inside a block).
3431 +
3432 + An exception to this rule are edge insertions. If the
3433 + new assertion is to be inserted on edge E, then it will
3434 + dominate all the other insertions that we may want to
3435 + insert in DEST_BB. So, if we are doing an edge
3436 + insertion, don't do this dominance check. */
3437 + if (e == NULL
3438 + && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
3439 + return;
3440 +
3441 + /* Otherwise, if E is not a critical edge and DEST_BB
3442 + dominates the existing location for the assertion, move
3443 + the assertion up in the dominance tree by updating its
3444 + location information. */
3445 + if ((e == NULL || !EDGE_CRITICAL_P (e))
3446 + && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
3447 + {
3448 + loc->bb = dest_bb;
3449 + loc->e = e;
3450 + loc->si = si;
3451 + return;
3452 + }
3453 + }
3454 +
3455 + /* Update the last node of the list and move to the next one. */
3456 + last_loc = loc;
3457 + loc = loc->next;
3458 + }
3459 +
3460 + /* If we didn't find an assertion already registered for
3461 + NAME COMP_CODE VAL, add a new one at the end of the list of
3462 + assertions associated with NAME. */
3463 + n = XNEW (struct assert_locus_d);
3464 + n->bb = dest_bb;
3465 + n->e = e;
3466 + n->si = si;
3467 + n->comp_code = comp_code;
3468 + n->val = val;
3469 + n->next = NULL;
3470 +
3471 + if (last_loc)
3472 + last_loc->next = n;
3473 + else
3474 + asserts_for[SSA_NAME_VERSION (name)] = n;
3475 +
3476 + bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
3477 +}
3478 +
3479 +/* COND is a predicate which uses NAME. Extract a suitable test code
3480 + and value and store them into *CODE_P and *VAL_P so the predicate
3481 + is normalized to NAME *CODE_P *VAL_P.
3482 +
3483 + If no extraction was possible, return FALSE, otherwise return TRUE.
3484 +
3485 + If INVERT is true, then we invert the result stored into *CODE_P. */
3486 +
3487 +static bool
3488 +extract_code_and_val_from_cond (tree name, tree cond, bool invert,
3489 + enum tree_code *code_p, tree *val_p)
3490 +{
3491 + enum tree_code comp_code;
3492 + tree val;
3493 +
3494 + /* Predicates may be a single SSA name or NAME OP VAL. */
3495 + if (cond == name)
3496 + {
3497 + /* If the predicate is a name, it must be NAME, in which
3498 + case we create the predicate NAME == true or
3499 + NAME == false accordingly. */
3500 + comp_code = EQ_EXPR;
3501 + val = invert ? boolean_false_node : boolean_true_node;
3502 + }
3503 + else
3504 + {
3505 + /* Otherwise, we have a comparison of the form NAME COMP VAL
3506 + or VAL COMP NAME. */
3507 + if (name == TREE_OPERAND (cond, 1))
3508 + {
3509 + /* If the predicate is of the form VAL COMP NAME, flip
3510 + COMP around because we need to register NAME as the
3511 + first operand in the predicate. */
3512 + comp_code = swap_tree_comparison (TREE_CODE (cond));
3513 + val = TREE_OPERAND (cond, 0);
3514 + }
3515 + else
3516 + {
3517 + /* The comparison is of the form NAME COMP VAL, so the
3518 + comparison code remains unchanged. */
3519 + comp_code = TREE_CODE (cond);
3520 + val = TREE_OPERAND (cond, 1);
3521 + }
3522 +
3523 + /* Invert the comparison code as necessary. */
3524 + if (invert)
3525 + comp_code = invert_tree_comparison (comp_code, 0);
3526 +
3527 + /* VRP does not handle float types. */
3528 + if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
3529 + return false;
3530 +
3531 + /* Do not register always-false predicates.
3532 + FIXME: this works around a limitation in fold() when dealing with
3533 + enumerations. Given 'enum { N1, N2 } x;', fold will not
3534 + fold 'if (x > N2)' to 'if (0)'. */
3535 + if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
3536 + && INTEGRAL_TYPE_P (TREE_TYPE (val)))
3537 + {
3538 + tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
3539 + tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
3540 +
3541 + if (comp_code == GT_EXPR
3542 + && (!max
3543 + || compare_values (val, max) == 0))
3544 + return false;
3545 +
3546 + if (comp_code == LT_EXPR
3547 + && (!min
3548 + || compare_values (val, min) == 0))
3549 + return false;
3550 + }
3551 + }
3552 + *code_p = comp_code;
3553 + *val_p = val;
3554 + return true;
3555 +}
3556 +
3557 +/* OP is an operand of a truth value expression which is known to have
3558 + a particular value. Register any asserts for OP and for any
3559 + operands in OP's defining statement.
3560 +
3561 + If CODE is EQ_EXPR, then we want to register OP is zero (false),
3562 + if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3563 +
3564 +static bool
3565 +register_edge_assert_for_1 (tree op, enum tree_code code,
3566 + edge e, block_stmt_iterator bsi)
3567 +{
3568 + bool retval = false;
3569 + tree op_def, rhs, val;
3570 +
3571 + /* We only care about SSA_NAMEs. */
3572 + if (TREE_CODE (op) != SSA_NAME)
3573 + return false;
3574 +
3575 + /* We know that OP will have a zero or nonzero value. If OP is used
3576 + more than once go ahead and register an assert for OP.
3577 +
3578 + The FOUND_IN_SUBGRAPH support is not helpful in this situation as
3579 + it will always be set for OP (because OP is used in a COND_EXPR in
3580 + the subgraph). */
3581 + if (!has_single_use (op))
3582 + {
3583 + val = build_int_cst (TREE_TYPE (op), 0);
3584 + register_new_assert_for (op, code, val, NULL, e, bsi);
3585 + retval = true;
3586 + }
3587 +
3588 + /* Now look at how OP is set. If it's set from a comparison,
3589 + a truth operation or some bit operations, then we may be able
3590 + to register information about the operands of that assignment. */
3591 + op_def = SSA_NAME_DEF_STMT (op);
3592 + if (TREE_CODE (op_def) != GIMPLE_MODIFY_STMT)
3593 + return retval;
3594 +
3595 + rhs = GIMPLE_STMT_OPERAND (op_def, 1);
3596 +
3597 + if (COMPARISON_CLASS_P (rhs))
3598 + {
3599 + bool invert = (code == EQ_EXPR ? true : false);
3600 + tree op0 = TREE_OPERAND (rhs, 0);
3601 + tree op1 = TREE_OPERAND (rhs, 1);
3602 +
3603 + /* Conditionally register an assert for each SSA_NAME in the
3604 + comparison. */
3605 + if (TREE_CODE (op0) == SSA_NAME
3606 + && !has_single_use (op0)
3607 + && extract_code_and_val_from_cond (op0, rhs,
3608 + invert, &code, &val))
3609 + {
3610 + register_new_assert_for (op0, code, val, NULL, e, bsi);
3611 + retval = true;
3612 + }
3613 +
3614 + /* Similarly for the second operand of the comparison. */
3615 + if (TREE_CODE (op1) == SSA_NAME
3616 + && !has_single_use (op1)
3617 + && extract_code_and_val_from_cond (op1, rhs,
3618 + invert, &code, &val))
3619 + {
3620 + register_new_assert_for (op1, code, val, NULL, e, bsi);
3621 + retval = true;
3622 + }
3623 + }
3624 + else if ((code == NE_EXPR
3625 + && (TREE_CODE (rhs) == TRUTH_AND_EXPR
3626 + || TREE_CODE (rhs) == BIT_AND_EXPR))
3627 + || (code == EQ_EXPR
3628 + && (TREE_CODE (rhs) == TRUTH_OR_EXPR
3629 + || TREE_CODE (rhs) == BIT_IOR_EXPR)))
3630 + {
3631 + /* Recurse on each operand. */
3632 + retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
3633 + code, e, bsi);
3634 + retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 1),
3635 + code, e, bsi);
3636 + }
3637 + else if (TREE_CODE (rhs) == TRUTH_NOT_EXPR)
3638 + {
3639 + /* Recurse, flipping CODE. */
3640 + code = invert_tree_comparison (code, false);
3641 + retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
3642 + code, e, bsi);
3643 + }
3644 + else if (TREE_CODE (rhs) == SSA_NAME)
3645 + {
3646 + /* Recurse through the copy. */
3647 + retval |= register_edge_assert_for_1 (rhs, code, e, bsi);
3648 + }
3649 + else if (TREE_CODE (rhs) == NOP_EXPR
3650 + || TREE_CODE (rhs) == CONVERT_EXPR
3651 + || TREE_CODE (rhs) == NON_LVALUE_EXPR)
3652 + {
3653 + /* Recurse through the type conversion. */
3654 + retval |= register_edge_assert_for_1 (TREE_OPERAND (rhs, 0),
3655 + code, e, bsi);
3656 + }
3657 +
3658 + return retval;
3659 +}
3660 +
3661 +/* Try to register an edge assertion for SSA name NAME on edge E for
3662 + the condition COND contributing to the conditional jump pointed to by SI.
3663 + Return true if an assertion for NAME could be registered. */
3664 +
3665 +static bool
3666 +register_edge_assert_for (tree name, edge e, block_stmt_iterator si, tree cond)
3667 +{
3668 + tree val;
3669 + enum tree_code comp_code;
3670 + bool retval = false;
3671 + bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3672 +
3673 + /* Do not attempt to infer anything in names that flow through
3674 + abnormal edges. */
3675 + if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3676 + return false;
3677 +
3678 + if (!extract_code_and_val_from_cond (name, cond, is_else_edge,
3679 + &comp_code, &val))
3680 + return false;
3681 +
3682 + /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3683 + reachable from E. */
3684 + if (1)/*(TEST_BIT (found_in_subgraph, SSA_NAME_VERSION (name)))*/
3685 + {
3686 + register_new_assert_for (name, comp_code, val, NULL, e, si);
3687 + retval = true;
3688 + }
3689 +
3690 + /* If COND is effectively an equality test of an SSA_NAME against
3691 + the value zero or one, then we may be able to assert values
3692 + for SSA_NAMEs which flow into COND. */
3693 +
3694 + /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
3695 + statement of NAME we can assert both operands of the TRUTH_AND_EXPR
3696 + have nonzero value. */
3697 + if (((comp_code == EQ_EXPR && integer_onep (val))
3698 + || (comp_code == NE_EXPR && integer_zerop (val))))
3699 + {
3700 + tree def_stmt = SSA_NAME_DEF_STMT (name);
3701 +
3702 + if (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
3703 + && (TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == TRUTH_AND_EXPR
3704 + || TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == BIT_AND_EXPR))
3705 + {
3706 + tree op0 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
3707 + tree op1 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 1);
3708 + retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
3709 + retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
3710 + }
3711 + }
3712 +
3713 + /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining
3714 + statement of NAME we can assert both operands of the TRUTH_OR_EXPR
3715 + have zero value. */
3716 + if (((comp_code == EQ_EXPR && integer_zerop (val))
3717 + || (comp_code == NE_EXPR && integer_onep (val))))
3718 + {
3719 + tree def_stmt = SSA_NAME_DEF_STMT (name);
3720 +
3721 + if (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
3722 + && (TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == TRUTH_OR_EXPR
3723 + || TREE_CODE (GIMPLE_STMT_OPERAND (def_stmt, 1)) == BIT_IOR_EXPR))
3724 + {
3725 + tree op0 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
3726 + tree op1 = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 1);
3727 + retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
3728 + retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
3729 + }
3730 + }
3731 +
3732 + return retval;
3733 +}
3734 +
3735 +
3736 +static bool find_assert_locations (basic_block bb);
3737 +
3738 +/* Determine whether the outgoing edges of BB should receive an
3739 + ASSERT_EXPR for each of the operands of BB's LAST statement.
3740 + The last statement of BB must be a COND_EXPR.
3741 +
3742 + If any of the sub-graphs rooted at BB have an interesting use of
3743 + the predicate operands, an assert location node is added to the
3744 + list of assertions for the corresponding operands. */
3745 +
3746 +static bool
3747 +find_conditional_asserts (basic_block bb, tree last)
3748 +{
3749 + bool need_assert;
3750 + block_stmt_iterator bsi;
3751 + tree op;
3752 + edge_iterator ei;
3753 + edge e;
3754 + ssa_op_iter iter;
3755 +
3756 + need_assert = false;
3757 + bsi = bsi_for_stmt (last);
3758 +
3759 + /* Look for uses of the operands in each of the sub-graphs
3760 + rooted at BB. We need to check each of the outgoing edges
3761 + separately, so that we know what kind of ASSERT_EXPR to
3762 + insert. */
3763 + FOR_EACH_EDGE (e, ei, bb->succs)
3764 + {
3765 + if (e->dest == bb)
3766 + continue;
3767 +
3768 + /* Remove the COND_EXPR operands from the FOUND_IN_SUBGRAPH bitmap.
3769 + Otherwise, when we finish traversing each of the sub-graphs, we
3770 + won't know whether the variables were found in the sub-graphs or
3771 + if they had been found in a block upstream from BB.
3772 +
3773 +	 This is actually a bad idea in some cases, particularly jump
3774 + threading. Consider a CFG like the following:
3775 +
3776 + 0
3777 + /|
3778 + 1 |
3779 + \|
3780 + 2
3781 + / \
3782 + 3 4
3783 +
3784 + Assume that one or more operands in the conditional at the
3785 + end of block 0 are used in a conditional in block 2, but not
3786 + anywhere in block 1. In this case we will not insert any
3787 + assert statements in block 1, which may cause us to miss
3788 + opportunities to optimize, particularly for jump threading. */
3789 + FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3790 + RESET_BIT (found_in_subgraph, SSA_NAME_VERSION (op));
3791 +
3792 + /* Traverse the strictly dominated sub-graph rooted at E->DEST
3793 + to determine if any of the operands in the conditional
3794 + predicate are used. */
3795 + if (e->dest != bb)
3796 + need_assert |= find_assert_locations (e->dest);
3797 +
3798 + /* Register the necessary assertions for each operand in the
3799 + conditional predicate. */
3800 + FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3801 + need_assert |= register_edge_assert_for (op, e, bsi,
3802 + COND_EXPR_COND (last));
3803 + }
3804 +
3805 + /* Finally, indicate that we have found the operands in the
3806 + conditional. */
3807 + FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3808 + SET_BIT (found_in_subgraph, SSA_NAME_VERSION (op));
3809 +
3810 + return need_assert;
3811 +}
3812 +
3813 +/* Compare two case labels sorting first by the destination label uid
3814 + and then by the case value. */
3815 +
3816 +static int
3817 +compare_case_labels (const void *p1, const void *p2)
3818 +{
3819 + const_tree const case1 = *(const_tree const*)p1;
3820 + const_tree const case2 = *(const_tree const*)p2;
3821 + unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
3822 + unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
3823 +
3824 + if (uid1 < uid2)
3825 + return -1;
3826 + else if (uid1 == uid2)
3827 + {
3828 + /* Make sure the default label is first in a group. */
3829 + if (!CASE_LOW (case1))
3830 + return -1;
3831 + else if (!CASE_LOW (case2))
3832 + return 1;
3833 + else
3834 + return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
3835 + }
3836 + else
3837 + return 1;
3838 +}
3839 +
3840 +/* Determine whether the outgoing edges of BB should receive an
3841 + ASSERT_EXPR for each of the operands of BB's LAST statement.
3842 + The last statement of BB must be a SWITCH_EXPR.
3843 +
3844 + If any of the sub-graphs rooted at BB have an interesting use of
3845 + the predicate operands, an assert location node is added to the
3846 + list of assertions for the corresponding operands. */
3847 +
3848 +static bool
3849 +find_switch_asserts (basic_block bb, tree last)
3850 +{
3851 + bool need_assert;
3852 + block_stmt_iterator bsi;
3853 + tree op, cond;
3854 + edge e;
3855 + tree vec = SWITCH_LABELS (last), vec2;
3856 + size_t n = TREE_VEC_LENGTH (vec);
3857 + unsigned int idx;
3858 +
3859 + need_assert = false;
3860 + bsi = bsi_for_stmt (last);
3861 + op = TREE_OPERAND (last, 0);
3862 + if (TREE_CODE (op) != SSA_NAME)
3863 + return false;
3864 +
3865 + /* Build a vector of case labels sorted by destination label. */
3866 + vec2 = make_tree_vec (n);
3867 + for (idx = 0; idx < n; ++idx)
3868 + TREE_VEC_ELT (vec2, idx) = TREE_VEC_ELT (vec, idx);
3869 + qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
3870 +
3871 + for (idx = 0; idx < n; ++idx)
3872 + {
3873 + tree min, max;
3874 + tree cl = TREE_VEC_ELT (vec2, idx);
3875 +
3876 + min = CASE_LOW (cl);
3877 + max = CASE_HIGH (cl);
3878 +
3879 + /* If there are multiple case labels with the same destination
3880 + we need to combine them to a single value range for the edge. */
3881 + if (idx + 1 < n
3882 + && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
3883 + {
3884 + /* Skip labels until the last of the group. */
3885 + do {
3886 + ++idx;
3887 + } while (idx < n
3888 + && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
3889 + --idx;
3890 +
3891 + /* Pick up the maximum of the case label range. */
3892 + if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
3893 + max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
3894 + else
3895 + max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
3896 + }
3897 +
3898 + /* Nothing to do if the range includes the default label until we
3899 + can register anti-ranges. */
3900 + if (min == NULL_TREE)
3901 + continue;
3902 +
3903 + /* Find the edge to register the assert expr on. */
3904 + e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
3905 +
3906 + /* Remove the SWITCH_EXPR operand from the FOUND_IN_SUBGRAPH bitmap.
3907 + Otherwise, when we finish traversing each of the sub-graphs, we
3908 + won't know whether the variables were found in the sub-graphs or
3909 + if they had been found in a block upstream from BB. */
3910 + RESET_BIT (found_in_subgraph, SSA_NAME_VERSION (op));
3911 +
3912 + /* Traverse the strictly dominated sub-graph rooted at E->DEST
3913 + to determine if any of the operands in the conditional
3914 + predicate are used. */
3915 + if (e->dest != bb)
3916 + need_assert |= find_assert_locations (e->dest);
3917 +
3918 + /* Register the necessary assertions for the operand in the
3919 + SWITCH_EXPR. */
3920 + cond = build2 (max ? GE_EXPR : EQ_EXPR, boolean_type_node,
3921 + op, fold_convert (TREE_TYPE (op), min));
3922 + need_assert |= register_edge_assert_for (op, e, bsi, cond);
3923 + if (max)
3924 + {
3925 + cond = build2 (LE_EXPR, boolean_type_node,
3926 + op, fold_convert (TREE_TYPE (op), max));
3927 + need_assert |= register_edge_assert_for (op, e, bsi, cond);
3928 + }
3929 + }
3930 +
3931 + /* Finally, indicate that we have found the operand in the
3932 + SWITCH_EXPR. */
3933 + SET_BIT (found_in_subgraph, SSA_NAME_VERSION (op));
3934 +
3935 + return need_assert;
3936 +}
3937 +
3938 +
3939 +/* Traverse all the statements in block BB looking for statements that
3940 + may generate useful assertions for the SSA names in their operand.
3941 + If a statement produces a useful assertion A for name N_i, then the
3942 + list of assertions already generated for N_i is scanned to
3943 + determine if A is actually needed.
3944 +
3945 + If N_i already had the assertion A at a location dominating the
3946 + current location, then nothing needs to be done. Otherwise, the
3947 + new location for A is recorded instead.
3948 +
3949 + 1- For every statement S in BB, all the variables used by S are
3950 + added to bitmap FOUND_IN_SUBGRAPH.
3951 +
3952 + 2- If statement S uses an operand N in a way that exposes a known
3953 + value range for N, then if N was not already generated by an
3954 + ASSERT_EXPR, create a new assert location for N. For instance,
3955 + if N is a pointer and the statement dereferences it, we can
3956 + assume that N is not NULL.
3957 +
3958 + 3- COND_EXPRs are a special case of #2. We can derive range
3959 + information from the predicate but need to insert different
3960 + ASSERT_EXPRs for each of the sub-graphs rooted at the
3961 + conditional block. If the last statement of BB is a conditional
3962 + expression of the form 'X op Y', then
3963 +
3964 + a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3965 +
3966 + b) If the conditional is the only entry point to the sub-graph
3967 + corresponding to the THEN_CLAUSE, recurse into it. On
3968 + return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3969 + an ASSERT_EXPR is added for the corresponding variable.
3970 +
3971 + c) Repeat step (b) on the ELSE_CLAUSE.
3972 +
3973 + d) Mark X and Y in FOUND_IN_SUBGRAPH.
3974 +
3975 + For instance,
3976 +
3977 + if (a == 9)
3978 + b = a;
3979 + else
3980 + b = c + 1;
3981 +
3982 + In this case, an assertion on the THEN clause is useful to
3983 + determine that 'a' is always 9 on that edge. However, an assertion
3984 + on the ELSE clause would be unnecessary.
3985 +
3986 + 4- If BB does not end in a conditional expression, then we recurse
3987 + into BB's dominator children.
3988 +
3989 + At the end of the recursive traversal, every SSA name will have a
3990 + list of locations where ASSERT_EXPRs should be added. When a new
3991 + location for name N is found, it is registered by calling
3992 + register_new_assert_for. That function keeps track of all the
3993 + registered assertions to prevent adding unnecessary assertions.
3994 + For instance, if a pointer P_4 is dereferenced more than once in a
3995 + dominator tree, only the location dominating all the dereference of
3996 + P_4 will receive an ASSERT_EXPR.
3997 +
3998 + If this function returns true, then it means that there are names
3999 + for which we need to generate ASSERT_EXPRs. Those assertions are
4000 + inserted by process_assert_insertions. */
4001 +
4002 +static bool
4003 +find_assert_locations (basic_block bb)
4004 +{
4005 + block_stmt_iterator si;
4006 + tree last, phi;
4007 + bool need_assert;
4008 + basic_block son;
4009 +
4010 + if (TEST_BIT (blocks_visited, bb->index))
4011 + return false;
4012 +
4013 + SET_BIT (blocks_visited, bb->index);
4014 +
4015 + need_assert = false;
4016 +
4017 + /* Traverse all PHI nodes in BB marking used operands. */
4018 + for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
4019 + {
4020 + use_operand_p arg_p;
4021 + ssa_op_iter i;
4022 +
4023 + FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4024 + {
4025 + tree arg = USE_FROM_PTR (arg_p);
4026 + if (TREE_CODE (arg) == SSA_NAME)
4027 + {
4028 + gcc_assert (is_gimple_reg (PHI_RESULT (phi)));
4029 + SET_BIT (found_in_subgraph, SSA_NAME_VERSION (arg));
4030 + }
4031 + }
4032 + }
4033 +
4034 + /* Traverse all the statements in BB marking used names and looking
4035 + for statements that may infer assertions for their used operands. */
4036 + last = NULL_TREE;
4037 + for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
4038 + {
4039 + tree stmt, op;
4040 + ssa_op_iter i;
4041 +
4042 + stmt = bsi_stmt (si);
4043 +
4044 + /* See if we can derive an assertion for any of STMT's operands. */
4045 + FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4046 + {
4047 + tree value;
4048 + enum tree_code comp_code;
4049 +
4050 + /* Mark OP in bitmap FOUND_IN_SUBGRAPH. If STMT is inside
4051 + the sub-graph of a conditional block, when we return from
4052 + this recursive walk, our parent will use the
4053 + FOUND_IN_SUBGRAPH bitset to determine if one of the
4054 + operands it was looking for was present in the sub-graph. */
4055 + SET_BIT (found_in_subgraph, SSA_NAME_VERSION (op));
4056 +
4057 + /* If OP is used in such a way that we can infer a value
4058 + range for it, and we don't find a previous assertion for
4059 + it, create a new assertion location node for OP. */
4060 + if (infer_value_range (stmt, op, &comp_code, &value))
4061 + {
4062 + /* If we are able to infer a nonzero value range for OP,
4063 + then walk backwards through the use-def chain to see if OP
4064 + was set via a typecast.
4065 +
4066 + If so, then we can also infer a nonzero value range
4067 + for the operand of the NOP_EXPR. */
4068 + if (comp_code == NE_EXPR && integer_zerop (value))
4069 + {
4070 + tree t = op;
4071 + tree def_stmt = SSA_NAME_DEF_STMT (t);
4072 +
4073 + while (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT
4074 + && TREE_CODE
4075 + (GIMPLE_STMT_OPERAND (def_stmt, 1)) == NOP_EXPR
4076 + && TREE_CODE
4077 + (TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1),
4078 + 0)) == SSA_NAME
4079 + && POINTER_TYPE_P
4080 + (TREE_TYPE (TREE_OPERAND
4081 + (GIMPLE_STMT_OPERAND (def_stmt,
4082 + 1), 0))))
4083 + {
4084 + t = TREE_OPERAND (GIMPLE_STMT_OPERAND (def_stmt, 1), 0);
4085 + def_stmt = SSA_NAME_DEF_STMT (t);
4086 +
4087 + /* Note we want to register the assert for the
4088 + operand of the NOP_EXPR after SI, not after the
4089 + conversion. */
4090 + if (! has_single_use (t))
4091 + {
4092 + register_new_assert_for (t, comp_code, value,
4093 + bb, NULL, si);
4094 + need_assert = true;
4095 + }
4096 + }
4097 + }
4098 +
4099 + /* If OP is used only once, namely in this STMT, don't
4100 + bother creating an ASSERT_EXPR for it. Such an
4101 + ASSERT_EXPR would do nothing but increase compile time. */
4102 + if (!has_single_use (op))
4103 + {
4104 + register_new_assert_for (op, comp_code, value, bb, NULL, si);
4105 + need_assert = true;
4106 + }
4107 + }
4108 + }
4109 +
4110 + /* Remember the last statement of the block. */
4111 + last = stmt;
4112 + }
4113 +
4114 + /* If BB's last statement is a conditional expression
4115 + involving integer operands, recurse into each of the sub-graphs
4116 + rooted at BB to determine if we need to add ASSERT_EXPRs. */
4117 + if (last
4118 + && TREE_CODE (last) == COND_EXPR
4119 + && !fp_predicate (COND_EXPR_COND (last))
4120 + && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4121 + need_assert |= find_conditional_asserts (bb, last);
4122 +
4123 + if (last
4124 + && TREE_CODE (last) == SWITCH_EXPR
4125 + && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4126 + need_assert |= find_switch_asserts (bb, last);
4127 +
4128 + /* Recurse into the dominator children of BB. */
4129 + for (son = first_dom_son (CDI_DOMINATORS, bb);
4130 + son;
4131 + son = next_dom_son (CDI_DOMINATORS, son))
4132 + need_assert |= find_assert_locations (son);
4133 +
4134 + return need_assert;
4135 +}
4136 +
4137 +
4138 +/* Create an ASSERT_EXPR for NAME and insert it in the location
4139 + indicated by LOC. Return true if we made any edge insertions. */
4140 +
4141 +static bool
4142 +process_assert_insertions_for (tree name, assert_locus_t loc)
4143 +{
4144 + /* Build the comparison expression NAME_i COMP_CODE VAL. */
4145 + tree stmt, cond, assert_expr;
4146 + edge_iterator ei;
4147 + edge e;
4148 +
4149 + cond = build2 (loc->comp_code, boolean_type_node, name, loc->val);
4150 + assert_expr = build_assert_expr_for (cond, name);
4151 +
4152 + if (loc->e)
4153 + {
4154 + /* We have been asked to insert the assertion on an edge. This
4155 + is used only by COND_EXPR and SWITCH_EXPR assertions. */
4156 +#if defined ENABLE_CHECKING
4157 + gcc_assert (TREE_CODE (bsi_stmt (loc->si)) == COND_EXPR
4158 + || TREE_CODE (bsi_stmt (loc->si)) == SWITCH_EXPR);
4159 +#endif
4160 +
4161 + bsi_insert_on_edge (loc->e, assert_expr);
4162 + return true;
4163 + }
4164 +
4165 + /* Otherwise, we can insert right after LOC->SI iff the
4166 +     statement is not the last statement in the block.  */
4167 + stmt = bsi_stmt (loc->si);
4168 + if (!stmt_ends_bb_p (stmt))
4169 + {
4170 + bsi_insert_after (&loc->si, assert_expr, BSI_SAME_STMT);
4171 + return false;
4172 + }
4173 +
4174 + /* If STMT must be the last statement in BB, we can only insert new
4175 + assertions on the non-abnormal edge out of BB. Note that since
4176 + STMT is not control flow, there may only be one non-abnormal edge
4177 + out of BB. */
4178 + FOR_EACH_EDGE (e, ei, loc->bb->succs)
4179 + if (!(e->flags & EDGE_ABNORMAL))
4180 + {
4181 + bsi_insert_on_edge (e, assert_expr);
4182 + return true;
4183 + }
4184 +
4185 + gcc_unreachable ();
4186 +}
4187 +
4188 +
4189 +/* Process all the insertions registered for every name N_i registered
4190 + in NEED_ASSERT_FOR. The list of assertions to be inserted are
4191 + found in ASSERTS_FOR[i]. */
4192 +
4193 +static void
4194 +process_assert_insertions (void)
4195 +{
4196 + unsigned i;
4197 + bitmap_iterator bi;
4198 + bool update_edges_p = false;
4199 + int num_asserts = 0;
4200 +
4201 + if (dump_file && (dump_flags & TDF_DETAILS))
4202 + dump_all_asserts (dump_file);
4203 +
4204 + EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4205 + {
4206 + assert_locus_t loc = asserts_for[i];
4207 + gcc_assert (loc);
4208 +
4209 + while (loc)
4210 + {
4211 + assert_locus_t next = loc->next;
4212 + update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4213 + free (loc);
4214 + loc = next;
4215 + num_asserts++;
4216 + }
4217 + }
4218 +
4219 + if (update_edges_p)
4220 + bsi_commit_edge_inserts ();
4221 +
4222 + if (dump_file && (dump_flags & TDF_STATS))
4223 + fprintf (dump_file, "\nNumber of ASSERT_EXPR expressions inserted: %d\n\n",
4224 + num_asserts);
4225 +}
4226 +
4227 +
4228 +/* Traverse the flowgraph looking for conditional jumps to insert range
4229 + expressions. These range expressions are meant to provide information
4230 + to optimizations that need to reason in terms of value ranges. They
4231 + will not be expanded into RTL. For instance, given:
4232 +
4233 + x = ...
4234 + y = ...
4235 + if (x < y)
4236 + y = x - 2;
4237 + else
4238 + x = y + 3;
4239 +
4240 + this pass will transform the code into:
4241 +
4242 + x = ...
4243 + y = ...
4244 + if (x < y)
4245 + {
4246 + x = ASSERT_EXPR <x, x < y>
4247 + y = x - 2
4248 + }
4249 + else
4250 + {
4251 + y = ASSERT_EXPR <y, x <= y>
4252 + x = y + 3
4253 + }
4254 +
4255 + The idea is that once copy and constant propagation have run, other
4256 + optimizations will be able to determine what ranges of values can 'x'
4257 + take in different paths of the code, simply by checking the reaching
4258 + definition of 'x'. */
4259 +
4260 +static void
4261 +insert_range_assertions (void)
4262 +{
4263 + edge e;
4264 + edge_iterator ei;
4265 + bool update_ssa_p;
4266 +
4267 + found_in_subgraph = sbitmap_alloc (num_ssa_names);
4268 + sbitmap_zero (found_in_subgraph);
4269 +
4270 + blocks_visited = sbitmap_alloc (last_basic_block);
4271 + sbitmap_zero (blocks_visited);
4272 +
4273 + need_assert_for = BITMAP_ALLOC (NULL);
4274 + asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
4275 +
4276 + calculate_dominance_info (CDI_DOMINATORS);
4277 +
4278 + update_ssa_p = false;
4279 + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
4280 + if (find_assert_locations (e->dest))
4281 + update_ssa_p = true;
4282 +
4283 + if (update_ssa_p)
4284 + {
4285 + process_assert_insertions ();
4286 + update_ssa (TODO_update_ssa_no_phi);
4287 + }
4288 +
4289 + if (dump_file && (dump_flags & TDF_DETAILS))
4290 + {
4291 + fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4292 + dump_function_to_file (current_function_decl, dump_file, dump_flags);
4293 + }
4294 +
4295 + sbitmap_free (found_in_subgraph);
4296 + free (asserts_for);
4297 + BITMAP_FREE (need_assert_for);
4298 +}
4299 +
4300 +/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4301 + and "struct" hacks. If VRP can determine that the
4302 + array subscript is a constant, check if it is outside valid
4303 + range. If the array subscript is a RANGE, warn if it is
4304 + non-overlapping with valid range.
4305 + IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
4306 +
4307 +static void
4308 +check_array_ref (tree ref, location_t* locus, bool ignore_off_by_one)
4309 +{
4310 + value_range_t* vr = NULL;
4311 + tree low_sub, up_sub;
4312 + tree low_bound, up_bound = array_ref_up_bound (ref);
4313 +
4314 + low_sub = up_sub = TREE_OPERAND (ref, 1);
4315 +
4316 + if (!up_bound || TREE_NO_WARNING (ref)
4317 + || TREE_CODE (up_bound) != INTEGER_CST
4318 + /* Can not check flexible arrays. */
4319 + || (TYPE_SIZE (TREE_TYPE (ref)) == NULL_TREE
4320 + && TYPE_DOMAIN (TREE_TYPE (ref)) != NULL_TREE
4321 + && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (ref))) == NULL_TREE)
4322 + /* Accesses after the end of arrays of size 0 (gcc
4323 + extension) and 1 are likely intentional ("struct
4324 + hack"). */
4325 + || compare_tree_int (up_bound, 1) <= 0)
4326 + return;
4327 +
4328 + low_bound = array_ref_low_bound (ref);
4329 +
4330 + if (TREE_CODE (low_sub) == SSA_NAME)
4331 + {
4332 + vr = get_value_range (low_sub);
4333 + if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4334 + {
4335 + low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4336 + up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4337 + }
4338 + }
4339 +
4340 + if (vr && vr->type == VR_ANTI_RANGE)
4341 + {
4342 + if (TREE_CODE (up_sub) == INTEGER_CST
4343 + && tree_int_cst_lt (up_bound, up_sub)
4344 + && TREE_CODE (low_sub) == INTEGER_CST
4345 + && tree_int_cst_lt (low_sub, low_bound))
4346 + {
4347 + warning (OPT_Warray_bounds,
4348 + "%Harray subscript is outside array bounds", locus);
4349 + TREE_NO_WARNING (ref) = 1;
4350 + }
4351 + }
4352 + else if (TREE_CODE (up_sub) == INTEGER_CST
4353 + && tree_int_cst_lt (up_bound, up_sub)
4354 + && !tree_int_cst_equal (up_bound, up_sub)
4355 + && (!ignore_off_by_one
4356 + || !tree_int_cst_equal (int_const_binop (PLUS_EXPR,
4357 + up_bound,
4358 + integer_one_node,
4359 + 0),
4360 + up_sub)))
4361 + {
4362 + warning (OPT_Warray_bounds, "%Harray subscript is above array bounds",
4363 + locus);
4364 + TREE_NO_WARNING (ref) = 1;
4365 + }
4366 + else if (TREE_CODE (low_sub) == INTEGER_CST
4367 + && tree_int_cst_lt (low_sub, low_bound))
4368 + {
4369 + warning (OPT_Warray_bounds, "%Harray subscript is below array bounds",
4370 + locus);
4371 + TREE_NO_WARNING (ref) = 1;
4372 + }
4373 +}
4374 +
4375 +/* Search whether the expression T, located at LOCATION, computes the
4376 +   address of an ARRAY_REF, and call check_array_ref on it.  */
4377 +
4378 +static void
4379 +search_for_addr_array(tree t, location_t* location)
4380 +{
4381 + while (TREE_CODE (t) == SSA_NAME)
4382 + {
4383 + t = SSA_NAME_DEF_STMT (t);
4384 + if (TREE_CODE (t) != GIMPLE_MODIFY_STMT)
4385 + return;
4386 + t = GIMPLE_STMT_OPERAND (t, 1);
4387 + }
4388 +
4389 +
4390 + /* We are only interested in addresses of ARRAY_REF's. */
4391 + if (TREE_CODE (t) != ADDR_EXPR)
4392 + return;
4393 +
4394 + /* Check each ARRAY_REFs in the reference chain. */
4395 + do
4396 + {
4397 + if (TREE_CODE (t) == ARRAY_REF)
4398 + check_array_ref (t, location, true /*ignore_off_by_one*/);
4399 +
4400 + t = TREE_OPERAND(t,0);
4401 + }
4402 + while (handled_component_p (t));
4403 +}
4404 +
4405 +/* walk_tree() callback that checks if *TP is
4406 + an ARRAY_REF inside an ADDR_EXPR (in which an array
4407 + subscript one outside the valid range is allowed). Call
4408 + check_array_ref for each ARRAY_REF found. The location is
4409 + passed in DATA. */
4410 +
4411 +static tree
4412 +check_array_bounds (tree *tp, int *walk_subtree, void *data)
4413 +{
4414 + tree t = *tp;
4415 + tree stmt = (tree)data;
4416 + location_t *location = EXPR_LOCUS (stmt);
4417 +
4418 + if (!EXPR_HAS_LOCATION (stmt))
4419 + {
4420 + *walk_subtree = FALSE;
4421 + return NULL_TREE;
4422 + }
4423 +
4424 + *walk_subtree = TRUE;
4425 +
4426 + if (TREE_CODE (t) == ARRAY_REF)
4427 + check_array_ref (t, location, false /*ignore_off_by_one*/);
4428 +
4429 + if (TREE_CODE (t) == INDIRECT_REF
4430 + || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
4431 + search_for_addr_array (TREE_OPERAND (t, 0), location);
4432 + else if (TREE_CODE (t) == CALL_EXPR)
4433 + {
4434 + tree arg;
4435 + call_expr_arg_iterator iter;
4436 +
4437 + FOR_EACH_CALL_EXPR_ARG (arg, iter, t)
4438 + search_for_addr_array (arg, location);
4439 + }
4440 +
4441 + if (TREE_CODE (t) == ADDR_EXPR)
4442 + *walk_subtree = FALSE;
4443 +
4444 + return NULL_TREE;
4445 +}
4446 +
4447 +/* Walk over all statements of all reachable BBs and call check_array_bounds
4448 + on them. */
4449 +
4450 +static void
4451 +check_all_array_refs (void)
4452 +{
4453 + basic_block bb;
4454 + block_stmt_iterator si;
4455 +
4456 + FOR_EACH_BB (bb)
4457 + {
4458 + /* Skip bb's that are clearly unreachable. */
4459 + if (single_pred_p (bb))
4460 + {
4461 + basic_block pred_bb = EDGE_PRED (bb, 0)->src;
4462 + tree ls = NULL_TREE;
4463 +
4464 + if (!bsi_end_p (bsi_last (pred_bb)))
4465 + ls = bsi_stmt (bsi_last (pred_bb));
4466 +
4467 + if (ls && TREE_CODE (ls) == COND_EXPR
4468 + && ((COND_EXPR_COND (ls) == boolean_false_node
4469 + && (EDGE_PRED (bb, 0)->flags & EDGE_TRUE_VALUE))
4470 + || (COND_EXPR_COND (ls) == boolean_true_node
4471 + && (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE))))
4472 + continue;
4473 + }
4474 + for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
4475 + walk_tree (bsi_stmt_ptr (si), check_array_bounds,
4476 + bsi_stmt (si), NULL);
4477 + }
4478 +}
4479 +
4480 +/* Convert range assertion expressions into the implied copies and
4481 + copy propagate away the copies. Doing the trivial copy propagation
4482 + here avoids the need to run the full copy propagation pass after
4483 + VRP.
4484 +
4485 + FIXME, this will eventually lead to copy propagation removing the
4486 + names that had useful range information attached to them. For
4487 + instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4488 + then N_i will have the range [3, +INF].
4489 +
4490 + However, by converting the assertion into the implied copy
4491 + operation N_i = N_j, we will then copy-propagate N_j into the uses
4492 + of N_i and lose the range information. We may want to hold on to
4493 + ASSERT_EXPRs a little while longer as the ranges could be used in
4494 + things like jump threading.
4495 +
4496 + The problem with keeping ASSERT_EXPRs around is that passes after
4497 + VRP need to handle them appropriately.
4498 +
4499 + Another approach would be to make the range information a first
4500 + class property of the SSA_NAME so that it can be queried from
4501 + any pass. This is made somewhat more complex by the need for
4502 + multiple ranges to be associated with one SSA_NAME. */
4503 +
4504 +static void
4505 +remove_range_assertions (void)
4506 +{
4507 + basic_block bb;
4508 + block_stmt_iterator si;
4509 +
4510 + /* Note that the BSI iterator bump happens at the bottom of the
4511 + loop and no bump is necessary if we're removing the statement
4512 + referenced by the current BSI. */
4513 + FOR_EACH_BB (bb)
4514 + for (si = bsi_start (bb); !bsi_end_p (si);)
4515 + {
4516 + tree stmt = bsi_stmt (si);
4517 + tree use_stmt;
4518 +
4519 + if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT
4520 + && TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == ASSERT_EXPR)
4521 + {
4522 + tree rhs = GIMPLE_STMT_OPERAND (stmt, 1), var;
4523 + tree cond = fold (ASSERT_EXPR_COND (rhs));
4524 + use_operand_p use_p;
4525 + imm_use_iterator iter;
4526 +
4527 + gcc_assert (cond != boolean_false_node);
4528 +
4529 + /* Propagate the RHS into every use of the LHS. */
4530 + var = ASSERT_EXPR_VAR (rhs);
4531 + FOR_EACH_IMM_USE_STMT (use_stmt, iter,
4532 + GIMPLE_STMT_OPERAND (stmt, 0))
4533 + FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4534 + {
4535 + SET_USE (use_p, var);
4536 + gcc_assert (TREE_CODE (var) == SSA_NAME);
4537 + }
4538 +
4539 + /* And finally, remove the copy, it is not needed. */
4540 + bsi_remove (&si, true);
4541 + release_defs (stmt);
4542 + }
4543 + else
4544 + bsi_next (&si);
4545 + }
4546 +
4547 + sbitmap_free (blocks_visited);
4548 +}
4549 +
4550 +
4551 +/* Return true if STMT is interesting for VRP. */
4552 +
4553 +static bool
4554 +stmt_interesting_for_vrp (tree stmt)
4555 +{
4556 + if (TREE_CODE (stmt) == PHI_NODE
4557 + && is_gimple_reg (PHI_RESULT (stmt))
4558 + && (INTEGRAL_TYPE_P (TREE_TYPE (PHI_RESULT (stmt)))
4559 + || POINTER_TYPE_P (TREE_TYPE (PHI_RESULT (stmt)))))
4560 + return true;
4561 + else if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
4562 + {
4563 + tree lhs = GIMPLE_STMT_OPERAND (stmt, 0);
4564 + tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
4565 +
4566 + /* In general, assignments with virtual operands are not useful
4567 + for deriving ranges, with the obvious exception of calls to
4568 + builtin functions. */
4569 + if (TREE_CODE (lhs) == SSA_NAME
4570 + && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4571 + || POINTER_TYPE_P (TREE_TYPE (lhs)))
4572 + && ((TREE_CODE (rhs) == CALL_EXPR
4573 + && TREE_CODE (CALL_EXPR_FN (rhs)) == ADDR_EXPR
4574 + && DECL_P (TREE_OPERAND (CALL_EXPR_FN (rhs), 0))
4575 + && DECL_IS_BUILTIN (TREE_OPERAND (CALL_EXPR_FN (rhs), 0)))
4576 + || ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)))
4577 + return true;
4578 + }
4579 + else if (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR)
4580 + return true;
4581 +
4582 + return false;
4583 +}
4584 +
4585 +
4586 +/* Initialize local data structures for VRP. */
4587 +
4588 +static void
4589 +vrp_initialize (void)
4590 +{
4591 + basic_block bb;
4592 +
4593 + vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
4594 + vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
4595 +
4596 + FOR_EACH_BB (bb)
4597 + {
4598 + block_stmt_iterator si;
4599 + tree phi;
4600 +
4601 + for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
4602 + {
4603 + if (!stmt_interesting_for_vrp (phi))
4604 + {
4605 + tree lhs = PHI_RESULT (phi);
4606 + set_value_range_to_varying (get_value_range (lhs));
4607 + DONT_SIMULATE_AGAIN (phi) = true;
4608 + }
4609 + else
4610 + DONT_SIMULATE_AGAIN (phi) = false;
4611 + }
4612 +
4613 + for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
4614 + {
4615 + tree stmt = bsi_stmt (si);
4616 +
4617 + if (!stmt_interesting_for_vrp (stmt))
4618 + {
4619 + ssa_op_iter i;
4620 + tree def;
4621 + FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
4622 + set_value_range_to_varying (get_value_range (def));
4623 + DONT_SIMULATE_AGAIN (stmt) = true;
4624 + }
4625 + else
4626 + {
4627 + DONT_SIMULATE_AGAIN (stmt) = false;
4628 + }
4629 + }
4630 + }
4631 +}
4632 +
4633 +
4634 +/* Visit assignment STMT. If it produces an interesting range, record
4635 + the SSA name in *OUTPUT_P. */
4636 +
4637 +static enum ssa_prop_result
4638 +vrp_visit_assignment (tree stmt, tree *output_p)
4639 +{
4640 + tree lhs, rhs, def;
4641 + ssa_op_iter iter;
4642 +
4643 + lhs = GIMPLE_STMT_OPERAND (stmt, 0);
4644 + rhs = GIMPLE_STMT_OPERAND (stmt, 1);
4645 +
4646 + /* We only keep track of ranges in integral and pointer types. */
4647 + if (TREE_CODE (lhs) == SSA_NAME
4648 + && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4649 + /* It is valid to have NULL MIN/MAX values on a type. See
4650 + build_range_type. */
4651 + && TYPE_MIN_VALUE (TREE_TYPE (lhs))
4652 + && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
4653 + || POINTER_TYPE_P (TREE_TYPE (lhs))))
4654 + {
4655 + struct loop *l;
4656 + value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
4657 +
4658 + extract_range_from_expr (&new_vr, rhs);
4659 +
4660 + /* If STMT is inside a loop, we may be able to know something
4661 + else about the range of LHS by examining scalar evolution
4662 + information. */
4663 + if (current_loops && (l = loop_containing_stmt (stmt)))
4664 + adjust_range_with_scev (&new_vr, l, stmt, lhs);
4665 +
4666 + if (update_value_range (lhs, &new_vr))
4667 + {
4668 + *output_p = lhs;
4669 +
4670 + if (dump_file && (dump_flags & TDF_DETAILS))
4671 + {
4672 + fprintf (dump_file, "Found new range for ");
4673 + print_generic_expr (dump_file, lhs, 0);
4674 + fprintf (dump_file, ": ");
4675 + dump_value_range (dump_file, &new_vr);
4676 + fprintf (dump_file, "\n\n");
4677 + }
4678 +
4679 + if (new_vr.type == VR_VARYING)
4680 + return SSA_PROP_VARYING;
4681 +
4682 + return SSA_PROP_INTERESTING;
4683 + }
4684 +
4685 + return SSA_PROP_NOT_INTERESTING;
4686 + }
4687 +
4688 + /* Every other statement produces no useful ranges. */
4689 + FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
4690 + set_value_range_to_varying (get_value_range (def));
4691 +
4692 + return SSA_PROP_VARYING;
4693 +}
4694 +
4695 +/* Helper that gets the value range of the SSA_NAME with version I
4696 + or a symbolic range containing the SSA_NAME only if the value range
4697 + is varying or undefined. */
4698 +
4699 +static inline value_range_t
4700 +get_vr_for_comparison (int i)
4701 +{
4702 + value_range_t vr = *(vr_value[i]);
4703 +
4704 + /* If name N_i does not have a valid range, use N_i as its own
4705 + range. This allows us to compare against names that may
4706 + have N_i in their ranges. */
4707 + if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
4708 + {
4709 + vr.type = VR_RANGE;
4710 + vr.min = ssa_name (i);
4711 + vr.max = ssa_name (i);
4712 + }
4713 +
4714 + return vr;
4715 +}
4716 +
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  Returns NULL_TREE when the members of the
   equivalence set disagree (dead code) or no comparison succeeds.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare VAR's own value range with VAL first.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Fold in the answer from every member of the equivalence set.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* A single overflow-free comparison pins the flag to 0;
	     otherwise record that overflow was relied upon.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  /* Only report strict-overflow reliance when we actually produced
     an answer and every contributing comparison relied on it.  */
  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
4789 +
4790 +
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Lazily-created fallback bitmaps, cached across calls so we do not
     allocate on every invocation.  They are never freed here.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits must be cleared again on every exit
     path below, since E1/E2 may be the live equivalence sets.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges pairwise.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      /* The first member of E1 that yields a consistent answer
	 against all of E2 decides the result.  */
      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
4903 +
4904 +
4905 +/* Given a conditional predicate COND, try to determine if COND yields
4906 + true or false based on the value ranges of its operands. Return
4907 + BOOLEAN_TRUE_NODE if the conditional always evaluates to true,
4908 + BOOLEAN_FALSE_NODE if the conditional always evaluates to false, and,
4909 + NULL if the conditional cannot be evaluated at compile time.
4910 +
4911 + If USE_EQUIV_P is true, the ranges of all the names equivalent with
4912 + the operands in COND are used when trying to compute its value.
4913 + This is only used during final substitution. During propagation,
4914 + we only check the range of each variable and not its equivalents.
4915 +
4916 + Set *STRICT_OVERFLOW_P to indicate whether we relied on an overflow
4917 + infinity to produce the result. */
4918 +
4919 +static tree
4920 +vrp_evaluate_conditional_warnv (tree cond, bool use_equiv_p,
4921 + bool *strict_overflow_p)
4922 +{
4923 + gcc_assert (TREE_CODE (cond) == SSA_NAME
4924 + || TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison);
4925 +
4926 + if (TREE_CODE (cond) == SSA_NAME)
4927 + {
4928 + value_range_t *vr;
4929 + tree retval;
4930 +
4931 + if (use_equiv_p)
4932 + retval = compare_name_with_value (NE_EXPR, cond, boolean_false_node,
4933 + strict_overflow_p);
4934 + else
4935 + {
4936 + value_range_t *vr = get_value_range (cond);
4937 + retval = compare_range_with_value (NE_EXPR, vr, boolean_false_node,
4938 + strict_overflow_p);
4939 + }
4940 +
4941 + /* If COND has a known boolean range, return it. */
4942 + if (retval)
4943 + return retval;
4944 +
4945 + /* Otherwise, if COND has a symbolic range of exactly one value,
4946 + return it. */
4947 + vr = get_value_range (cond);
4948 + if (vr->type == VR_RANGE && vr->min == vr->max)
4949 + return vr->min;
4950 + }
4951 + else
4952 + {
4953 + tree op0 = TREE_OPERAND (cond, 0);
4954 + tree op1 = TREE_OPERAND (cond, 1);
4955 +
4956 + /* We only deal with integral and pointer types. */
4957 + if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
4958 + && !POINTER_TYPE_P (TREE_TYPE (op0)))
4959 + return NULL_TREE;
4960 +
4961 + if (use_equiv_p)
4962 + {
4963 + if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
4964 + return compare_names (TREE_CODE (cond), op0, op1,
4965 + strict_overflow_p);
4966 + else if (TREE_CODE (op0) == SSA_NAME)
4967 + return compare_name_with_value (TREE_CODE (cond), op0, op1,
4968 + strict_overflow_p);
4969 + else if (TREE_CODE (op1) == SSA_NAME)
4970 + return (compare_name_with_value
4971 + (swap_tree_comparison (TREE_CODE (cond)), op1, op0,
4972 + strict_overflow_p));
4973 + }
4974 + else
4975 + {
4976 + value_range_t *vr0, *vr1;
4977 +
4978 + vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
4979 + vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
4980 +
4981 + if (vr0 && vr1)
4982 + return compare_ranges (TREE_CODE (cond), vr0, vr1,
4983 + strict_overflow_p);
4984 + else if (vr0 && vr1 == NULL)
4985 + return compare_range_with_value (TREE_CODE (cond), vr0, op1,
4986 + strict_overflow_p);
4987 + else if (vr0 == NULL && vr1)
4988 + return (compare_range_with_value
4989 + (swap_tree_comparison (TREE_CODE (cond)), vr1, op0,
4990 + strict_overflow_p));
4991 + }
4992 + }
4993 +
4994 + /* Anything else cannot be computed statically. */
4995 + return NULL_TREE;
4996 +}
4997 +
4998 +
4999 +
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (tree stmt, edge *taken_edge_p)
{
  tree cond, val;
  bool sop;

  *taken_edge_p = NULL;

  /* FIXME.  Handle SWITCH_EXPRs.  */
  if (TREE_CODE (stmt) == SWITCH_EXPR)
    return SSA_PROP_VARYING;

  cond = COND_EXPR_COND (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_generic_expr (dump_file, cond, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;
  val = vrp_evaluate_conditional_warnv (cond, false, &sop);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (bb_for_stmt (stmt), val);
      else
	{
	  /* The evaluation relied on overflow being undefined; drop
	     the result rather than fold based on it.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
5108 +
5109 +
5110 +/* Evaluate statement STMT. If the statement produces a useful range,
5111 + return SSA_PROP_INTERESTING and record the SSA name with the
5112 + interesting range into *OUTPUT_P.
5113 +
5114 + If STMT is a conditional branch and we can determine its truth
5115 + value, the taken edge is recorded in *TAKEN_EDGE_P.
5116 +
5117 + If STMT produces a varying value, return SSA_PROP_VARYING. */
5118 +
5119 +static enum ssa_prop_result
5120 +vrp_visit_stmt (tree stmt, edge *taken_edge_p, tree *output_p)
5121 +{
5122 + tree def;
5123 + ssa_op_iter iter;
5124 + stmt_ann_t ann;
5125 +
5126 + if (dump_file && (dump_flags & TDF_DETAILS))
5127 + {
5128 + fprintf (dump_file, "\nVisiting statement:\n");
5129 + print_generic_stmt (dump_file, stmt, dump_flags);
5130 + fprintf (dump_file, "\n");
5131 + }
5132 +
5133 + ann = stmt_ann (stmt);
5134 + if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
5135 + {
5136 + tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5137 +
5138 + /* In general, assignments with virtual operands are not useful
5139 + for deriving ranges, with the obvious exception of calls to
5140 + builtin functions. */
5141 + if ((TREE_CODE (rhs) == CALL_EXPR
5142 + && TREE_CODE (CALL_EXPR_FN (rhs)) == ADDR_EXPR
5143 + && DECL_P (TREE_OPERAND (CALL_EXPR_FN (rhs), 0))
5144 + && DECL_IS_BUILTIN (TREE_OPERAND (CALL_EXPR_FN (rhs), 0)))
5145 + || ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
5146 + return vrp_visit_assignment (stmt, output_p);
5147 + }
5148 + else if (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR)
5149 + return vrp_visit_cond_stmt (stmt, taken_edge_p);
5150 +
5151 + /* All other statements produce nothing of interest for VRP, so mark
5152 + their outputs varying and prevent further simulation. */
5153 + FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5154 + set_value_range_to_varying (get_value_range (def));
5155 +
5156 + return SSA_PROP_VARYING;
5157 +}
5158 +
5159 +
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet (value_range_t *vr0, value_range_t *vr1)
{
  /* UNDEFINED is the identity element of the meet: the result is the
     other operand.  */
  if (vr0->type == VR_UNDEFINED)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  /* VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
    {
      int cmp;
      tree min, max;

      /* Compute the convex hull of the ranges.  The lower limit of
         the new range is the minimum of the two ranges.  If they
	 cannot be compared, then give up.  */
      cmp = compare_values (vr0->min, vr1->min);
      if (cmp == 0 || cmp == 1)
	min = vr1->min;
      else if (cmp == -1)
	min = vr0->min;
      else
	goto give_up;

      /* Similarly, the upper limit of the new range is the maximum
	 of the two ranges.  If they cannot be compared, then
	 give up.  */
      cmp = compare_values (vr0->max, vr1->max);
      if (cmp == 0 || cmp == -1)
	max = vr1->max;
      else if (cmp == 1)
	max = vr0->max;
      else
	goto give_up;

      /* Check for useless ranges.  A hull spanning the whole type
	 carries no information.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min))
	  && ((vrp_val_is_min (min) || is_overflow_infinity (min))
	      && (vrp_val_is_max (max) || is_overflow_infinity (max))))
	goto give_up;

      /* The resulting set of equivalences is the intersection of
	 the two sets.  */
      if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
	bitmap_and_into (vr0->equiv, vr1->equiv);
      else if (vr0->equiv && !vr1->equiv)
	bitmap_clear (vr0->equiv);

      set_value_range (vr0, vr0->type, min, max, vr0->equiv);
    }
  else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
    {
      /* Two anti-ranges meet only if their complements intersect.
         Only handle the case of identical ranges.  */
      if (compare_values (vr0->min, vr1->min) == 0
	  && compare_values (vr0->max, vr1->max) == 0
	  && compare_values (vr0->min, vr0->max) == 0)
	{
	  /* The resulting set of equivalences is the intersection of
	     the two sets.  */
	  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
	    bitmap_and_into (vr0->equiv, vr1->equiv);
	  else if (vr0->equiv && !vr1->equiv)
	    bitmap_clear (vr0->equiv);
	}
      else
	goto give_up;
    }
  else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
         only handle the case where the ranges have an empty intersection.
	 The result of the meet operation is the anti-range.  */
      if (!symbolic_range_p (vr0)
	  && !symbolic_range_p (vr1)
	  && !value_ranges_intersect_p (vr0, vr1))
	{
	  /* Copy most of VR1 into VR0.  Don't copy VR1's equivalence
	     set.  We need to compute the intersection of the two
	     equivalence sets.  */
	  if (vr1->type == VR_ANTI_RANGE)
	    set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);

	  /* The resulting set of equivalences is the intersection of
	     the two sets.  */
	  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
	    bitmap_and_into (vr0->equiv, vr1->equiv);
	  else if (vr0->equiv && !vr1->equiv)
	    bitmap_clear (vr0->equiv);
	}
      else
	goto give_up;
    }
  else
    gcc_unreachable ();

  return;

give_up:
  /* Failed to find an efficient meet.  Before giving up and setting
     the result to VARYING, see if we can at least derive a useful
     anti-range.  FIXME, all this nonsense about distinguishing
     anti-ranges from ranges is necessary because of the odd
     semantics of range_includes_zero_p and friends.  */
  if (!symbolic_range_p (vr0)
      && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
	  || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
      && !symbolic_range_p (vr1)
      && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
	  || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
    {
      set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));

      /* Since this meet operation did not result from the meeting of
	 two equivalent names, VR0 cannot have any equivalences.  */
      if (vr0->equiv)
	bitmap_clear (vr0->equiv);
    }
  else
    set_value_range_to_varying (vr0);
}
5304 +
5305 +
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

static enum ssa_prop_result
vrp_visit_phi_node (tree phi)
{
  int i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
  int edges, old_edges;

  copy_value_range (&vr_result, lhs_vr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_generic_expr (dump_file, phi, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in over executable
     edges, counting those edges as we go.  */
  edges = 0;
  for (i = 0; i < PHI_NUM_ARGS (phi); i++)
    {
      edge e = PHI_ARG_EDGE (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	    }
	  else
	    {
	      /* A constant argument becomes the singleton range
		 [ARG, ARG]; strip any overflow marker first.  */
	      if (is_overflow_infinity (arg))
		{
		  arg = copy_node (arg);
		  TREE_OVERFLOW (arg) = 0;
		}

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, "\n\tValue: ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  vrp_meet (&vr_result, &vr_arg);

	  /* Once the meet hits VARYING, no further argument can
	     improve it.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  */
  if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
      && edges <= old_edges)
    {
      if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
	{
	  int cmp_min = compare_values (lhs_vr->min, vr_result.min);
	  int cmp_max = compare_values (lhs_vr->max, vr_result.max);

	  /* If the new minimum is smaller or larger than the previous
	     one, go all the way to -INF.  In the first case, to avoid
	     iterating millions of times to reach -INF, and in the
	     other case to avoid infinite bouncing between different
	     minimums.  NOTE(review): this condition also fires when
	     the two minimums are incomparable — confirm compare_values'
	     failure code is intended to widen here.  */
	  if (cmp_min > 0 || cmp_min < 0)
	    {
	      /* If we will end up with a (-INF, +INF) range, set it
		 to VARYING.  */
	      if (vrp_val_is_max (vr_result.max))
		goto varying;

	      if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
		  || !vrp_var_may_overflow (lhs, phi))
		vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
	      else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
		vr_result.min =
		  negative_overflow_infinity (TREE_TYPE (vr_result.min));
	      else
		goto varying;
	    }

	  /* Similarly, if the new maximum is smaller or larger than
	     the previous one, go all the way to +INF.  */
	  if (cmp_max < 0 || cmp_max > 0)
	    {
	      /* If we will end up with a (-INF, +INF) range, set it
		 to VARYING.  */
	      if (vrp_val_is_min (vr_result.min))
		goto varying;

	      if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
		  || !vrp_var_may_overflow (lhs, phi))
		vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
	      else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
		vr_result.max =
		  positive_overflow_infinity (TREE_TYPE (vr_result.max));
	      else
		goto varying;
	    }
	}
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
  if (update_value_range (lhs, &vr_result))
    return SSA_PROP_INTERESTING;

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
5456 +
5457 +
/* NOTE: the two comment headers below belonged to test_for_singularity
   and simplify_stmt_using_ranges, whose bodies this patch removes.
   They are retained here for reference only.

   We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */


/* Simplify STMT using ranges if possible.  */

5467 +
5468 +/* Stack of dest,src equivalency pairs that need to be restored after
5469 + each attempt to thread a block's incoming edge to an outgoing edge.
5470 +
5471 + A NULL entry is used to mark the end of pairs which need to be
5472 + restored. */
5473 +static VEC(tree,heap) *stack;
5474 +
5475 +/* A trivial wrapper so that we can present the generic jump threading
5476 + code with a simple API for simplifying statements. STMT is the
5477 + statement we want to simplify, WITHIN_STMT provides the location
5478 + for any overflow warnings. */
5479 +
5480 +static tree
5481 +simplify_stmt_for_jump_threading (tree stmt, tree within_stmt)
5482 +{
5483 + /* We only use VRP information to simplify conditionals. This is
5484 + overly conservative, but it's unclear if doing more would be
5485 + worth the compile time cost. */
5486 + if (TREE_CODE (stmt) != COND_EXPR)
5487 + return NULL;
5488 +
5489 + return vrp_evaluate_conditional (COND_EXPR_COND (stmt), within_stmt);
5490 +}
5491 +
5492 +/* Blocks which have more than one predecessor and more than
5493 + one successor present jump threading opportunities. ie,
5494 + when the block is reached from a specific predecessor, we
5495 + may be able to determine which of the outgoing edges will
5496 + be traversed. When this optimization applies, we are able
5497 + to avoid conditionals at runtime and we may expose secondary
5498 + optimization opportunities.
5499 +
5500 + This routine is effectively a driver for the generic jump
5501 + threading code. It basically just presents the generic code
5502 + with edges that may be suitable for jump threading.
5503 +
5504 + Unlike DOM, we do not iterate VRP if jump threading was successful.
5505 + While iterating may expose new opportunities for VRP, it is expected
5506 + those opportunities would be very limited and the compile time cost
5507 + to expose those opportunities would be significant.
5508 +
5509 + As jump threading opportunities are discovered, they are registered
5510 + for later realization. */
5511 +
5512 +static void
5513 +identify_jump_threads (void)
5514 +{
5515 + basic_block bb;
5516 + tree dummy;
5517 +
5518 + /* Ugh. When substituting values earlier in this pass we can
5519 + wipe the dominance information. So rebuild the dominator
5520 + information as we need it within the jump threading code. */
5521 + calculate_dominance_info (CDI_DOMINATORS);
5522 +
5523 + /* We do not allow VRP information to be used for jump threading
5524 + across a back edge in the CFG. Otherwise it becomes too
5525 + difficult to avoid eliminating loop exit tests. Of course
5526 + EDGE_DFS_BACK is not accurate at this time so we have to
5527 + recompute it. */
5528 + mark_dfs_back_edges ();
5529 +
5530 + /* Allocate our unwinder stack to unwind any temporary equivalences
5531 + that might be recorded. */
5532 + stack = VEC_alloc (tree, heap, 20);
5533 +
5534 + /* To avoid lots of silly node creation, we create a single
5535 + conditional and just modify it in-place when attempting to
5536 + thread jumps. */
5537 + dummy = build2 (EQ_EXPR, boolean_type_node, NULL, NULL);
5538 + dummy = build3 (COND_EXPR, void_type_node, dummy, NULL, NULL);
5539 +
5540 + /* Walk through all the blocks finding those which present a
5541 + potential jump threading opportunity. We could set this up
5542 + as a dominator walker and record data during the walk, but
5543 + I doubt it's worth the effort for the classes of jump
5544 + threading opportunities we are trying to identify at this
5545 + point in compilation. */
5546 + FOR_EACH_BB (bb)
5547 + {
5548 + tree last, cond, last1, cond1;
5549 +
5550 + basic_block dombb, dombbsave;
5551 +
5552 + if (single_succ_p(bb))
5553 + continue;
5554 + /* We only care about blocks ending in a COND_EXPR. While there
5555 + may be some value in handling SWITCH_EXPR here, I doubt it's
5556 + terribly important. */
5557 + last = bsi_stmt (bsi_last (bb));
5558 + if (TREE_CODE (last) != COND_EXPR)
5559 + continue;
5560 +
5561 + cond = COND_EXPR_COND (last);
5562 + if ((TREE_CODE (cond) == SSA_NAME
5563 + && INTEGRAL_TYPE_P (TREE_TYPE (cond)))
5564 + || (COMPARISON_CLASS_P (cond)
5565 + && TREE_CODE (TREE_OPERAND (cond, 0)) == SSA_NAME
5566 + && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (cond, 0)))
5567 + && (TREE_CODE (TREE_OPERAND (cond, 1)) == SSA_NAME
5568 + || is_gimple_min_invariant (TREE_OPERAND (cond, 1)))
5569 + && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (cond, 1)))))
5570 + ;
5571 + else continue;
5572 +
5573 + if (get_immediate_dominator(1, bb)->index == 0)
5574 + continue;
5575 +
5576 + dombbsave = bb;
5577 + dombb = get_immediate_dominator(1, bb);
5578 +
5579 + while (dombb->index != 0)
5580 + {
5581 + if (bsi_end_p(bsi_last(dombb))) {
5582 + dombbsave = dombb;
5583 + dombb = get_immediate_dominator(1, dombbsave);
5584 + continue;
5585 + }
5586 + last1 = bsi_stmt(bsi_last(dombb));
5587 + if (TREE_CODE(last1) != COND_EXPR){
5588 + dombbsave = dombb;
5589 + dombb = get_immediate_dominator(1, dombbsave);
5590 + continue;
5591 + }
5592 + cond1 = COND_EXPR_COND (last1);
5593 + if (TREE_CODE (cond1) == SSA_NAME && cond1 == cond)
5594 + break;
5595 + if (COMPARISON_CLASS_P (cond1) && COMPARISON_CLASS_P(cond)
5596 + && TREE_CODE (TREE_OPERAND (cond1, 0)) == SSA_NAME
5597 + && TREE_OPERAND(cond, 0) == TREE_OPERAND(cond1, 0))
5598 + break;
5599 + if (COMPARISON_CLASS_P (cond1) && COMPARISON_CLASS_P(cond)
5600 + && TREE_CODE (TREE_OPERAND (cond1, 1)) == SSA_NAME
5601 + && TREE_OPERAND(cond, 1) == TREE_OPERAND(cond1, 1))
5602 + break;
5603 + dombbsave = dombb;
5604 + dombb = get_immediate_dominator(1, dombbsave);
5605 + continue;
5606 + }
5607 +
5608 + if (dombb->index == 0)
5609 + continue;
5610 +
5611 + /* We're basically looking for any kind of conditional with
5612 + integral type arguments. */
5613 +
5614 + {
5615 + edge_iterator ei;
5616 + edge e;
5617 +
5618 + /* We've got a block with multiple predecessors and multiple
5619 + successors which also ends in a suitable conditional. For
5620 + each predecessor, see if we can thread it to a specific
5621 + successor. */
5622 + FOR_EACH_EDGE (e, ei, bb->preds)
5623 + {
5624 + /* Do not thread across back edges or abnormal edges
5625 + in the CFG. */
5626 + if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
5627 + continue;
5628 +
5629 + thread_across_block (dummy, e, true,
5630 + &stack,
5631 + simplify_stmt_for_jump_threading, dombbsave);
5632 + }
5633 + }
5634 + }
5635 +
5636 + /* We do not actually update the CFG or SSA graphs at this point as
5637 + ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
5638 + handle ASSERT_EXPRs gracefully. */
5639 +}
5640 +
5641 +/* We identified all the jump threading opportunities earlier, but could
5642 + not transform the CFG at that time. This routine transforms the
5643 + CFG and arranges for the dominator tree to be rebuilt if necessary.
5644 +
5645 + Note the SSA graph update will occur during the normal TODO
5646 + processing by the pass manager. */
5647 +static void
5648 +finalize_jump_threads (void)
5649 +{
5650 + bool cfg_altered = false;
5651 + cfg_altered = thread_through_all_blocks_1 (false);
5652 + if (cfg_altered) {
5653 + free_dominance_info(CDI_DOMINATORS);
5654 + }
5655 + if (profile_status == PROFILE_GUESSED)
5656 + profile_status = PROFILE_ABSENT;
5657 + VEC_free (tree, heap, stack);
5658 +}
5659 +
5660 +
/* Traverse all the blocks folding conditionals with known ranges.  */

static void
vrp_finalize (void)
{
  size_t i;
  prop_value_t *single_val_range;
  bool do_value_subst_p;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* We may have ended with ranges that have exactly one value.  Those
     values can be substituted as any other copy/const propagated
     value using substitute_and_fold.  */
  single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);

  do_value_subst_p = false;
  for (i = 0; i < num_ssa_names; i++)
    if (vr_value[i]
	&& vr_value[i]->type == VR_RANGE
	/* NOTE(review): pointer equality on the min/max trees —
	   presumably singleton ranges built by VRP share the same
	   tree node for min and max; confirm.  */
	&& vr_value[i]->min == vr_value[i]->max)
      {
	single_val_range[i].value = vr_value[i]->min;
	do_value_subst_p = true;
      }

  if (!do_value_subst_p)
    {
      /* We found no single-valued ranges, don't waste time trying to
	 do single value substitution in substitute_and_fold.  */
      free (single_val_range);
      single_val_range = NULL;
    }

  /* The second argument presumably asks substitute_and_fold to also
     use the ranges when folding — TODO confirm against its contract.  */
  substitute_and_fold (single_val_range, true);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Free allocated memory.  */
  for (i = 0; i < num_ssa_names; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  /* Safe even when SINGLE_VAL_RANGE was freed above: it is NULL in
     that case and free (NULL) is a no-op.  */
  free (single_val_range);
  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
5726 +
5727 +/* Calculates number of iterations for all loops, to ensure that they are
5728 + cached. */
5729 +
5730 +static void
5731 +record_numbers_of_iterations (void)
5732 +{
5733 + loop_iterator li;
5734 + struct loop *loop;
5735 +
5736 + FOR_EACH_LOOP (li, loop, 0)
5737 + {
5738 + number_of_latch_executions (loop);
5739 + }
5740 +}
5741 +
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_avrp (void)
{
  /* Loop structures and scalar-evolution data are needed by the
     assertion insertion and iteration-count analysis below.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* Insert the ASSERT_EXPRs that materialize range information in
     the IL for the propagator.  */
  insert_range_assertions ();

  /* Compute the # of iterations for each loop before we start the VRP
     analysis.  The value ranges determined by VRP are used in expression
     simplification, that is also used by the # of iterations analysis.
     However, in the middle of the VRP analysis, the value ranges do not take
     all the possible paths in CFG into account, so they do not have to be
     correct, and the # of iterations analysis can obtain wrong results.
     This is a problem, since the results of the # of iterations analysis
     are cached, so these mistakes would not be corrected when the value
     ranges are corrected.  */
  record_numbers_of_iterations ();

  /* Run the SSA propagator over the range lattice, then fold what was
     learned; vrp_finalize also records jump-threading opportunities.  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();
  scev_finalize ();
  loop_optimizer_finalize ();

  return 0;
}
5828 +
5829 +
5830 +static bool
5831 +gate_avrp (void)
5832 +{
5833 + return flag_tree_vrp != 0;
5834 +}
5835 +
/* Pass descriptor for AVRP (assertion-based value range propagation);
   registered in passes.c immediately after pass_vrp.  Note it shares
   the TV_TREE_VRP timevar with the regular VRP pass.  */
struct tree_opt_pass pass_avrp =
{
  "avrp",				/* name */
  gate_avrp,				/* gate */
  execute_avrp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_VRP,				/* tv_id */
  PROP_ssa | PROP_alias,		/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_dump_func
    | TODO_update_ssa,			/* todo_flags_finish */
  0					/* letter */
};
5856 Index: tree-pass.h
5857 ===================================================================
5858 --- tree-pass.h (revision 131599)
5859 +++ tree-pass.h (working copy)
5860 @@ -322,6 +322,7 @@
5861 extern struct tree_opt_pass pass_copy_prop;
5862 extern struct tree_opt_pass pass_store_ccp;
5863 extern struct tree_opt_pass pass_vrp;
5864 +extern struct tree_opt_pass pass_avrp;
5865 extern struct tree_opt_pass pass_create_structure_vars;
5866 extern struct tree_opt_pass pass_uncprop;
5867 extern struct tree_opt_pass pass_return_slot;
5868 Index: predict.c
5869 ===================================================================
5870 --- predict.c (revision 131599)
5871 +++ predict.c (working copy)
5872 @@ -289,7 +289,10 @@
5873 void
5874 tree_predict_edge (edge e, enum br_predictor predictor, int probability)
5875 {
5876 - gcc_assert (profile_status != PROFILE_GUESSED);
5877 + /* gcc_assert (profile_status != PROFILE_GUESSED);*/
5878 + if (profile_status == PROFILE_GUESSED)
5879 + profile_status = PROFILE_ABSENT;
5880 +
5881 if ((e->src != ENTRY_BLOCK_PTR && EDGE_COUNT (e->src->succs) > 1)
5882 && flag_guess_branch_prob && optimize)
5883 {
5884 Index: tree-cfgcleanup.c
5885 ===================================================================
5886 --- tree-cfgcleanup.c (revision 132891)
5887 +++ tree-cfgcleanup.c (working copy)
5888 @@ -418,6 +418,9 @@
5889 {
5890 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
5891
5892 + /* bb->frequency -= EDGE_FREQUENCY(e);
5893 + bb->count -= e->count;*/
5894 +
5895 if (e->flags & EDGE_ABNORMAL)
5896 {
5897 /* If there is an abnormal edge, redirect it anyway, and
5898 Index: tree-inline.c
5899 ===================================================================
5900 --- tree-inline.c (revision 131627)
5901 +++ tree-inline.c (working copy)
5902 @@ -234,11 +234,11 @@
5903 remap_decl (tree decl, copy_body_data *id)
5904 {
5905 tree *n;
5906 - tree fn;
5907
5908 +
5909 /* We only remap local variables in the current function. */
5910 - fn = id->src_fn;
5911
5912 +
5913 /* See if we have remapped this declaration. */
5914
5915 n = (tree *) pointer_map_contains (id->decl_map, decl);
5916 Index: tree-flow.h
5917 ===================================================================
5918 --- tree-flow.h (revision 131599)
5919 +++ tree-flow.h (working copy)
5920 @@ -1043,6 +1043,9 @@
5921 extern bool potentially_threadable_block (basic_block);
5922 extern void thread_across_edge (tree, edge, bool,
5923 VEC(tree, heap) **, tree (*) (tree, tree));
5924 +extern void thread_across_block (tree, edge, bool,
5925 + VEC(tree, heap) **, tree (*) (tree, tree),
5926 + basic_block dombbsave);
5927
5928 /* In tree-ssa-loop-im.c */
5929 /* The possibilities of statement movement. */
5930 @@ -1119,6 +1122,7 @@
5931
5932 /* In tree-ssa-threadupdate.c. */
5933 extern bool thread_through_all_blocks (bool);
5934 +extern bool thread_through_all_blocks_1 (bool);
5935 extern void register_jump_thread (edge, edge);
5936
5937 /* In gimplify.c */
5938 Index: Makefile.in
5939 ===================================================================
5940 --- Makefile.in (revision 131599)
5941 +++ Makefile.in (working copy)
5942 @@ -1211,6 +1211,7 @@
5943 tree-vectorizer.o \
5944 tree-vn.o \
5945 tree-vrp.o \
5946 + tree-avrp.o \
5947 tree.o \
5948 value-prof.o \
5949 var-tracking.o \
5950 @@ -2084,6 +2085,10 @@
5951 $(TREE_FLOW_H) tree-pass.h $(TREE_DUMP_H) $(DIAGNOSTIC_H) $(GGC_H) \
5952 $(BASIC_BLOCK_H) tree-ssa-propagate.h $(FLAGS_H) $(TREE_DUMP_H) \
5953 $(CFGLOOP_H) $(SCEV_H) tree-chrec.h $(TIMEVAR_H) toplev.h intl.h
5954 +tree-avrp.o : tree-avrp.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
5955 + $(TREE_FLOW_H) tree-pass.h $(TREE_DUMP_H) $(DIAGNOSTIC_H) $(GGC_H) \
5956 + $(BASIC_BLOCK_H) tree-ssa-propagate.h $(FLAGS_H) $(TREE_DUMP_H) \
5957 + $(CFGLOOP_H) $(SCEV_H) tree-chrec.h $(TIMEVAR_H) toplev.h intl.h
5958 tree-cfg.o : tree-cfg.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
5959 $(RTL_H) $(TREE_H) $(TM_P_H) $(EXPR_H) $(GGC_H) $(FLAGS_H) output.h \
5960 $(DIAGNOSTIC_H) $(FUNCTION_H) $(TIMEVAR_H) $(TM_H) coretypes.h \
5961 Index: passes.c
5962 ===================================================================
5963 --- passes.c (revision 131599)
5964 +++ passes.c (working copy)
5965 @@ -569,6 +569,8 @@
5966 NEXT_PASS (pass_copy_prop);
5967 NEXT_PASS (pass_merge_phi);
5968 NEXT_PASS (pass_vrp);
5969 + NEXT_PASS (pass_avrp);
5970 + NEXT_PASS (pass_profile);
5971 NEXT_PASS (pass_dce);
5972 NEXT_PASS (pass_cselim);
5973 NEXT_PASS (pass_dominator);
5974 Index: tree-ssa-threadedge.c
5975 ===================================================================
5976 --- tree-ssa-threadedge.c (revision 131599)
5977 +++ tree-ssa-threadedge.c (working copy)
5978 @@ -570,3 +570,81 @@
5979 fail:
5980 remove_temporary_equivalences (stack);
5981 }
5982 +
5983 +void
5984 +thread_across_block (tree dummy_cond,
5985 + edge e,
5986 + bool handle_dominating_asserts,
5987 + VEC(tree, heap) **stack,
5988 + tree (*simplify) (tree, tree),basic_block dombbsave)
5989 +{
5990 + tree stmt;
5991 + basic_block bb1;
5992 + bb1 = dombbsave;
5993 + bb1 = 0;
5994 + /* If E is a backedge, then we want to verify that the COND_EXPR,
5995 + SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
5996 + by any statements in e->dest. If it is affected, then it is not
5997 + safe to thread this edge. */
5998 + if (e->flags & EDGE_DFS_BACK)
5999 + {
6000 + ssa_op_iter iter;
6001 + use_operand_p use_p;
6002 + tree last = bsi_stmt (bsi_last (e->dest));
6003 +
6004 + FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
6005 + {
6006 + tree use = USE_FROM_PTR (use_p);
6007 +
6008 + if (TREE_CODE (use) == SSA_NAME
6009 + && TREE_CODE (SSA_NAME_DEF_STMT (use)) != PHI_NODE
6010 + && bb_for_stmt (SSA_NAME_DEF_STMT (use)) == e->dest)
6011 + goto fail;
6012 + }
6013 + }
6014 +
6015 + stmt_count = 0;
6016 +
6017 + /* PHIs create temporary equivalences. */
6018 + if (!record_temporary_equivalences_from_phis (e, stack))
6019 + goto fail;
6020 +
6021 + /* Now walk each statement recording any context sensitive
6022 + temporary equivalences we can detect. */
6023 + stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
6024 + if (!stmt)
6025 + goto fail;
6026 +
6027 + /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
6028 + will be taken. */
6029 + if (TREE_CODE (stmt) == COND_EXPR
6030 + || TREE_CODE (stmt) == GOTO_EXPR
6031 + || TREE_CODE (stmt) == SWITCH_EXPR)
6032 + {
6033 + tree cond;
6034 +/* edge_iterator ei;
6035 + edge e1;
6036 +
6037 + FOR_EACH_EDGE (e1, ei, dombbsave->preds)
6038 + {*/
6039 +
6040 + /* Extract and simplify the condition. */
6041 + cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify, handle_dominating_asserts);
6042 +
6043 + if (cond && is_gimple_min_invariant (cond))
6044 + {
6045 + edge taken_edge = find_taken_edge (e->dest, cond);
6046 + basic_block dest = (taken_edge ? taken_edge->dest : NULL);
6047 +
6048 + if (dest == e->dest)
6049 + goto fail;
6050 +
6051 + remove_temporary_equivalences (stack);
6052 + register_jump_thread (e, taken_edge);
6053 + }
6054 + /* }*/
6055 + }
6056 +
6057 + fail:
6058 + remove_temporary_equivalences (stack);
6059 +}
6060
Attached Files
To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily. You are not allowed to attach a file to this page.